Posted to commits@dolphinscheduler.apache.org by gi...@apache.org on 2020/09/08 03:19:02 UTC

[incubator-dolphinscheduler-website] branch asf-site updated: Automated deployment: Tue Sep 8 03:18:50 UTC 2020 ce451a1015e061d78aa9c33bdee8bd8011e4dc45

This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-dolphinscheduler-website.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 1045640  Automated deployment: Tue Sep  8 03:18:50 UTC 2020 ce451a1015e061d78aa9c33bdee8bd8011e4dc45
1045640 is described below

commit 10456402d5124b474c5e64055097af8dc552e4aa
Author: dailidong <da...@users.noreply.github.com>
AuthorDate: Tue Sep 8 03:18:50 2020 +0000

    Automated deployment: Tue Sep  8 03:18:50 UTC 2020 ce451a1015e061d78aa9c33bdee8bd8011e4dc45
---
 build/documentation.js                       |   2 +-
 en-us/docs/1.3.1/user_doc/metadata-1.3.html  | 743 ++++++++++++++++++++
 en-us/docs/1.3.1/user_doc/metadata-1.3.json  |   6 +
 en-us/docs/1.3.1/user_doc/system-manual.html | 987 +++++++++++++++++++++++++++
 en-us/docs/1.3.1/user_doc/system-manual.json |   6 +
 en-us/docs/1.3.1/user_doc/upgrade.html       | 122 ++++
 en-us/docs/1.3.1/user_doc/upgrade.json       |   6 +
 img/addtenant-en.png                         | Bin 0 -> 187537 bytes
 img/auth-en.png                              | Bin 0 -> 262873 bytes
 img/complement_en.png                        | Bin 0 -> 190707 bytes
 img/create-token-en.png                      | Bin 0 -> 326368 bytes
 img/dag5.png                                 | Bin 0 -> 508254 bytes
 img/dag6.png                                 | Bin 0 -> 564481 bytes
 img/dag7.png                                 | Bin 0 -> 566175 bytes
 img/dag8.png                                 | Bin 0 -> 79398 bytes
 img/datax-en.png                             | Bin 0 -> 563788 bytes
 img/depend-node-en.png                       | Bin 0 -> 496668 bytes
 img/depend-node1-en.png                      | Bin 0 -> 516594 bytes
 img/depend-node3-en.png                      | Bin 0 -> 479533 bytes
 img/editDag-en.png                           | Bin 0 -> 421677 bytes
 img/file_create_en.png                       | Bin 0 -> 477930 bytes
 img/file_detail_en.png                       | Bin 0 -> 494959 bytes
 img/file_rename_en.png                       | Bin 0 -> 385164 bytes
 img/flink-en.png                             | Bin 0 -> 516614 bytes
 img/global_parameter_en.png                  | Bin 0 -> 219165 bytes
 img/hive1-en.png                             | Bin 0 -> 241030 bytes
 img/http-en.png                              | Bin 0 -> 519526 bytes
 img/instance-list-en.png                     | Bin 0 -> 199051 bytes
 img/instanceViewLog-en.png                   | Bin 0 -> 582387 bytes
 img/local_parameter_en.png                   | Bin 0 -> 72458 bytes
 img/mail-en.png                              | Bin 0 -> 224035 bytes
 img/master-jk-en.png                         | Bin 0 -> 291918 bytes
 img/mr_edit_en.png                           | Bin 0 -> 182250 bytes
 img/mr_java_en.png                           | Bin 0 -> 186447 bytes
 img/mysql-jk-en.png                          | Bin 0 -> 193968 bytes
 img/postgresql-en.png                        | Bin 0 -> 256660 bytes
 img/procedure-en.png                         | Bin 0 -> 165706 bytes
 img/run_params_en.png                        | Bin 0 -> 468680 bytes
 img/run_work_en.png                          | Bin 0 -> 130884 bytes
 img/spark-en.png                             | Bin 0 -> 224331 bytes
 img/sql-en.png                               | Bin 0 -> 548091 bytes
 img/sql-node-en.png                          | Bin 0 -> 205549 bytes
 img/task-list-en.png                         | Bin 0 -> 279528 bytes
 img/task-log-en.png                          | Bin 0 -> 385429 bytes
 img/task-log2-en.png                         | Bin 0 -> 239927 bytes
 img/task_history_en.png                      | Bin 0 -> 593365 bytes
 img/time-manage-list-en.png                  | Bin 0 -> 165932 bytes
 img/time_schedule_en.png                     | Bin 0 -> 172406 bytes
 img/tree_en.png                              | Bin 0 -> 453382 bytes
 img/udf_edit_en.png                          | Bin 0 -> 208093 bytes
 img/user-en.png                              | Bin 0 -> 288550 bytes
 img/work_list_en.png                         | Bin 0 -> 428834 bytes
 img/worker-jk-en.png                         | Bin 0 -> 295298 bytes
 zh-cn/docs/1.3.1/user_doc/upgrade.html       |   2 +-
 zh-cn/docs/1.3.1/user_doc/upgrade.json       |   2 +-
 55 files changed, 1873 insertions(+), 3 deletions(-)

diff --git a/build/documentation.js b/build/documentation.js
index bd2095f..f84c8d3 100644
--- a/build/documentation.js
+++ b/build/documentation.js
@@ -170,4 +170,4 @@ Object.defineProperty(t,"__esModule",{value:!0});var i="function"==typeof Symbol
  * Copyright © 2012-2019 Faisal Salman <f...@faisalman.com>
  * Licensed under MIT License
  */
-return"string"==typeof e?e.replace(/[^\d\.]/g,"").split(".")[0]:void 0},trim:function(e){return e.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,"")}},m={rgx:function(e,t){for(var n,r,o,i,a,c,s=0;s<t.length&&!a;){var l=t[s],u=t[s+1];for(n=r=0;n<l.length&&!a;)if(a=l[n++].exec(e))for(o=0;o<u.length;o++)c=a[++r],i=u[o],"object"==typeof i&&i.length>0?2==i.length?"function"==typeof i[1]?this[i[0]]=i[1].call(this,c):this[i[0]]=i[1]:3==i.length?"function"!=typeof i[1]||i[1].exec&&i[1].test?this[i [...]
\ No newline at end of file
+return"string"==typeof e?e.replace(/[^\d\.]/g,"").split(".")[0]:void 0},trim:function(e){return e.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,"")}},m={rgx:function(e,t){for(var n,r,o,i,a,c,s=0;s<t.length&&!a;){var l=t[s],u=t[s+1];for(n=r=0;n<l.length&&!a;)if(a=l[n++].exec(e))for(o=0;o<u.length;o++)c=a[++r],i=u[o],"object"==typeof i&&i.length>0?2==i.length?"function"==typeof i[1]?this[i[0]]=i[1].call(this,c):this[i[0]]=i[1]:3==i.length?"function"!=typeof i[1]||i[1].exec&&i[1].test?this[i [...]
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/metadata-1.3.html b/en-us/docs/1.3.1/user_doc/metadata-1.3.html
new file mode 100644
index 0000000..2a83941
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/metadata-1.3.html
@@ -0,0 +1,743 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="metadata-1.3" />
+	<meta name="description" content="metadata-1.3" />
+	<!-- Page tab title -->
+	<title>metadata-1.3</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<p><a name="25Ald"></a></p>
+<h3>Table overview</h3>
+<table>
+<thead>
+<tr>
+<th style="text-align:center">Table Name</th>
+<th style="text-align:center">Table information</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:center">t_ds_access_token</td>
+<td style="text-align:center">Access the token of the ds backend</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_alert</td>
+<td style="text-align:center">Warning message</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_alertgroup</td>
+<td style="text-align:center">Alarm group</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_command</td>
+<td style="text-align:center">Excuting an order</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_datasource</td>
+<td style="text-align:center">data source</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_error_command</td>
+<td style="text-align:center">Wrong command</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_process_definition</td>
+<td style="text-align:center">Process definition</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_process_instance</td>
+<td style="text-align:center">Process instance</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_project</td>
+<td style="text-align:center">project</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_queue</td>
+<td style="text-align:center">queue</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_datasource_user</td>
+<td style="text-align:center">User associated data source</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_process_instance</td>
+<td style="text-align:center">Subprocess</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_project_user</td>
+<td style="text-align:center">User-related projects</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_resources_user</td>
+<td style="text-align:center">User associated resources</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_udfs_user</td>
+<td style="text-align:center">User associated UDF function</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_user_alertgroup</td>
+<td style="text-align:center">User associated alarm group</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_resources</td>
+<td style="text-align:center">resource</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_schedules</td>
+<td style="text-align:center">Process timing scheduling</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_session</td>
+<td style="text-align:center">User login session</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_task_instance</td>
+<td style="text-align:center">Task instance</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_tenant</td>
+<td style="text-align:center">Tenant</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_udfs</td>
+<td style="text-align:center">UDF resources</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_user</td>
+<td style="text-align:center">user</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_version</td>
+<td style="text-align:center">ds version information</td>
+</tr>
+</tbody>
+</table>
+<p><a name="VNVGr"></a></p>
+<h3>User / Queue / Data source</h3>
+<p><img src="/img/metadata-erd/user-queue-datasource.png" alt="image.png"></p>
+<ul>
+<li>There can be multiple users under a tenant<br /></li>
+<li>The queue field in t_ds_user stores the queue_name from the queue list, while t_ds_tenant stores queue_id. During the execution of a process definition, the user queue takes precedence; if the user queue is empty, the tenant queue is used<br /></li>
+<li>The user_id field in the t_ds_datasource table represents the user who created the data source, and the user_id in t_ds_relation_datasource_user represents the user who has permission to the data source<br />
+<a name="HHyGV"></a></li>
+</ul>
+<h3>Project / Resource / Alert</h3>
+<p><img src="/img/metadata-erd/project-resource-alert.png" alt="image.png"></p>
+<ul>
+<li>A user can have multiple projects. Project authorization binds project_id and user_id through the t_ds_relation_project_user table<br /></li>
+<li>The user_id in the t_ds_project table represents the user who created the project, and the user_id in the t_ds_relation_project_user table represents the user who has permission to the project<br /></li>
+<li>The user_id in the t_ds_resources table represents the user who created the resource, and the user_id in t_ds_relation_resources_user represents the user who has permission to the resource<br /></li>
+<li>The user_id in the t_ds_udfs table represents the user who created the UDF, and the user_id in the t_ds_relation_udfs_user table represents the user who has permission to the UDF<br />
+<a name="Bg2Sn"></a></li>
+</ul>
+<h3>Command / Process / Task</h3>
+<p><img src="/img/metadata-erd/command.png" alt="image.png"><br /><img src="/img/metadata-erd/process-task.png" alt="image.png"></p>
+<ul>
+<li>A project has multiple process definitions, one process definition can generate multiple process instances, and one process instance can generate multiple task instances<br /></li>
+<li>The t_ds_schedules table stores the timing scheduling information of process definitions<br /></li>
+<li>The t_ds_relation_process_instance table handles the case where a process definition contains sub-processes: parent_process_instance_id is the id of the main process instance that contains the sub-process, process_instance_id is the id of the sub-process instance, and parent_task_instance_id is the id of the task instance of the sub-process node (an illustrative query is sketched after this list); the process instance table and the task instance table correspond to the t_ds_process_instance table and [...]
+<a name="Pv25P"></a></li>
+</ul>
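+<p>As a hedged illustration of the sub-process relation described above (a minimal sketch using only the column names listed in this document; the parent instance id 100 is a placeholder), the following query lists the sub-process instances spawned by one parent process instance:</p>
+<pre><code>-- illustrative only: sub-process instances started by parent process instance 100
+SELECT r.parent_task_instance_id,
+       p.id   AS sub_process_instance_id,
+       p.name AS sub_process_instance_name,
+       p.state
+FROM t_ds_relation_process_instance r
+JOIN t_ds_process_instance p ON p.id = r.process_instance_id
+WHERE r.parent_process_instance_id = 100;
+</code></pre>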
+<h3>Core table schema</h3>
+<p><a name="32Jzd"></a></p>
+<h4>t_ds_process_definition</h4>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Type</th>
+<th>Comment</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>Primary key</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>Process definition name</td>
+</tr>
+<tr>
+<td>version</td>
+<td>int</td>
+<td>Process definition version</td>
+</tr>
+<tr>
+<td>release_state</td>
+<td>tinyint</td>
+<td>Release status of the process definition: 0 not online, 1 online</td>
+</tr>
+<tr>
+<td>project_id</td>
+<td>int</td>
+<td>project id</td>
+</tr>
+<tr>
+<td>user_id</td>
+<td>int</td>
+<td>Id of the user to whom the process definition belongs</td>
+</tr>
+<tr>
+<td>process_definition_json</td>
+<td>longtext</td>
+<td>Process definition json string</td>
+</tr>
+<tr>
+<td>description</td>
+<td>text</td>
+<td>Process definition description</td>
+</tr>
+<tr>
+<td>global_params</td>
+<td>text</td>
+<td>Global parameters</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>Whether the process is available: 0 is not available, 1 is available</td>
+</tr>
+<tr>
+<td>locations</td>
+<td>text</td>
+<td>Node coordinate information</td>
+</tr>
+<tr>
+<td>connects</td>
+<td>text</td>
+<td>Node connection information</td>
+</tr>
+<tr>
+<td>receivers</td>
+<td>text</td>
+<td>Recipient</td>
+</tr>
+<tr>
+<td>receivers_cc</td>
+<td>text</td>
+<td>Cc</td>
+</tr>
+<tr>
+<td>create_time</td>
+<td>datetime</td>
+<td>Creation time</td>
+</tr>
+<tr>
+<td>timeout</td>
+<td>int</td>
+<td>Timeout period</td>
+</tr>
+<tr>
+<td>tenant_id</td>
+<td>int</td>
+<td>Tenant id</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>Update time</td>
+</tr>
+<tr>
+<td>modify_by</td>
+<td>varchar</td>
+<td>Modify user</td>
+</tr>
+<tr>
+<td>resource_ids</td>
+<td>varchar</td>
+<td>Resource id set</td>
+</tr>
+</tbody>
+</table>
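+<p>A minimal query sketch based on the field descriptions above (the project id 1 is a placeholder; verify values against your own instance before relying on them): list the online, available process definitions of one project.</p>
+<pre><code>-- illustrative only: online and available process definitions of project 1
+SELECT id, name, version, user_id, update_time
+FROM t_ds_process_definition
+WHERE project_id = 1
+  AND release_state = 1   -- 1 = online
+  AND flag = 1;           -- 1 = available
+</code></pre>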
+<p><a name="e6jfz"></a></p>
+<h4>t_ds_process_instance</h4>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Type</th>
+<th>Comment</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>Primary key</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>Process instance name</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>Process definition id</td>
+</tr>
+<tr>
+<td>state</td>
+<td>tinyint</td>
+<td>Process instance status: 0 submitted successfully, 1 running, 2 ready to pause, 3 paused, 4 ready to stop, 5 stopped, 6 failed, 7 succeeded, 8 needs fault tolerance, 9 killed, 10 waiting for thread, 11 waiting for dependencies to complete</td>
+</tr>
+<tr>
+<td>recovery</td>
+<td>tinyint</td>
+<td>Process instance fault tolerance flag: 0 normal, 1 needs to be restarted by fault tolerance</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>Process instance start time</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>Process instance end time</td>
+</tr>
+<tr>
+<td>run_times</td>
+<td>int</td>
+<td>Number of process instance runs</td>
+</tr>
+<tr>
+<td>host</td>
+<td>varchar</td>
+<td>The machine where the process instance is located</td>
+</tr>
+<tr>
+<td>command_type</td>
+<td>tinyint</td>
+<td>Command type: 0 start the workflow, 1 start execution from the current node, 2 restore a fault-tolerant workflow, 3 resume a suspended process, 4 start execution from the failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread</td>
+</tr>
+<tr>
+<td>command_param</td>
+<td>text</td>
+<td>Command parameters (json format)</td>
+</tr>
+<tr>
+<td>task_depend_type</td>
+<td>tinyint</td>
+<td>Node dependency type: 0 current node, 1 forward execution, 2 backward execution</td>
+</tr>
+<tr>
+<td>max_try_times</td>
+<td>tinyint</td>
+<td>Maximum number of retries</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>Failure strategy: 0 end after failure, 1 continue after failure</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>Alarm type: 0 not sent, 1 sent if the process succeeds, 2 sent if the process fails, 3 sent whether the process succeeds or fails</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>Alarm group id</td>
+</tr>
+<tr>
+<td>schedule_time</td>
+<td>datetime</td>
+<td>Expected running time</td>
+</tr>
+<tr>
+<td>command_start_time</td>
+<td>datetime</td>
+<td>Start command time</td>
+</tr>
+<tr>
+<td>global_params</td>
+<td>text</td>
+<td>Global parameters (the process definition's global parameters, solidified when the instance is created)</td>
+</tr>
+<tr>
+<td>process_instance_json</td>
+<td>longtext</td>
+<td>Process instance json (a copy of the process definition json)</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>Availability: 0 not available, 1 available</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>timestamp</td>
+<td>Update time</td>
+</tr>
+<tr>
+<td>is_sub_process</td>
+<td>int</td>
+<td>Whether it is a sub-workflow: 1 yes, 0 no</td>
+</tr>
+<tr>
+<td>executor_id</td>
+<td>int</td>
+<td>Command execution user</td>
+</tr>
+<tr>
+<td>locations</td>
+<td>text</td>
+<td>Node coordinate information</td>
+</tr>
+<tr>
+<td>connects</td>
+<td>text</td>
+<td>Node connection information</td>
+</tr>
+<tr>
+<td>history_cmd</td>
+<td>text</td>
+<td>Historical commands, record all operations on process instances</td>
+</tr>
+<tr>
+<td>dependence_schedule_times</td>
+<td>text</td>
+<td>Expected schedule times of the dependency node</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>Process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group</td>
+<td>varchar</td>
+<td>The worker group on which the tasks are designated to run</td>
+</tr>
+<tr>
+<td>timeout</td>
+<td>int</td>
+<td>Timeout period</td>
+</tr>
+<tr>
+<td>tenant_id</td>
+<td>int</td>
+<td>Tenant id</td>
+</tr>
+</tbody>
+</table>
+<p><a name="IvHEc"></a></p>
+<h4>t_ds_task_instance</h4>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Type</th>
+<th>Comment</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>Primary key</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>Task name</td>
+</tr>
+<tr>
+<td>task_type</td>
+<td>varchar</td>
+<td>Task type</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>Process definition id</td>
+</tr>
+<tr>
+<td>process_instance_id</td>
+<td>int</td>
+<td>Process instance id</td>
+</tr>
+<tr>
+<td>task_json</td>
+<td>longtext</td>
+<td>Task node json</td>
+</tr>
+<tr>
+<td>state</td>
+<td>tinyint</td>
+<td>Task instance status: 0 submitted successfully, 1 running, 2 ready to be suspended, 3 suspended, 4 ready to stop, 5 stopped, 6 failed, 7 successful, 8 needs fault tolerance, 9 kill, 10 waiting for thread, 11 waiting for dependency to complete</td>
+</tr>
+<tr>
+<td>submit_time</td>
+<td>datetime</td>
+<td>Task submission time</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>Task start time</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>Task end time</td>
+</tr>
+<tr>
+<td>host</td>
+<td>varchar</td>
+<td>The machine performing the task</td>
+</tr>
+<tr>
+<td>execute_path</td>
+<td>varchar</td>
+<td>Task execution path</td>
+</tr>
+<tr>
+<td>log_path</td>
+<td>varchar</td>
+<td>Task log path</td>
+</tr>
+<tr>
+<td>alert_flag</td>
+<td>tinyint</td>
+<td>Whether to alert</td>
+</tr>
+<tr>
+<td>retry_times</td>
+<td>int</td>
+<td>number of retries</td>
+</tr>
+<tr>
+<td>pid</td>
+<td>int</td>
+<td>Process pid</td>
+</tr>
+<tr>
+<td>app_link</td>
+<td>varchar</td>
+<td>yarn app id</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>Availability: 0 not available, 1 available</td>
+</tr>
+<tr>
+<td>retry_interval</td>
+<td>int</td>
+<td>Retry interval</td>
+</tr>
+<tr>
+<td>max_retry_times</td>
+<td>int</td>
+<td>Maximum number of retries</td>
+</tr>
+<tr>
+<td>task_instance_priority</td>
+<td>int</td>
+<td>Task instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group</td>
+<td>varchar</td>
+<td>The worker group on which the task is designated to run</td>
+</tr>
+</tbody>
+</table>
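+<p>Another minimal query sketch based on the fields above (the process instance id 200 is a placeholder): find the failed task instances of one process instance together with their log paths.</p>
+<pre><code>-- illustrative only: failed task instances of process instance 200
+SELECT id, name, task_type, host, log_path, retry_times
+FROM t_ds_task_instance
+WHERE process_instance_id = 200
+  AND state = 6            -- 6 = failed
+ORDER BY end_time DESC;
+</code></pre>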
+<p><a name="pPQkU"></a></p>
+<h4>t_ds_schedules</h4>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Type</th>
+<th>Comment</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>Primary key</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>Process definition id</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>Schedule start time</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>Schedule end time</td>
+</tr>
+<tr>
+<td>crontab</td>
+<td>varchar</td>
+<td>crontab expression</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>Failure strategy: 0 ends, 1 continues</td>
+</tr>
+<tr>
+<td>user_id</td>
+<td>int</td>
+<td>User id</td>
+</tr>
+<tr>
+<td>release_state</td>
+<td>tinyint</td>
+<td>Status: 0 not online, 1 online</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>Alarm type: 0 not sent, 1 sent if the process succeeds, 2 sent if the process fails, 3 sent whether the process succeeds or fails</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>Alarm group id</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>Process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group</td>
+<td>varchar</td>
+<td>The worker group on which the tasks are designated to run</td>
+</tr>
+<tr>
+<td>create_time</td>
+<td>datetime</td>
+<td>Creation time</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>Update time</td>
+</tr>
+</tbody>
+</table>
+<p><a name="TkQzn"></a></p>
+<h4>t_ds_command</h4>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Type</th>
+<th>Comment</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>Primary key</td>
+</tr>
+<tr>
+<td>command_type</td>
+<td>tinyint</td>
+<td>Command type: 0 start the workflow, 1 start execution from the current node, 2 resume the fault-tolerant workflow, 3 resume the suspended process, 4 start execution from the failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 Stop, 10 resume waiting thread</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>Process definition id</td>
+</tr>
+<tr>
+<td>command_param</td>
+<td>text</td>
+<td>Command parameters (json format)</td>
+</tr>
+<tr>
+<td>task_depend_type</td>
+<td>tinyint</td>
+<td>Node dependency type: 0 current node, 1 forward execution, 2 backward execution</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>Failure strategy: 0 ends, 1 continues</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>Alarm type: 0 not sent, 1 sent if the process succeeds, 2 sent if the process fails, 3 sent whether the process succeeds or fails</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>Alarm group</td>
+</tr>
+<tr>
+<td>schedule_time</td>
+<td>datetime</td>
+<td>Expected running time</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>Starting time</td>
+</tr>
+<tr>
+<td>executor_id</td>
+<td>int</td>
+<td>Execute user id</td>
+</tr>
+<tr>
+<td>dependence</td>
+<td>varchar</td>
+<td>Dependent field</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>Update time</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>Process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group</td>
+<td>varchar</td>
+<td>The worker group on which the tasks are designated to run</td>
+</tr>
+</tbody>
+</table>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/development/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/development/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href="http:/ [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/metadata-1.3.json b/en-us/docs/1.3.1/user_doc/metadata-1.3.json
new file mode 100644
index 0000000..5216bef
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/metadata-1.3.json
@@ -0,0 +1,6 @@
+{
+  "filename": "metadata-1.3.md",
+  "__html": "<h1>Dolphin Scheduler 1.3 Metadata document</h1>\n<p><a name=\"25Ald\"></a></p>\n<h3>Table overview</h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">Table Name</th>\n<th style=\"text-align:center\">Table information</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">Access the token of the ds backend</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-alig [...]
+  "link": "/en-us/docs/1.3.1/user_doc/metadata-1.3.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/system-manual.html b/en-us/docs/1.3.1/user_doc/system-manual.html
new file mode 100644
index 0000000..23c718f
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/system-manual.html
@@ -0,0 +1,987 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="system-manual" />
+	<meta name="description" content="system-manual" />
+	<!-- Page tab title -->
+	<title>system-manual</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<h2>Get started quickly</h2>
+<blockquote>
+<p>Please refer to <a href="quick-start.html">Get started quickly</a></p>
+</blockquote>
+<h2>Operation guide</h2>
+<h3>1. Home</h3>
+<p>The home page contains task status statistics, process status statistics, and workflow definition statistics of all the user's projects.
+<p align="center">
+<img src="/img/home_en.png" width="80%" />
+</p></p>
+<h3>2. Project management</h3>
+<h4>2.1 Create project</h4>
+<ul>
+<li>
+<p>Click &quot;Project Management&quot; to enter the project management page, click the &quot;Create Project&quot; button, enter the project name and project description, and click &quot;Submit&quot; to create a new project.</p>
+  <p align="center">
+      <img src="/img/create_project_en.png" width="80%" />
+  </p>
+</li>
+</ul>
+<h4>2.2 Project Home</h4>
+<ul>
+<li>
+<p>Click the project name link on the project management page to enter the project home page. As shown in the figure below, the project home page contains the task status statistics, process status statistics, and workflow definition statistics of the project.</p>
+  <p align="center">
+      <img src="/img/project_home_en.png" width="80%" />
+   </p>
+</li>
+<li>
+<p>Task status statistics: within the specified time range, count the number of task instances in the states submitted successfully, running, ready to pause, paused, ready to stop, stopped, failed, succeeded, needing fault tolerance, killed, and waiting for thread</p>
+</li>
+<li>
+<p>Process status statistics: within the specified time range, count the number of workflow instances in the states submitted successfully, running, ready to pause, paused, ready to stop, stopped, failed, succeeded, needing fault tolerance, killed, and waiting for thread</p>
+</li>
+<li>
+<p>Workflow definition statistics: count the workflow definitions created by users and the workflow definitions granted to the user by the administrator</p>
+</li>
+</ul>
+<h4>2.3 Workflow definition</h4>
+<h4><span id=creatDag>2.3.1 Create a workflow definition</span></h4>
+<ul>
+<li>
+<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, and click the &quot;Create Workflow&quot; button to enter the <strong>workflow DAG editing</strong> page, as shown in the figure below:</p>
+  <p align="center">
+      <img src="/img/dag5.png" width="80%" />
+  </p>
+</li>
+<li>
+<p>Drag the <img src="/img/shell.png" width="35"/> icon from the toolbar
+onto the canvas to add a Shell task, as shown in the figure below:</p>
+  <p align="center">
+      <img src="/img/shell-en.png" width="80%" />
+  </p>
+<ul>
+<li><strong>Add parameter settings for shell tasks:</strong></li>
+</ul>
+<ol>
+<li>Fill in the &quot;Node Name&quot;, &quot;Description&quot;, and &quot;Script&quot; fields;</li>
+<li>Check “Normal” for “Run Flag”. If you check “Execution Prohibited”, the task will not be executed when running the workflow;</li>
+<li>Select &quot;Task Priority&quot;: When the number of worker threads is insufficient, high-level tasks will be executed first in the execution queue, and tasks with the same priority will be executed in the order of first in, first out;</li>
+<li>Timeout alarm (optional): check &quot;timeout alarm&quot; and &quot;timeout failure&quot; and fill in the &quot;timeout period&quot;; when the task execution time exceeds the <strong>timeout period</strong>, an alert email will be sent and the task will fail due to timeout;</li>
+<li>Resources (optional). The resource file is a file created or uploaded on the Resource Center -&gt; File Management page. For example, the file name is <code>test.sh</code>, and the resource call command in the script is <code>sh test.sh</code>;</li>
+<li>Custom parameters (not required), refer to <a href="#UserDefinedParameters">Custom Parameters</a>;</li>
+<li>Click the &quot;Confirm Add&quot; button to save the task settings.</li>
+</ol>
+<ul>
+<li>
+<p><strong>Set the task execution order:</strong> Click the icon <img src="/img/line.png" width="35"/> in the upper right corner to connect tasks; as shown in the figure below, task 2 and task 3 are executed in parallel: when task 1 is completed, tasks 2 and 3 will be executed at the same time.</p>
+<p align="center">
+    <img src="/img/dag6.png" width="80%" />
+</p>
+</li>
+</ul>
+</li>
+<li>
+<p><strong>Remove dependencies:</strong> Click the &quot;arrow&quot; icon <img src="/img/arrow.png" width="35"/> in the upper right corner, select the connecting line, and click the &quot;delete&quot; icon <img src="/img/delete.png" width="35"/> in the upper right corner to remove the dependency between tasks.</p>
+  <p align="center">
+     <img src="/img/dag7.png" width="80%" />
+  </p>
+</li>
+<li>
+<p><strong>Save the workflow definition:</strong> Click the &quot;Save&quot; button and the &quot;Set DAG Diagram Name&quot; dialog pops up, as shown in the figure below. Enter the workflow definition name and description, set global parameters (optional, refer to <a href="#UserDefinedParameters">Custom Parameters</a>), and click the &quot;Add&quot; button; the workflow definition is created successfully.</p>
+  <p align="center">
+     <img src="/img/dag8.png" width="80%" />
+   </p>
+<blockquote>For other types of tasks, please refer to <a href="#TaskParamers">Task Node Type and Parameter Settings</a>.</blockquote>
+</li>
+</ul>
+<h4>2.3.2  Workflow definition operation function</h4>
+<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown below:
+<p align="center">
+<img src="/img/work_list_en.png" width="80%" />
+</p>
+The operation functions of the workflow definition list are as follows:</p>
+<ul>
+<li><strong>edit:</strong> Only &quot;offline&quot; workflow definitions can be edited. Workflow DAG editing is the same as <a href="#creatDag">Create Workflow Definition</a>.</li>
+<li><strong>online:</strong> Puts an &quot;offline&quot; workflow online. Only a workflow in the &quot;online&quot; state can be run; it cannot be edited.</li>
+<li><strong>offline:</strong> Takes an &quot;online&quot; workflow offline. An offline workflow can be edited but not run.</li>
+<li><strong>run:</strong> Only online workflows can run. See <a href="#runWorkflow">2.3.3 Run the workflow</a> for the operation steps.</li>
+<li><strong>timing:</strong> Only the online workflow can set the timing, and the system automatically schedules the workflow to run regularly. The status after creating a timing is &quot;offline&quot;, and the timing must be online on the timing management page to take effect. For timing operation steps, please refer to <a href="#creatTiming">2.3.4 Workflow Timing</a>.</li>
+<li><strong>Timing management:</strong> On the timing management page, timings can be edited, put online/offline, and deleted.</li>
+<li><strong>delete:</strong> Delete the workflow definition.</li>
+<li><strong>download:</strong> Download the workflow definition to the local.</li>
+<li><strong>Tree diagram:</strong> Display the task node type and task status in a tree structure, as shown in the figure below:<p align="center">
+    <img src="/img/tree_en.png" width="80%" />
+</p> 
+</li>
+</ul>
+<h4><span id=runWorkflow>2.3.3 Run the workflow</span></h4>
+<ul>
+<li>
+<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, as shown in the figure below, and click the &quot;Go Online&quot; button <img src="/img/online.png" width="35"/> to put the workflow online.</p>
+<p align="center">
+    <img src="/img/work_list_en.png" width="80%" />
+</p>
+</li>
+<li>
+<p>Click the &quot;Run&quot; button to pop up the startup parameter setting pop-up box, as shown in the figure below, set the startup parameters, click the &quot;Run&quot; button in the pop-up box, the workflow starts running, and the workflow instance page generates a workflow instance.</p>
+ <p align="center">
+   <img src="/img/run_work_en.png" width="80%" />
+ </p>  
+</li>
+</ul>
+<p><span id=runParamers>Description of workflow operating parameters:</span></p>
+<pre><code>* Failure strategy: When a task node fails to execute, other parallel task nodes need to execute the strategy. &quot;Continue&quot; means: after a certain task fails, other task nodes execute normally; &quot;End&quot; means: terminate all the tasks being executed, and terminate the entire process.
+* Notification strategy: when the process ends, a process execution notification email is sent according to the process status; the options are: do not send, send on success, send on failure, send on success or failure.
+* Process priority: The priority of process operation, divided into five levels: highest (HIGHEST), high (HIGH), medium (MEDIUM), low (LOW), and lowest (LOWEST). When the number of master threads is insufficient, high-level processes will be executed first in the execution queue, and processes with the same priority will be executed in the order of first in, first out.
+* Worker group: The process can only be executed in the specified worker machine group. The default is Default, which can be executed on any worker.
+* Notification group: when the notification strategy, timeout alarm, or fault tolerance is triggered, process information or alarm emails are sent to all members of the notification group.
+* Recipient: when the notification strategy, timeout alarm, or fault tolerance is triggered, process information or alarm emails are sent to the recipient list.
+* Cc: when the notification strategy, timeout alarm, or fault tolerance is triggered, process information or alarm emails are copied to the Cc list.
+* Complement: Two modes including serial complement and parallel complement. Serial complement: within the specified time range, the complement is executed sequentially from the start date to the end date, and only one process instance is generated; parallel complement: within the specified time range, multiple days are complemented at the same time to generate N process instances. 
+</code></pre>
+<ul>
+<li>
+<p>Complement: Executes the workflow definition for specified dates. You can choose the time range of the complement (currently only consecutive days are supported). For example, to supplement the data from May 1 to May 10, as shown in the following figure:</p>
+<p align="center">
+    <img src="/img/complement_en.png" width="80%" />
+</p>
+<blockquote>
+<p>Serial mode: The complement is executed sequentially from May 1 to May 10, and a process instance is generated on the process instance page;</p>
+</blockquote>
+<blockquote>
+<p>Parallel mode: The tasks from May 1 to May 10 are executed simultaneously, and ten process instances are generated on the process instance page.</p>
+</blockquote>
+</li>
+</ul>
+<h4><span id=creatTiming>2.3.4 Workflow timing</span></h4>
+<ul>
+<li>Create timing: Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, put the workflow online, and click the &quot;timing&quot; button <img src="/img/timing.png" width="35"/>; the timing parameter setting dialog pops up, as shown in the figure below:<p align="center">
+    <img src="/img/time_schedule_en.png" width="80%" />
+</p>
+</li>
+<li>Choose the start and end time. Within the start and end time range, the workflow runs on schedule; outside that range, no more scheduled workflow instances are generated.</li>
+<li>Add a timing that is executed once every day at 5 AM, as shown in the following figure (an example crontab expression is sketched after this list):<p align="center">
+    <img src="/img/timer-en.png" width="80%" />
+</p>
+</li>
+<li>Failure strategy, notification strategy, process priority, worker group, notification group, recipient, and Cc are the same as the <a href="#runParamers">workflow running parameters</a>.</li>
+<li>Click the &quot;Create&quot; button, and the timing is created successfully. At this time, the timing status is &quot;<strong>Offline</strong>&quot;, and the timing must be put <strong>Online</strong> to take effect.</li>
+<li>Put the timing online: click the &quot;Timing Management&quot; button <img src="/img/timeManagement.png" width="35"/> to enter the timing management page, then click the &quot;online&quot; button; the timing status changes to &quot;online&quot;, as shown in the figure below, and the workflow now runs on schedule.<p align="center">
+    <img src="/img/time-manage-list-en.png" width="80%" />
+</p>
+</li>
+</ul>
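+<p>For reference, a sketch of what the crontab expression behind &quot;once every day at 5 AM&quot; could look like, assuming the Quartz-style seven-field format (second, minute, hour, day of month, month, day of week, year); verify the exact expression in your own timing settings:</p>
+<pre><code>0 0 5 * * ? *
+</code></pre>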
+<h4>2.3.5 Import workflow</h4>
+<p>Click Project Management -&gt; Workflow -&gt; Workflow Definition to enter the workflow definition page, click the &quot;Import Workflow&quot; button to import the local workflow file, the workflow definition list displays the imported workflow, and the status is offline.</p>
+<h4>2.4 Workflow instance</h4>
+<h4>2.4.1 View workflow instance</h4>
+<ul>
+<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:   <p align="center">
+      <img src="/img/instance-list-en.png" width="80%" />
+   </p>           
+</li>
+<li>Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.<p align="center">
+  <img src="/img/instance-runs-en.png" width="80%" />
+</p>
+</li>
+</ul>
+<h4>2.4.2 View task log</h4>
+<ul>
+<li>Enter the workflow instance page, click the workflow name, enter the DAG view page, double-click the task node, as shown in the following figure: <p align="center">
+   <img src="/img/instanceViewLog-en.png" width="80%" />
+ </p>
+</li>
+<li>Click &quot;View Log&quot;, and a log pop-up box appears, as shown in the figure below. The task log can also be viewed on the task instance page; refer to <a href="#taskLog">Task view log</a>. <p align="center">
+   <img src="/img/task-log-en.png" width="80%" />
+ </p>
+</li>
+</ul>
+<h4>2.4.3 View task history</h4>
+<ul>
+<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
+<li>Double-click the task node, as shown in the figure below, and click &quot;View History&quot; to jump to the task instance page, which displays the list of task instances run by the workflow instance <p align="center">
+   <img src="/img/task_history_en.png" width="80%" />
+ </p>
+</li>
+</ul>
+<h4>2.4.4 View operating parameters</h4>
+<ul>
+<li>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;</li>
+<li>Click the icon <img src="/img/run_params_button.png" width="35"/> in the upper left corner to view the startup parameters of the workflow instance; click the icon <img src="/img/global_param.png" width="35"/> to view the global and local parameters of the workflow instance, as shown in the following figure: <p align="center">
+   <img src="/img/run_params_en.png" width="80%" />
+ </p>      
+</li>
+</ul>
+<h4>2.4.5 Workflow instance operation function</h4>
+<p>Click Project Management -&gt; Workflow -&gt; Workflow Instance to enter the Workflow Instance page, as shown in the figure below:<br>
+<p align="center">
+<img src="/img/instance-list-en.png" width="80%" />
+</p></p>
+<ul>
+<li><strong>edit:</strong> Only terminated processes can be edited. Click the &quot;Edit&quot; button or the name of the workflow instance to enter the DAG editing page. After editing, click the &quot;Save&quot; button to pop up the Save DAG pop-up box, as shown in the figure below. In the pop-up box, check &quot;Whether to update to workflow definition&quot; and save After that, the workflow definition will be updated; if it is not checked, the workflow definition will not be updated.   [...]
+     <img src="/img/editDag-en.png" width="80%" />
+   </p>
+</li>
+<li><strong>Rerun:</strong> Re-execute the terminated process.</li>
+<li><strong>Recovery failed:</strong> For failed processes, you can perform recovery operations, starting from the failed node.</li>
+<li><strong>stop:</strong> <strong>Stops</strong> the running process; the backend first sends <code>kill</code> to the worker process, and then performs a <code>kill -9</code> operation</li>
+<li><strong>Pause:</strong> <strong>Pauses</strong> the running process; the system status changes to <strong>waiting for execution</strong>, the tasks currently being executed are allowed to finish, and the next task to be executed is paused.</li>
+<li><strong>Resume pause:</strong> To resume the paused process, start running directly from the <strong>paused node</strong></li>
+<li><strong>Delete:</strong> Delete the workflow instance and the task instance under the workflow instance</li>
+<li><strong>Gantt chart:</strong> The vertical axis of the Gantt chart is the topological sorting of task instances under a certain workflow instance, and the horizontal axis is the running time of the task instances, as shown in the figure:   <p align="center">
+       <img src="/img/gant-en.png" width="80%" />
+   </p>
+</li>
+</ul>
+<h4>2.5 Task instance</h4>
+<ul>
+<li>
+<p>Click Project Management -&gt; Workflow -&gt; Task Instance to enter the task instance page, as shown in the figure below, click the name of the workflow instance, you can jump to the workflow instance DAG chart to view the task status.</p>
+   <p align="center">
+      <img src="/img/task-list-en.png" width="80%" />
+   </p>
+</li>
+<li>
+<p><span id=taskLog>View log:</span> Click the &quot;view log&quot; button in the operation column to view the log of the task execution.</p>
+   <p align="center">
+      <img src="/img/task-log2-en.png" width="80%" />
+   </p>
+</li>
+</ul>
+<h3>3. Resource Center</h3>
+<h4>3.1 hdfs resource configuration</h4>
+<ul>
+<li>To upload resource files and UDF functions, all uploaded files and resources will be stored on HDFS, so the following configuration items are required:</li>
+</ul>
+<pre><code>conf/common/common.properties  
+    # Users who have permission to create directories under the HDFS root path
+    hdfs.root.user=hdfs
+    # base dir: resource files will be stored under this hadoop hdfs path; configure it yourself and make sure the directory exists on hdfs and has read/write permissions. &quot;/escheduler&quot; is recommended
+    data.store2hdfs.basepath=/dolphinscheduler
+    # resource upload startup type : HDFS,S3,NONE
+    res.upload.startup.type=HDFS
+    # whether kerberos starts
+    hadoop.security.authentication.startup.state=false
+    # java.security.krb5.conf path
+    java.security.krb5.conf.path=/opt/krb5.conf
+    # loginUserFromKeytab user
+    login.user.keytab.username=hdfs-mycluster@ESZ.COM
+    # loginUserFromKeytab path
+    login.user.keytab.path=/opt/hdfs.headless.keytab
+    
+conf/common/hadoop.properties      
+    # ha or single namenode; if namenode ha, you need to copy core-site.xml and hdfs-site.xml
+    # to the conf directory. Supports s3, for example: s3a://dolphinscheduler
+    fs.defaultFS=hdfs://mycluster:8020    
+    # resourcemanager ha: note this needs the ips; leave this empty if single
+    yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx    
+    # If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+    yarn.application.status.address=http://xxxx:8088/ws/v1/cluster/apps/%s
+
+</code></pre>
+<ul>
+<li>Only one of yarn.resourcemanager.ha.rm.ids and yarn.application.status.address needs to be configured; leave the other one empty.</li>
+<li>You need to copy core-site.xml and hdfs-site.xml from the conf directory of the Hadoop cluster to the conf directory of the dolphinscheduler project, and restart the api-server service.</li>
+</ul>
+<h4>3.2 File management</h4>
+<blockquote>
+<p>File management handles various resource files, including creating basic txt/log/sh/conf/py/java and other files, and uploading jar packages and other types of files; files can be edited, renamed, downloaded, and deleted.</p>
+</blockquote>
+  <p align="center">
+   <img src="/img/file-manage-en.png" width="80%" />
+ </p>
+<ul>
+<li>Create a file</li>
+</ul>
+<blockquote>
+<p>The following file formats are supported: txt, log, sh, conf, cfg, py, java, sql, xml, hql, properties</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file_create_en.png" width="80%" />
+ </p>
+<ul>
+<li>upload files</li>
+</ul>
+<blockquote>
+<p>Upload file: click the &quot;upload file&quot; button to upload, or drag the file to the upload area; the file name is automatically filled in with the name of the uploaded file</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file-upload-en.png" width="80%" />
+ </p>
+<ul>
+<li>File View</li>
+</ul>
+<blockquote>
+<p>For the file types that can be viewed, click the file name to view the file details</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file_detail_en.png" width="80%" />
+ </p>
+<ul>
+<li>download file</li>
+</ul>
+<blockquote>
+<p>Click the &quot;Download&quot; button in the file list to download the file or click the &quot;Download&quot; button in the upper right corner of the file details to download the file</p>
+</blockquote>
+<ul>
+<li>File rename</li>
+</ul>
+<p align="center">
+   <img src="/img/file_rename_en.png" width="80%" />
+ </p>
+<ul>
+<li>delete</li>
+</ul>
+<blockquote>
+<p>File list -&gt; Click the &quot;Delete&quot; button to delete the specified file</p>
+</blockquote>
+<h4>3.3 UDF management</h4>
+<h4>3.3.1 Resource management</h4>
+<blockquote>
+<p>The resource management and file management functions are similar. The difference is that resource management is for uploaded UDF functions, while file management is for user programs, scripts, and configuration files.
+Operation function: rename, download, delete.</p>
+</blockquote>
+<ul>
+<li>Upload udf resources</li>
+</ul>
+<blockquote>
+<p>Same as uploading files.</p>
+</blockquote>
+<h4>3.3.2 Function management</h4>
+<ul>
+<li>Create UDF function</li>
+</ul>
+<blockquote>
+<p>Click &quot;Create UDF Function&quot;, enter the udf function parameters, select the udf resource, and click &quot;Submit&quot; to create the udf function.</p>
+</blockquote>
+<blockquote>
+<p>Currently only temporary HIVE UDF functions are supported</p>
+</blockquote>
+<ul>
+<li>UDF function name: the name used when the UDF function is called</li>
+<li>Package name Class name: Enter the full path of the UDF function</li>
+<li>UDF resource: Set the resource file corresponding to the created UDF</li>
+</ul>
+<p align="center">
+   <img src="/img/udf_edit_en.png" width="80%" />
+ </p>
+<h3>4. Create data source</h3>
+<blockquote>
+<p>Data source center supports MySQL, POSTGRESQL, HIVE/IMPALA, SPARK, CLICKHOUSE, ORACLE, SQLSERVER and other data sources</p>
+</blockquote>
+<h4>4.1 Create/Edit MySQL data source</h4>
+<ul>
+<li>
+<p>Click &quot;Data Source Center -&gt; Create Data Source&quot; to create different types of data sources according to requirements.</p>
+</li>
+<li>
+<p>Data source: select MYSQL</p>
+</li>
+<li>
+<p>Data source name: enter the name of the data source</p>
+</li>
+<li>
+<p>Description: Enter a description of the data source</p>
+</li>
+<li>
+<p>IP hostname: enter the IP to connect to MySQL</p>
+</li>
+<li>
+<p>Port: Enter the port to connect to MySQL</p>
+</li>
+<li>
+<p>Username: Set the username for connecting to MySQL</p>
+</li>
+<li>
+<p>Password: Set the password for connecting to MySQL</p>
+</li>
+<li>
+<p>Database name: Enter the name of the database connected to MySQL</p>
+</li>
+<li>
+<p>Jdbc connection parameters: parameter settings for the MySQL connection, filled in as JSON (an illustrative example is sketched below)</p>
+</li>
+</ul>
+<p align="center">
+   <img src="/img/mysql-en.png" width="80%" />
+ </p>
+<blockquote>
+<p>Click &quot;Test Connection&quot; to test whether the data source can be successfully connected.</p>
+</blockquote>
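+<p>A hedged sketch of what the &quot;Jdbc connection parameters&quot; JSON could look like; the keys shown are common MySQL Connector/J options and are illustrative only, so adjust them to your own driver and environment:</p>
+<pre><code>{&quot;useUnicode&quot;:&quot;true&quot;,&quot;characterEncoding&quot;:&quot;UTF-8&quot;,&quot;useSSL&quot;:&quot;false&quot;}
+</code></pre>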
+<h4>4.2 Create/Edit POSTGRESQL data source</h4>
+<ul>
+<li>Data source: select POSTGRESQL</li>
+<li>Data source name: enter the name of the data source</li>
+<li>Description: Enter a description of the data source</li>
+<li>IP/Host Name: Enter the IP to connect to POSTGRESQL</li>
+<li>Port: Enter the port to connect to POSTGRESQL</li>
+<li>Username: Set the username for connecting to POSTGRESQL</li>
+<li>Password: Set the password for connecting to POSTGRESQL</li>
+<li>Database name: Enter the name of the database connected to POSTGRESQL</li>
+<li>Jdbc connection parameters: parameter settings for the POSTGRESQL connection, filled in as JSON</li>
+</ul>
+<p align="center">
+   <img src="/img/postgresql-en.png" width="80%" />
+ </p>
+<h4>4.3 Create/Edit HIVE data source</h4>
+<p>1.Use HiveServer2 to connect</p>
+ <p align="center">
+    <img src="/img/hive-en.png" width="80%" />
+  </p>
+<ul>
+<li>Data source: select HIVE</li>
+<li>Data source name: enter the name of the data source</li>
+<li>Description: Enter a description of the data source</li>
+<li>IP/Host Name: Enter the IP connected to HIVE</li>
+<li>Port: Enter the port connected to HIVE</li>
+<li>Username: Set the username for connecting to HIVE</li>
+<li>Password: Set the password for connecting to HIVE</li>
+<li>Database name: Enter the name of the database connected to HIVE</li>
+<li>Jdbc connection parameters: parameter settings for the HIVE connection, filled in as JSON</li>
+</ul>
+<p>2.Use HiveServer2 HA Zookeeper to connect</p>
+ <p align="center">
+    <img src="/img/hive1-en.png" width="80%" />
+  </p>
+<p>Note: If you enable <strong>kerberos</strong>, you need to fill in <strong>Principal</strong></p>
+<p align="center">
+    <img src="/img/hive-en.png" width="80%" />
+  </p>
+<h4>4.4 Create/Edit Spark data source</h4>
+<p align="center">
+   <img src="/img/spark-en.png" width="80%" />
+ </p>
+<ul>
+<li>Data source: select Spark</li>
+<li>Data source name: enter the name of the data source</li>
+<li>Description: Enter a description of the data source</li>
+<li>IP/Hostname: Enter the IP connected to Spark</li>
+<li>Port: Enter the port connected to Spark</li>
+<li>Username: Set the username for connecting to Spark</li>
+<li>Password: Set the password for connecting to Spark</li>
+<li>Database name: Enter the name of the database connected to Spark</li>
+<li>Jdbc connection parameters: parameter settings for the Spark connection, filled in as JSON</li>
+</ul>
+<h3>5. Security Center (Permission System)</h3>
+<pre><code> * Only the administrator account in the security center has the authority to operate. It provides functions such as queue management, tenant management, user management, alarm group management, worker group management, and token management. In the user management module, resources, data sources, projects, etc. can be authorized.
+ * Administrator login, default user name and password: admin/dolphinscheduler123
+</code></pre>
+<h4>5.1 Create queue</h4>
+<ul>
+<li>Queues are used when executing programs such as spark and mapreduce that require a &quot;queue&quot; parameter.</li>
+<li>The administrator enters the Security Center-&gt;Queue Management page and clicks the &quot;Create Queue&quot; button to create a queue.</li>
+</ul>
+ <p align="center">
+    <img src="/img/create-queue-en.png" width="80%" />
+  </p>
+<h4>5.2 Add tenant</h4>
+<ul>
+<li>The tenant corresponds to the Linux user, which is used by the worker to submit the job. If Linux does not have this user, the worker will create this user when executing the script.</li>
+<li>Tenant Code: <strong>The tenant code is the user on Linux and must be unique; it cannot be repeated</strong></li>
+<li>The administrator enters the Security Center-&gt;Tenant Management page and clicks the &quot;Create Tenant&quot; button to create a tenant.</li>
+</ul>
+ <p align="center">
+    <img src="/img/addtenant-en.png" width="80%" />
+  </p>
+<h4>5.3 Create normal user</h4>
+<ul>
+<li>Users are divided into <strong>administrator users</strong> and <strong>normal users</strong></li>
+</ul>
+<pre><code>* The administrator has authorization and user management authority, but does not have the authority to create project and workflow definition operations.
+* Ordinary users can create projects and create, edit, and execute workflow definitions.
+* Note: If a user switches tenants, all resources under the user's current tenant will be copied to the new tenant.
+</code></pre>
+<ul>
+<li>The administrator enters the Security Center -&gt; User Management page and clicks the &quot;Create User&quot; button to create a user.</li>
+</ul>
+<p align="center">
+   <img src="/img/user-en.png" width="80%" />
+ </p>
+<blockquote>
+<p><strong>Edit user information</strong></p>
+</blockquote>
+<ul>
+<li>The administrator enters the Security Center-&gt;User Management page and clicks the &quot;Edit&quot; button to edit user information.</li>
+<li>After an ordinary user logs in, click the user information in the user name drop-down box to enter the user information page, and click the &quot;Edit&quot; button to edit the user information.</li>
+</ul>
+<blockquote>
+<p><strong>Modify user password</strong></p>
+</blockquote>
+<ul>
+<li>The administrator enters the Security Center-&gt;User Management page and clicks the &quot;Edit&quot; button. When editing user information, enter the new password to modify the user password.</li>
+<li>After a normal user logs in, click the user information in the user name drop-down box to enter the password modification page, enter the password and confirm the password and click the &quot;Edit&quot; button, then the password modification is successful.</li>
+</ul>
+<h4>5.4 Create alarm group</h4>
+<ul>
+<li>The alarm group is a parameter set when starting a process. After the process ends, its status and other information are sent to the alarm group by email.</li>
+</ul>
+<ul>
+<li>The administrator enters the Security Center -&gt; Alarm Group Management page and clicks the &quot;Create Alarm Group&quot; button to create an alarm group.</li>
+</ul>
+  <p align="center">
+    <img src="/img/mail-en.png" width="80%" />
+  </p>
+<h4>5.5 Token management</h4>
+<blockquote>
+<p>Since the back-end API requires a login check, token management provides a way to perform operations on the system by calling the API directly.</p>
+</blockquote>
+<ul>
+<li>The administrator enters the Security Center -&gt; Token Management page, clicks the &quot;Create Token&quot; button, selects the expiration time and user, clicks the &quot;Generate Token&quot; button, and clicks the &quot;Submit&quot; button, then the selected user's token is created successfully.</li>
+</ul>
+  <p align="center">
+      <img src="/img/creat-token-en.png" width="80%" />
+   </p>
+<ul>
+<li>
+<p>After an ordinary user logs in, click the user information in the user name drop-down box, enter the token management page, select the expiration time, click the &quot;generate token&quot; button, and click the &quot;submit&quot; button, then the user creates a token successfully.</p>
+</li>
+<li>
+<p>Call example:</p>
+</li>
+</ul>
+<pre><code class="language-Token">    /**
+     * test token
+     */
+    public  void doPOSTParam()throws Exception{
+        // create HttpClient
+        CloseableHttpClient httpclient = HttpClients.createDefault();
+
+        // create http post request
+        HttpPost httpPost = new HttpPost(&quot;http://127.0.0.1:12345/escheduler/projects/create&quot;);
+        httpPost.setHeader(&quot;token&quot;, &quot;123&quot;);
+        // set parameters
+        List&lt;NameValuePair&gt; parameters = new ArrayList&lt;NameValuePair&gt;();
+        parameters.add(new BasicNameValuePair(&quot;projectName&quot;, &quot;qzw&quot;));
+        parameters.add(new BasicNameValuePair(&quot;desc&quot;, &quot;qzw&quot;));
+        UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(parameters);
+        httpPost.setEntity(formEntity);
+        CloseableHttpResponse response = null;
+        try {
+            // execute
+            response = httpclient.execute(httpPost);
+            // response status code 200
+            if (response.getStatusLine().getStatusCode() == 200) {
+                String content = EntityUtils.toString(response.getEntity(), &quot;UTF-8&quot;);
+                System.out.println(content);
+            }
+        } finally {
+            if (response != null) {
+                response.close();
+            }
+            httpclient.close();
+        }
+    }
+</code></pre>
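+<p>For a quick check from the command line, the following is a rough curl equivalent of the Java example above (illustrative only; it assumes the same endpoint, and that &quot;123&quot; is replaced by a token generated in Token Management):</p>
+<pre><code class="language-shell"># pass the token in the "token" request header, form parameters in the body
+curl -X POST "http://127.0.0.1:12345/escheduler/projects/create" \
+     -H "token: 123" \
+     -d "projectName=qzw" \
+     -d "desc=qzw"
+</code></pre>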
+<h4>5.6 Granted permission</h4>
+<pre><code>* Granted permissions include project permissions, resource permissions, data source permissions, and UDF function permissions.
+* The administrator can grant ordinary users permissions on projects, resources, data sources and UDF functions that they did not create themselves. Because the authorization method is the same for projects, resources, data sources and UDF functions, we take project authorization as an example.
+* Note: a user has all permissions on projects they created themselves; such projects are not shown in the project list or the selected project list.
+</code></pre>
+<ul>
+<li>The administrator enters the Security Center -&gt; User Management page and clicks the &quot;Authorize&quot; button of the user who needs to be authorized, as shown in the figure below:</li>
+</ul>
+  <p align="center">
+   <img src="/img/auth-en.png" width="80%" />
+ </p>
+<ul>
+<li>Select the project to authorize the project.</li>
+</ul>
+<p align="center">
+   <img src="/img/authproject-en.png" width="80%" />
+ </p>
+<ul>
+<li>Resources, data sources, and UDF function authorization are the same as project authorization.</li>
+</ul>
+<h3>6. Monitoring Center</h3>
+<h4>6.1 Service management</h4>
+<ul>
+<li>Service management is mainly to monitor and display the health status and basic information of each service in the system</li>
+</ul>
+<h4>6.1.1 Master monitoring</h4>
+<ul>
+<li>Mainly related to master information.</li>
+</ul>
+<p align="center">
+   <img src="/img/master-jk-en.png" width="80%" />
+ </p>
+<h4>6.1.2 Worker monitoring</h4>
+<ul>
+<li>Mainly related to worker information.</li>
+</ul>
+<p align="center">
+   <img src="/img/worker-jk-en.png" width="80%" />
+ </p>
+<h4>6.1.3 Zookeeper monitoring</h4>
+<ul>
+<li>Mainly shows the configuration information of each worker and master registered in ZooKeeper.</li>
+</ul>
+<p align="center">
+   <img src="/img/zookeeper-monitor-en.png" width="80%" />
+ </p>
+<h4>6.1.4 DB monitoring</h4>
+<ul>
+<li>Mainly the health of the DB</li>
+</ul>
+<p align="center">
+   <img src="/img/mysql-jk-en.png" width="80%" />
+ </p>
+<h4>6.2 Statistics management</h4>
+<p align="center">
+   <img src="/img/statistics-en.png" width="80%" />
+ </p>
+<ul>
+<li>Number of commands to be executed: statistics on the t_ds_command table</li>
+<li>The number of failed commands: statistics on the t_ds_error_command table</li>
+<li>Number of tasks to run: Count the data of task_queue in Zookeeper</li>
+<li>Number of tasks to be killed: Count the data of task_kill in Zookeeper</li>
+</ul>
+<h3>7. <span id=TaskParamers>Task node type and parameter settings</span></h3>
+<h4>7.1 Shell node</h4>
+<blockquote>
+<p>Shell node: when it runs, the worker generates a temporary shell script and executes it as the Linux user with the same name as the tenant.</p>
+</blockquote>
+<ul>
+<li>
+<p>Click Project Management-Project Name-Workflow Definition, and click the &quot;Create Workflow&quot; button to enter the DAG editing page.</p>
+</li>
+<li>
+<p>Drag <img src="/img/shell.png" width="35"/> from the toolbar to the drawing board, as shown in the figure below:</p>
+<p align="center">
+    <img src="/img/shell-en.png" width="80%" />
+</p> 
+</li>
+<li>
+<p>Node name: The node name in a workflow definition is unique.</p>
+</li>
+<li>
+<p>Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.</p>
+</li>
+<li>
+<p>Descriptive information: describe the function of the node.</p>
+</li>
+<li>
+<p>Task priority: when the number of worker threads is insufficient, tasks are executed in order of priority from high to low; tasks with the same priority are executed on a first-in, first-out basis.</p>
+</li>
+<li>
+<p>Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.</p>
+</li>
+<li>
+<p>Number of failed retry attempts: the number of times a failed task is resubmitted; it can be selected from the drop-down or filled in manually.</p>
+</li>
+<li>
+<p>Failed retry interval: the interval before a failed task is resubmitted; it can be selected from the drop-down or filled in manually.</p>
+</li>
+<li>
+<p>Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the &quot;timeout period&quot;, an alarm email will be sent and the task execution will fail.</p>
+</li>
+<li>
+<p>Script: SHELL program developed by users.</p>
+</li>
+<li>
+<p>Resource: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.</p>
+</li>
+<li>
+<p>User-defined parameters: local parameters of the SHELL task; their values replace the corresponding ${variable} placeholders in the script (see the sketch after this list).</p>
+</li>
+</ul>
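+<p>A minimal sketch of such a script, assuming a hypothetical user-defined parameter named <code>bizdate</code> has been added to the node:</p>
+<pre><code class="language-shell">#!/bin/bash
+# ${bizdate} below is replaced by the scheduler with the parameter value before the script runs
+echo "processing data for ${bizdate}"
+</code></pre>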
+<h4>7.2 Sub-process node</h4>
+<ul>
+<li>A sub-process node executes an external workflow definition as a task node.</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SUB_PROCESS.png" alt="PNG"> task node in the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/sub-process-en.png" width="80%" />
+ </p>
+<ul>
+<li>Node name: The node name in a workflow definition is unique</li>
+<li>Run flag: identify whether this node can be scheduled normally</li>
+<li>Descriptive information: describe the function of the node</li>
+<li>Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the &quot;timeout period&quot;, an alarm email will be sent and the task execution will fail.</li>
+<li>Sub-node: It is the workflow definition of the selected sub-process. Enter the sub-node in the upper right corner to jump to the workflow definition of the selected sub-process</li>
+</ul>
+<h4>7.3 DEPENDENT node</h4>
+<ul>
+<li>Dependent nodes are <strong>dependency check nodes</strong>. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png" alt="PNG"> task node in the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/dependent-nodes-en.png" width="80%" />
+ </p>
+<blockquote>
+<p>The dependent node provides a logical judgment function, such as checking whether the B process was successful yesterday, or whether the C process was executed successfully.</p>
+</blockquote>
+  <p align="center">
+   <img src="/img/depend-node-en.png" width="80%" />
+ </p>
+<blockquote>
+<p>For example, process A is a weekly report task, processes B and C are daily tasks, and task A requires tasks B and C to be successfully executed every day of the last week, as shown in the figure:</p>
+</blockquote>
+ <p align="center">
+   <img src="/img/depend-node1-en.png" width="80%" />
+ </p>
+<blockquote>
+<p>If the weekly report A also needs to be executed successfully last Tuesday:</p>
+</blockquote>
+ <p align="center">
+   <img src="/img/depend-node3-en.png" width="80%" />
+ </p>
+<h4>7.4 Stored procedure node</h4>
+<ul>
+<li>Executes a stored procedure on the selected data source.</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png" alt="PNG"> task node from the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/procedure-en.png" width="80%" />
+ </p>
+<ul>
+<li>Data source: The data source type of the stored procedure supports MySQL and POSTGRESQL, select the corresponding data source</li>
+<li>Method: is the method name of the stored procedure</li>
+<li>Custom parameters: The custom parameter types of the stored procedure support IN and OUT, and the data types support nine data types: VARCHAR, INTEGER, LONG, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, and BOOLEAN</li>
+</ul>
+<h4>7.5 SQL node</h4>
+<ul>
+<li>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SQL.png" alt="PNG"> task node from the toolbar to the drawing board</li>
+<li>Non-query SQL function: edit non-query SQL task information, select non-query for sql type, as shown in the figure below:</li>
+</ul>
+  <p align="center">
+   <img src="/img/sql-en.png" width="80%" />
+ </p>
+<ul>
+<li>Query SQL function: edit query SQL task information, select query as the sql type, and choose form or attachment to send the results by email to the specified recipients, as shown in the figure below.</li>
+</ul>
+<p align="center">
+   <img src="/img/sql-node-en.png" width="80%" />
+ </p>
+<ul>
+<li>Data source: select the corresponding data source</li>
+<li>sql type: supports query and non-query. A query is a select statement that returns a result set; the email notification can use one of three templates: form, attachment, or form + attachment. Non-query statements return no result set and cover update, delete, and insert operations.</li>
+<li>sql parameter: the input parameter format is key1=value1;key2=value2...</li>
+<li>sql statement: SQL statement</li>
+<li>UDF function: For data sources of type HIVE, you can refer to UDF functions created in the resource center. UDF functions are not supported for other types of data sources.</li>
+<li>Custom parameters: the custom parameter types and data types are the same as for the stored procedure task type (where they form an ordered list that sets values for the method). The difference is that SQL task custom parameters replace the ${variable} placeholders in the SQL statement.</li>
+<li>Pre-sql: Pre-sql is executed before the sql statement.</li>
+<li>Post-sql: Post-sql is executed after the sql statement.</li>
+</ul>
+<h4>7.6 SPARK node</h4>
+<ul>
+<li>Through the SPARK node, you can directly execute a SPARK program. For the spark node, the worker submits the task with the <code>spark-submit</code> command (a sketch is given at the end of this section)</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SPARK.png" alt="PNG"> task node from the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/spark_edit.png" width="80%" />
+ </p>
+<ul>
+<li>Program type: supports three languages: JAVA, Scala and Python</li>
+<li>The class of the main function: is the full path of the Spark program’s entry Main Class</li>
+<li>Main jar package: Spark jar package</li>
+<li>Deployment mode: support three modes of yarn-cluster, yarn-client and local</li>
+<li>Driver cores and memory: you can set the number of Driver cores and the Driver memory size</li>
+<li>Executors: you can set the number of Executors, the Executor memory size, and the number of Executor cores</li>
+<li>Command line parameters: Set the input parameters of the Spark program and support the substitution of custom parameter variables.</li>
+<li>Other parameters: support --jars, --files, --archives, --conf format</li>
+<li>Resource: If the resource file is referenced in other parameters, you need to select and specify in the resource</li>
+<li>User-defined parameter: a local user-defined parameter of the Spark task; its value replaces the ${variable} placeholders in the script</li>
+</ul>
+<p>Note: JAVA and Scala are only used for identification and make no difference. For Spark programs developed in Python there is no main function class; the other settings are the same.</p>
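+<p>For reference, the worker assembles a <code>spark-submit</code> command from the fields above. The sketch below is illustrative only; the class, jar and parameter values are hypothetical:</p>
+<pre><code class="language-shell"># roughly what the worker submits for a yarn-cluster JAVA/Scala Spark task
+spark-submit --master yarn --deploy-mode cluster \
+  --class com.example.Main \
+  --driver-cores 1 --driver-memory 512M \
+  --num-executors 2 --executor-cores 2 --executor-memory 2G \
+  --conf spark.network.timeout=120s \
+  main_app.jar arg1 arg2
+</code></pre>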
+<h4>7.7 MapReduce (MR) Node</h4>
+<ul>
+<li>Using the MR node, you can directly execute an MR program. For the mr node, the worker submits the task with the <code>hadoop jar</code> command (a sketch is given below)</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_MR.png" alt="PNG"> task node in the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<ol>
+<li>JAVA program</li>
+</ol>
+ <p align="center">
+   <img src="/img/mr_java_en.png" width="80%" />
+ </p>
+<ul>
+<li>The class of the main function: is the full path of the Main Class, the entry point of the MR program</li>
+<li>Program type: select JAVA language</li>
+<li>Main jar package: is the MR jar package</li>
+<li>Command line parameters: set the input parameters of the MR program and support the substitution of custom parameter variables</li>
+<li>Other parameters: support -D, -files, -libjars, -archives format</li>
+<li>Resource: If the resource file is referenced in other parameters, you need to select and specify in the resource</li>
+<li>User-defined parameter: a local user-defined parameter of the MR task; its value replaces the ${variable} placeholders in the script</li>
+</ul>
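+<p>A hypothetical sketch of the <code>hadoop jar</code> command the worker builds for such a JAVA task (class, jar and paths are made up for illustration):</p>
+<pre><code class="language-shell"># main jar, main class, then -D/-files/-libjars/-archives options and command line parameters
+hadoop jar wordcount.jar com.example.WordCount \
+  -D mapreduce.job.queuename=default \
+  /input/words.txt /output/wordcount
+</code></pre>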
+<ol start="2">
+<li>Python program</li>
+</ol>
+<p align="center">
+   <img src="/img/mr_edit_en.png" width="80%" />
+ </p>
+<ul>
+<li>Program type: select Python language</li>
+<li>Main jar package: is the Python jar package for running MR</li>
+<li>Other parameters: support the -D, -mapper, -reducer, -input, -output format; here you can also pass in user-defined parameters, such as (the full command that results is sketched after this list):</li>
+<li>-mapper &quot;mapper.py 1&quot; -file mapper.py -reducer reducer.py -file reducer.py -input /journey/words.txt -output /journey/out/mr/${currentTimeMillis}</li>
+<li>The &quot;mapper.py 1&quot; after -mapper is two parameters: the first parameter is mapper.py, and the second parameter is 1</li>
+<li>Resource: If the resource file is referenced in other parameters, you need to select and specify in the resource</li>
+<li>User-defined parameter: a local user-defined parameter of the MR task; its value replaces the ${variable} placeholders in the script</li>
+</ul>
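+<p>Putting the pieces together, the full streaming command the worker runs looks roughly like the sketch below (illustrative only; the streaming jar path depends on the Hadoop installation, and ${currentTimeMillis} is substituted by the scheduler before execution):</p>
+<pre><code class="language-shell">hadoop jar hadoop-streaming-*.jar \
+  -D mapreduce.job.name=wordcount \
+  -mapper "mapper.py 1" -file mapper.py \
+  -reducer reducer.py -file reducer.py \
+  -input /journey/words.txt \
+  -output /journey/out/mr/${currentTimeMillis}
+</code></pre>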
+<h4>7.8 Python Node</h4>
+<ul>
+<li>Using python nodes, you can directly execute python scripts. For python nodes, workers will use <code>python **</code> to submit tasks.</li>
+</ul>
+<blockquote>
+<p>Drag the <img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PYTHON.png" alt="PNG"> task node from the toolbar to the drawing board, as shown in the following figure:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/python-en.png" width="80%" />
+ </p>
+<ul>
+<li>Script: Python program developed by the user</li>
+<li>Resources: refers to the list of resource files that need to be called in the script</li>
+<li>User-defined parameter: a local user-defined parameter of the Python task; its value replaces the ${variable} placeholders in the script</li>
+</ul>
+<h4>7.9 Flink Node</h4>
+<ul>
+<li>Drag the <img src="/img/flink.png" width="35"/> task node from the toolbar to the drawing board, as shown in the following figure:</li>
+</ul>
+<p align="center">
+  <img src="/img/flink-en.png" width="80%" />
+</p>
+<ul>
+<li>Program type: supports three languages: JAVA, Scala and Python</li>
+<li>The class of the main function: is the full path of the Main Class, the entry point of the Flink program</li>
+<li>Main jar package: is the Flink jar package</li>
+<li>Deployment mode: supports cluster and local modes</li>
+<li>Number of slots: You can set the number of slots</li>
+<li>Number of taskManagers: You can set the number of taskManagers</li>
+<li>JobManager memory: You can set the jobManager memory size</li>
+<li>TaskManager memory: You can set the taskManager memory size</li>
+<li>Command line parameters: Set the input parameters of the Flink program and support the substitution of custom parameter variables.</li>
+<li>Other parameters: support --jars, --files, --archives, --conf format</li>
+<li>Resource: If the resource file is referenced in other parameters, you need to select and specify in the resource</li>
+<li>Custom parameter: a local user-defined parameter of the Flink task; its value replaces the ${variable} placeholders in the script</li>
+</ul>
+<p>Note: JAVA and Scala are only used for identification and make no difference. For Flink programs developed in Python there is no main function class; the other settings are the same.</p>
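+<p>For reference, the worker hands these settings to the Flink command-line client. The following is a hypothetical sketch of the resulting <code>flink run</code> call for a cluster-mode JAVA/Scala task; the class, jar, values and exact client flags are assumptions and depend on the Flink version:</p>
+<pre><code class="language-shell"># -ys: slots, -yn: number of taskManagers, -yjm/-ytm: jobManager/taskManager memory in MB (YARN per-job mode)
+flink run -m yarn-cluster \
+  -ys 1 -yn 2 -yjm 1024 -ytm 2048 \
+  -c com.example.FlinkMain \
+  main_app.jar arg1 arg2
+</code></pre>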
+<h4>7.10 http Node</h4>
+<ul>
+<li>Drag the <img src="/img/http.png" width="35"/> task node from the toolbar to the drawing board, as shown in the following figure:</li>
+</ul>
+<p align="center">
+   <img src="/img/http-en.png" width="80%" />
+ </p>
+<ul>
+<li>Node name: The node name in a workflow definition is unique.</li>
+<li>Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.</li>
+<li>Descriptive information: describe the function of the node.</li>
+<li>Task priority: when the number of worker threads is insufficient, tasks are executed in order of priority from high to low; tasks with the same priority are executed on a first-in, first-out basis.</li>
+<li>Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.</li>
+<li>Number of failed retry attempts: the number of times a failed task is resubmitted; it can be selected from the drop-down or filled in manually.</li>
+<li>Failed retry interval: the interval before a failed task is resubmitted; it can be selected from the drop-down or filled in manually.</li>
+<li>Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the &quot;timeout period&quot;, an alarm email will be sent and the task execution will fail.</li>
+<li>Request address: http request URL.</li>
+<li>Request type: supports GET, POST, HEAD, PUT, DELETE.</li>
+<li>Request parameters: Support Parameter, Body, Headers.</li>
+<li>Verification conditions: support default response code, custom response code, content included, content not included.</li>
+<li>Verification content: required when the verification condition is custom response code, content included, or content not included.</li>
+<li>Custom parameter: a user-defined parameter of the http task; its value replaces the ${variable} placeholders.</li>
+</ul>
+<h4>7.11 DATAX Node</h4>
+<ul>
+<li>Drag the <img src="/img/datax.png" width="35"/> task node from the toolbar to the drawing board</li>
+</ul>
+  <p align="center">
+   <img src="/img/datax-en.png" width="80%" />
+  </p>
+<ul>
+<li>Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)</li>
+<li>Data source: select the data source to extract the data</li>
+<li>sql statement: the sql statement used to extract data from the target database, the sql query column name is automatically parsed when the node is executed, and mapped to the target table synchronization column name. When the source table and target table column names are inconsistent, they can be converted by column alias (as)</li>
+<li>Target library: select the target library for data synchronization</li>
+<li>Target table: the name of the target table for data synchronization</li>
+<li>Pre-sql: Pre-sql is executed before the sql statement (executed by the target library).</li>
+<li>Post-sql: Post-sql is executed after the sql statement (executed by the target library).</li>
+<li>json: json configuration file for datax synchronization</li>
+<li>Custom parameters: the same as for the SQL task type; custom parameters replace the ${variable} placeholders in the sql statement.</li>
+</ul>
+<h3>8. Parameters</h3>
+<h4>8.1 System parameters</h4>
+<table>
+    <tr><th>variable</th><th>meaning</th></tr>
+    <tr>
+        <td>${system.biz.date}</td>
+        <td>The day before the scheduled time of the daily scheduling instance, in yyyyMMdd format; when complementing data, this date is +1</td>
+    </tr>
+    <tr>
+        <td>${system.biz.curdate}</td>
+        <td>The scheduled time of the daily scheduling instance, in yyyyMMdd format; when complementing data, this date is +1</td>
+    </tr>
+    <tr>
+        <td>${system.datetime}</td>
+        <td>The scheduled time of the daily scheduling instance, in yyyyMMddHHmmss format; when complementing data, this date is +1</td>
+    </tr>
+</table>
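+<p>For example (hypothetically assuming the scheduled time of the instance is 2020-09-08 03:18:50, with no data complement), the system parameters evaluate to:</p>
+<pre><code>* ${system.biz.date}     =&gt; 20200907
+* ${system.biz.curdate}  =&gt; 20200908
+* ${system.datetime}     =&gt; 20200908031850
+</code></pre>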
+<h4>8.2 Time custom parameters</h4>
+<ul>
+<li>
+<p>Support custom variable names in the code, declaration method: ${variable name}. It can refer to &quot;system parameters&quot; or specify &quot;constants&quot;.</p>
+</li>
+<li>
+<p>We define this benchmark variable as $[...] format, $[yyyyMMddHHmmss] can be decomposed and combined arbitrarily, such as: $[yyyyMMdd], $[HHmmss], $[yyyy-MM-dd], etc.</p>
+</li>
+<li>
+<p>The following format can also be used:</p>
+<pre><code>* Next N years:$[add_months(yyyyMMdd,12*N)]
+* N years before:$[add_months(yyyyMMdd,-12*N)]
+* Next N months:$[add_months(yyyyMMdd,N)]
+* N months before:$[add_months(yyyyMMdd,-N)]
+* Next N weeks:$[yyyyMMdd+7*N]
+* N weeks before:$[yyyyMMdd-7*N]
+* Next N days:$[yyyyMMdd+N]
+* N days before:$[yyyyMMdd-N]
+* Next N hours:$[HHmmss+N/24]
+* N hours before:$[HHmmss-N/24]
+* Next N minutes:$[HHmmss+N/24/60]
+* N minutes before:$[HHmmss-N/24/60]
+</code></pre>
+</li>
+</ul>
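+<p>As a worked example (hypothetically assuming the scheduled time of the instance is 2020-09-08 03:18:50), the expressions above evaluate as follows:</p>
+<pre><code>* $[yyyyMMdd]                =&gt; 20200908
+* $[yyyy-MM-dd]              =&gt; 2020-09-08
+* $[yyyyMMdd-1]              =&gt; 20200907   (1 day before)
+* $[yyyyMMdd+7*1]            =&gt; 20200915   (next 1 week)
+* $[add_months(yyyyMMdd,-1)] =&gt; 20200808   (1 month before)
+* $[HHmmss]                  =&gt; 031850
+</code></pre>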
+<h4>8.3 <span id=UserDefinedParameters>User-defined parameters</span></h4>
+<ul>
+<li>User-defined parameters are divided into global parameters and local parameters. Global parameters are passed when saving workflow definitions and workflow instances, and can be referenced by the local parameters of any task node in the entire process.
+For example:</li>
+</ul>
+<p align="center">
+   <img src="/img/local_parameter_en.png" width="80%" />
+ </p>
+<ul>
+<li>global_bizdate is a global parameter, which refers to a system parameter.</li>
+</ul>
+<p align="center">
+   <img src="/img/global_parameter_en.png" width="80%" />
+ </p>
+<ul>
+<li>In the task, local_param_bizdate uses ${global_bizdate} to refer to global parameters. For scripts, you can use ${local_param_bizdate} to refer to the value of global variable global_bizdate, or directly set the value of local_param_bizdate through JDBC.</li>
+</ul>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/development/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/development/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href="http:/ [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/system-manual.json b/en-us/docs/1.3.1/user_doc/system-manual.json
new file mode 100644
index 0000000..24714f9
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/system-manual.json
@@ -0,0 +1,6 @@
+{
+  "filename": "system-manual.md",
+  "__html": "<h1>System User Manual</h1>\n<h2>Get started quickly</h2>\n<blockquote>\n<p>Please refer to<a href=\"quick-start.html\">Get started quickly</a></p>\n</blockquote>\n<h2>Operation guide</h2>\n<h3>1. Home</h3>\n<p>The home page contains task status statistics, process status statistics, and workflow definition statistics of all items of the user.\n<p align=\"center\">\n<img src=\"/img/home_en.png\" width=\"80%\" />\n</p></p>\n<h3>2. Project management</h3>\n<h4>2.1 Create proje [...]
+  "link": "/en-us/docs/1.3.1/user_doc/system-manual.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/upgrade.html b/en-us/docs/1.3.1/user_doc/upgrade.html
new file mode 100644
index 0000000..7b1d053
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/upgrade.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="upgrade" />
+	<meta name="description" content="upgrade" />
+	<!-- 网页标签标题 -->
+	<title>upgrade</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<h2>1. Back up files and databases of the previous version</h2>
+<h2>2. Stop all services of dolphinscheduler</h2>
+<p><code>sh ./script/stop-all.sh</code></p>
+<h2>3. Download the new version of the installation package</h2>
+<ul>
+<li><a href="https://dolphinscheduler.apache.org/en-us/docs/release/download.html">Download</a>, Download the latest version of the binary installation package</li>
+<li>The following upgrade operations need to be performed in the new version directory</li>
+</ul>
+<h2>4. Database upgrade</h2>
+<ul>
+<li>
+<p>Modify the following properties in conf/datasource.properties</p>
+</li>
+<li>
+<p>If you choose MySQL, please comment out the PostgreSQL related configuration (and vice versa). You also need to manually add the [<a href="https://downloads.MySQL.com/archives/cj/"> mysql-connector-java driver jar</a>] package to the lib directory (here mysql-connector-java-5.1.47.jar), and then configure the database connection information correctly</p>
+<pre><code class="language-properties"><span class="hljs-comment">  # postgre</span>
+<span class="hljs-comment">  #spring.datasource.driver-class-name=org.postgresql.Driver</span>
+<span class="hljs-comment">  #spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler</span>
+<span class="hljs-comment">  # mysql</span>
+  <span class="hljs-meta">spring.datasource.driver-class-name</span>=<span class="hljs-string">com.mysql.jdbc.Driver</span>
+  <span class="hljs-meta">spring.datasource.url</span>=<span class="hljs-string">jdbc:mysql://xxx:3306/dolphinscheduler?useUnicode=true&amp;characterEncoding=UTF-8&amp;allowMultiQueries=true     Need to modify the ip, the local localhost can</span>
+  <span class="hljs-meta">spring.datasource.username</span>=<span class="hljs-string">xxx						Need to be modified to the above {user} value</span>
+  <span class="hljs-meta">spring.datasource.password</span>=<span class="hljs-string">xxx						Need to be modified to the above {password} value</span>
+</code></pre>
+</li>
+<li>
+<p>Execute database upgrade script</p>
+</li>
+</ul>
+<p><code>sh ./script/upgrade-escheduler.sh</code></p>
+<h2>5. Service upgrade</h2>
+<h3>5.1 Modify <code>conf/config/install_config.conf</code> configuration content</h3>
+<p>For standalone deployment, please refer to the <code>6. Modify the running parameters</code> section in <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.3.1/user_doc/standalone-deployment.html">Standalone Deployment</a>.
+For cluster deployment, please refer to the <code>6. Modify the running parameters</code> section in <a href="https://dolphinscheduler.apache.org/zh-cn/docs/1.3.1/user_doc/cluster-deployment.html">Cluster Deployment (Cluster)</a>.</p>
+<h3>Precautions</h3>
+<p>Worker groups are designed differently in version 1.3.1 than in previous versions</p>
+<ul>
+<li>Before version 1.3.1, worker groups were created through the UI</li>
+<li>In version 1.3.1, worker groups are specified by modifying the worker configuration</li>
+</ul>
+<h3>How to keep the worker grouping consistent with the previous version during the upgrade</h3>
+<p>1. Query the backed-up database, check the records in the t_ds_worker_group table, and focus on the three fields: id, name and ip_list</p>
+<table>
+<thead>
+<tr>
+<th style="text-align:left">id</th>
+<th style="text-align:center">name</th>
+<th style="text-align:right">ip_list</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left">1</td>
+<td style="text-align:center">service1</td>
+<td style="text-align:right">192.168.xx.10</td>
+</tr>
+<tr>
+<td style="text-align:left">2</td>
+<td style="text-align:center">service2</td>
+<td style="text-align:right">192.168.xx.11,192.168.xx.12</td>
+</tr>
+</tbody>
+</table>
+<p>2. Modify the workers parameter in conf/config/install_config.conf</p>
+<p>Assume that the following is the correspondence between the host name and ip of the worker to be deployed</p>
+<table>
+<thead>
+<tr>
+<th style="text-align:left">CPU name</th>
+<th style="text-align:center">ip</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left">ds1</td>
+<td style="text-align:center">192.168.xx.10</td>
+</tr>
+<tr>
+<td style="text-align:left">ds2</td>
+<td style="text-align:center">192.168.xx.11</td>
+</tr>
+<tr>
+<td style="text-align:left">ds3</td>
+<td style="text-align:center">192.168.xx.12</td>
+</tr>
+</tbody>
+</table>
+<p>In order to keep the grouping consistent with the previous version of the worker, you need to change the workers parameter to the following</p>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash">workerService is deployed on <span class="hljs-built_in">which</span> machine, and specify <span class="hljs-built_in">which</span> worker group this worker belongs to</span>
+workers="ds1:service1,ds2:service2,ds3:service2"
+</code></pre>
+<h3>5.2 Execute deployment script</h3>
+<pre><code class="language-shell">`sh install.sh`
+</code></pre>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/development/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/development/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href="http:/ [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.1/user_doc/upgrade.json b/en-us/docs/1.3.1/user_doc/upgrade.json
new file mode 100644
index 0000000..de13eb5
--- /dev/null
+++ b/en-us/docs/1.3.1/user_doc/upgrade.json
@@ -0,0 +1,6 @@
+{
+  "filename": "upgrade.md",
+  "__html": "<h1>DolphinScheduler Upgrade document</h1>\n<h2>1. Back up files and databases of the previous version</h2>\n<h2>2. Stop all services of dolphinscheduler</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. Download the new version of the installation package</h2>\n<ul>\n<li><a href=\"https://dolphinscheduler.apache.org/en-us/docs/release/download.html\">Download</a>, Download the latest version of the binary installation package</li>\n<li>The following upgrade operatio [...]
+  "link": "/en-us/docs/1.3.1/user_doc/upgrade.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/img/addtenant-en.png b/img/addtenant-en.png
new file mode 100644
index 0000000..1744c9a
Binary files /dev/null and b/img/addtenant-en.png differ
diff --git a/img/auth-en.png b/img/auth-en.png
new file mode 100644
index 0000000..b4fe13c
Binary files /dev/null and b/img/auth-en.png differ
diff --git a/img/complement_en.png b/img/complement_en.png
new file mode 100644
index 0000000..a92c5cd
Binary files /dev/null and b/img/complement_en.png differ
diff --git a/img/create-token-en.png b/img/create-token-en.png
new file mode 100644
index 0000000..cefe0f9
Binary files /dev/null and b/img/create-token-en.png differ
diff --git a/img/dag5.png b/img/dag5.png
new file mode 100644
index 0000000..d405833
Binary files /dev/null and b/img/dag5.png differ
diff --git a/img/dag6.png b/img/dag6.png
new file mode 100644
index 0000000..9b87478
Binary files /dev/null and b/img/dag6.png differ
diff --git a/img/dag7.png b/img/dag7.png
new file mode 100644
index 0000000..063acf5
Binary files /dev/null and b/img/dag7.png differ
diff --git a/img/dag8.png b/img/dag8.png
new file mode 100644
index 0000000..fb63e65
Binary files /dev/null and b/img/dag8.png differ
diff --git a/img/datax-en.png b/img/datax-en.png
new file mode 100644
index 0000000..38c4b2b
Binary files /dev/null and b/img/datax-en.png differ
diff --git a/img/depend-node-en.png b/img/depend-node-en.png
new file mode 100644
index 0000000..d1711d7
Binary files /dev/null and b/img/depend-node-en.png differ
diff --git a/img/depend-node1-en.png b/img/depend-node1-en.png
new file mode 100644
index 0000000..62314cb
Binary files /dev/null and b/img/depend-node1-en.png differ
diff --git a/img/depend-node3-en.png b/img/depend-node3-en.png
new file mode 100644
index 0000000..18ad849
Binary files /dev/null and b/img/depend-node3-en.png differ
diff --git a/img/editDag-en.png b/img/editDag-en.png
new file mode 100644
index 0000000..b3b62ae
Binary files /dev/null and b/img/editDag-en.png differ
diff --git a/img/file_create_en.png b/img/file_create_en.png
new file mode 100644
index 0000000..793dff5
Binary files /dev/null and b/img/file_create_en.png differ
diff --git a/img/file_detail_en.png b/img/file_detail_en.png
new file mode 100644
index 0000000..80a6436
Binary files /dev/null and b/img/file_detail_en.png differ
diff --git a/img/file_rename_en.png b/img/file_rename_en.png
new file mode 100644
index 0000000..b83bbda
Binary files /dev/null and b/img/file_rename_en.png differ
diff --git a/img/flink-en.png b/img/flink-en.png
new file mode 100644
index 0000000..ac6f271
Binary files /dev/null and b/img/flink-en.png differ
diff --git a/img/global_parameter_en.png b/img/global_parameter_en.png
new file mode 100644
index 0000000..32aecf3
Binary files /dev/null and b/img/global_parameter_en.png differ
diff --git a/img/hive1-en.png b/img/hive1-en.png
new file mode 100644
index 0000000..12bdd81
Binary files /dev/null and b/img/hive1-en.png differ
diff --git a/img/http-en.png b/img/http-en.png
new file mode 100644
index 0000000..fb57476
Binary files /dev/null and b/img/http-en.png differ
diff --git a/img/instance-list-en.png b/img/instance-list-en.png
new file mode 100644
index 0000000..a8b177a
Binary files /dev/null and b/img/instance-list-en.png differ
diff --git a/img/instanceViewLog-en.png b/img/instanceViewLog-en.png
new file mode 100644
index 0000000..3b5fab5
Binary files /dev/null and b/img/instanceViewLog-en.png differ
diff --git a/img/local_parameter_en.png b/img/local_parameter_en.png
new file mode 100644
index 0000000..2d72f03
Binary files /dev/null and b/img/local_parameter_en.png differ
diff --git a/img/mail-en.png b/img/mail-en.png
new file mode 100644
index 0000000..bcdaa25
Binary files /dev/null and b/img/mail-en.png differ
diff --git a/img/master-jk-en.png b/img/master-jk-en.png
new file mode 100644
index 0000000..4f1d6d8
Binary files /dev/null and b/img/master-jk-en.png differ
diff --git a/img/mr_edit_en.png b/img/mr_edit_en.png
new file mode 100644
index 0000000..30d5d5a
Binary files /dev/null and b/img/mr_edit_en.png differ
diff --git a/img/mr_java_en.png b/img/mr_java_en.png
new file mode 100644
index 0000000..ecb0480
Binary files /dev/null and b/img/mr_java_en.png differ
diff --git a/img/mysql-jk-en.png b/img/mysql-jk-en.png
new file mode 100644
index 0000000..a8e06e8
Binary files /dev/null and b/img/mysql-jk-en.png differ
diff --git a/img/postgresql-en.png b/img/postgresql-en.png
new file mode 100644
index 0000000..f1f3de8
Binary files /dev/null and b/img/postgresql-en.png differ
diff --git a/img/procedure-en.png b/img/procedure-en.png
new file mode 100644
index 0000000..b4679b3
Binary files /dev/null and b/img/procedure-en.png differ
diff --git a/img/run_params_en.png b/img/run_params_en.png
new file mode 100644
index 0000000..acb3c24
Binary files /dev/null and b/img/run_params_en.png differ
diff --git a/img/run_work_en.png b/img/run_work_en.png
new file mode 100644
index 0000000..1c2c28b
Binary files /dev/null and b/img/run_work_en.png differ
diff --git a/img/spark-en.png b/img/spark-en.png
new file mode 100644
index 0000000..52b5ddf
Binary files /dev/null and b/img/spark-en.png differ
diff --git a/img/sql-en.png b/img/sql-en.png
new file mode 100644
index 0000000..3fc779c
Binary files /dev/null and b/img/sql-en.png differ
diff --git a/img/sql-node-en.png b/img/sql-node-en.png
new file mode 100644
index 0000000..fbf2faf
Binary files /dev/null and b/img/sql-node-en.png differ
diff --git a/img/task-list-en.png b/img/task-list-en.png
new file mode 100644
index 0000000..ba5da67
Binary files /dev/null and b/img/task-list-en.png differ
diff --git a/img/task-log-en.png b/img/task-log-en.png
new file mode 100644
index 0000000..12680ef
Binary files /dev/null and b/img/task-log-en.png differ
diff --git a/img/task-log2-en.png b/img/task-log2-en.png
new file mode 100644
index 0000000..013f58f
Binary files /dev/null and b/img/task-log2-en.png differ
diff --git a/img/task_history_en.png b/img/task_history_en.png
new file mode 100644
index 0000000..c6b5e9a
Binary files /dev/null and b/img/task_history_en.png differ
diff --git a/img/time-manage-list-en.png b/img/time-manage-list-en.png
new file mode 100644
index 0000000..99e4c42
Binary files /dev/null and b/img/time-manage-list-en.png differ
diff --git a/img/time_schedule_en.png b/img/time_schedule_en.png
new file mode 100644
index 0000000..6a830d1
Binary files /dev/null and b/img/time_schedule_en.png differ
diff --git a/img/tree_en.png b/img/tree_en.png
new file mode 100644
index 0000000..3486bbc
Binary files /dev/null and b/img/tree_en.png differ
diff --git a/img/udf_edit_en.png b/img/udf_edit_en.png
new file mode 100644
index 0000000..a5160d8
Binary files /dev/null and b/img/udf_edit_en.png differ
diff --git a/img/user-en.png b/img/user-en.png
new file mode 100644
index 0000000..35952a8
Binary files /dev/null and b/img/user-en.png differ
diff --git a/img/work_list_en.png b/img/work_list_en.png
new file mode 100644
index 0000000..b1d6bd4
Binary files /dev/null and b/img/work_list_en.png differ
diff --git a/img/worker-jk-en.png b/img/worker-jk-en.png
new file mode 100644
index 0000000..4472602
Binary files /dev/null and b/img/worker-jk-en.png differ
diff --git a/zh-cn/docs/1.3.1/user_doc/upgrade.html b/zh-cn/docs/1.3.1/user_doc/upgrade.html
index 248ef00..88f6563 100644
--- a/zh-cn/docs/1.3.1/user_doc/upgrade.html
+++ b/zh-cn/docs/1.3.1/user_doc/upgrade.html
@@ -18,7 +18,7 @@
 <p><code>sh ./script/stop-all.sh</code></p>
 <h2>3. 下载新版本的安装包</h2>
 <ul>
-<li><a href="https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html">下载</a>, 下载最新版本的二进制安装包</li>
+<li><a href="https://dolphinscheduler.apache.org/zh-cn/docs/release/download.html">下载</a>, 下载最新版本的二进制安装包</li>
 <li>以下升级操作都需要在新版本的目录进行</li>
 </ul>
 <h2>4. 数据库升级</h2>
diff --git a/zh-cn/docs/1.3.1/user_doc/upgrade.json b/zh-cn/docs/1.3.1/user_doc/upgrade.json
index 6780d4b..94f65ed 100644
--- a/zh-cn/docs/1.3.1/user_doc/upgrade.json
+++ b/zh-cn/docs/1.3.1/user_doc/upgrade.json
@@ -1,6 +1,6 @@
 {
   "filename": "upgrade.md",
-  "__html": "<h1>DolphinScheduler升级文档</h1>\n<h2>1. 备份上一版本文件和数据库</h2>\n<h2>2. 停止dolphinscheduler所有服务</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. 下载新版本的安装包</h2>\n<ul>\n<li><a href=\"https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html\">下载</a>, 下载最新版本的二进制安装包</li>\n<li>以下升级操作都需要在新版本的目录进行</li>\n</ul>\n<h2>4. 数据库升级</h2>\n<ul>\n<li>\n<p>修改conf/datasource.properties中的下列属性</p>\n</li>\n<li>\n<p>如果选择 MySQL,请注释掉 PostgreSQL 相关配置(反之同理), 还需要手动添加 [<a href=\"https://downlo [...]
+  "__html": "<h1>DolphinScheduler升级文档</h1>\n<h2>1. 备份上一版本文件和数据库</h2>\n<h2>2. 停止dolphinscheduler所有服务</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. 下载新版本的安装包</h2>\n<ul>\n<li><a href=\"https://dolphinscheduler.apache.org/zh-cn/docs/release/download.html\">下载</a>, 下载最新版本的二进制安装包</li>\n<li>以下升级操作都需要在新版本的目录进行</li>\n</ul>\n<h2>4. 数据库升级</h2>\n<ul>\n<li>\n<p>修改conf/datasource.properties中的下列属性</p>\n</li>\n<li>\n<p>如果选择 MySQL,请注释掉 PostgreSQL 相关配置(反之同理), 还需要手动添加 [<a href=\"https://downloa [...]
   "link": "/zh-cn/docs/1.3.1/user_doc/upgrade.html",
   "meta": {}
 }
\ No newline at end of file