Posted to commits@dolphinscheduler.apache.org by gi...@apache.org on 2020/06/16 09:54:52 UTC

[incubator-dolphinscheduler-website] branch asf-site updated: Automated deployment: Tue Jun 16 09:54:00 UTC 2020 d60d51f00ba7f23828452211c4c8110f3b0d0872

This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-dolphinscheduler-website.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 8736c89  Automated deployment: Tue Jun 16 09:54:00 UTC 2020 d60d51f00ba7f23828452211c4c8110f3b0d0872
8736c89 is described below

commit 8736c892a6455ef338e2f13f3e768304be3f0ee7
Author: dailidong <da...@users.noreply.github.com>
AuthorDate: Tue Jun 16 09:54:00 2020 +0000

    Automated deployment: Tue Jun 16 09:54:00 UTC 2020 d60d51f00ba7f23828452211c4c8110f3b0d0872
---
 build/blog.js                                      |   2 +-
 build/blogDetail.js                                |   2 +-
 build/community.js                                 |   2 +-
 build/documentation.js                             |   2 +-
 build/home.js                                      |   2 +-
 en-us/docs/1.3.0/user_doc/cluster-deployment.html  | 400 +++++++++
 en-us/docs/1.3.0/user_doc/cluster-deployment.json  |   6 +
 .../docs/1.3.0/user_doc/hardware-environment.html  | 132 +++
 .../docs/1.3.0/user_doc/hardware-environment.json  |   6 +
 .../docs/1.3.0/user_doc/standalone-deployment.html | 393 +++++++++
 .../docs/1.3.0/user_doc/standalone-deployment.json |   6 +
 zh-cn/docs/1.3.0/user_doc/metadata-1.3.html        | 733 ++++++++++++++++
 zh-cn/docs/1.3.0/user_doc/metadata-1.3.json        |   6 +
 .../docs/1.3.0/user_doc/standalone-deployment.html |   2 +-
 .../docs/1.3.0/user_doc/standalone-deployment.json |   2 +-
 zh-cn/docs/1.3.0/user_doc/system-manual.html       | 976 +++++++++++++++++++++
 zh-cn/docs/1.3.0/user_doc/system-manual.json       |   6 +
 zh-cn/docs/1.3.0/user_doc/upgrade.html             |   2 +-
 zh-cn/docs/1.3.0/user_doc/upgrade.json             |   2 +-
 19 files changed, 2673 insertions(+), 9 deletions(-)

diff --git a/build/blog.js b/build/blog.js
index 3a8d711..1fbb724 100644
--- a/build/blog.js
+++ b/build/blog.js
@@ -10,7 +10,7 @@ object-assign
 (c) Sindre Sorhus
 @license MIT
 */
-var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
+var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
 //! moment.js locale configuration
 var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d ش [...]
 //! moment.js locale configuration
diff --git a/build/blogDetail.js b/build/blogDetail.js
index c329aba..32c0fc7 100644
--- a/build/blogDetail.js
+++ b/build/blogDetail.js
@@ -10,7 +10,7 @@ object-assign
 (c) Sindre Sorhus
 @license MIT
 */
-var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
+var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
 //! moment.js locale configuration
 var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d ش [...]
 //! moment.js locale configuration
diff --git a/build/community.js b/build/community.js
index 36f5d99..b6afe46 100644
--- a/build/community.js
+++ b/build/community.js
@@ -10,7 +10,7 @@ object-assign
 (c) Sindre Sorhus
 @license MIT
 */
-var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
+var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
 //! moment.js locale configuration
 var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d ش [...]
 //! moment.js locale configuration
diff --git a/build/documentation.js b/build/documentation.js
index d03247c..56b0981 100644
--- a/build/documentation.js
+++ b/build/documentation.js
@@ -10,7 +10,7 @@ object-assign
 (c) Sindre Sorhus
 @license MIT
 */
-var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
+var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
 //! moment.js locale configuration
 var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d ش [...]
 //! moment.js locale configuration
diff --git a/build/home.js b/build/home.js
index 87366da..84a8c0d 100644
--- a/build/home.js
+++ b/build/home.js
@@ -10,7 +10,7 @@ object-assign
 (c) Sindre Sorhus
 @license MIT
 */
-var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
+var o=Object.getOwnPropertySymbols,i=Object.prototype.hasOwnProperty,a=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmn [...]
 //! moment.js locale configuration
 var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d ش [...]
 //! moment.js locale configuration
diff --git a/en-us/docs/1.3.0/user_doc/cluster-deployment.html b/en-us/docs/1.3.0/user_doc/cluster-deployment.html
new file mode 100644
index 0000000..686a587
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/cluster-deployment.html
@@ -0,0 +1,400 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="cluster-deployment" />
+	<meta name="description" content="cluster-deployment" />
+	<!-- page tab title -->
+	<title>cluster-deployment</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<h1>1、Before you begin (please install the required basic software yourself)</h1>
+<ul>
+<li>PostgreSQL (8.2.15+) or MySQL (5.7) : Choose one</li>
+<li><a href="https://www.oracle.com/technetwork/java/javase/downloads/index.html">JDK</a> (1.8+) : Required. Double-check that the JAVA_HOME and PATH environment variables are configured in /etc/profile</li>
+<li>ZooKeeper (3.4.6+) : Required</li>
+<li>Hadoop (2.6+) or MinIO : Optional. If you need the resource upload function, a single machine can use a local file directory as the upload folder (this does not require deploying Hadoop). Of course, you can also choose to upload to Hadoop or MinIO.</li>
+</ul>
+<pre><code class="language-markdown"> Tip: DolphinScheduler itself does not rely on Hadoop, Hive, or Spark; it only uses their clients to run the corresponding tasks.
+</code></pre>
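+<p>Before moving on, you can quickly confirm the basic software is in place (a minimal sketch; the hosts, ports, and availability of <code>nc</code> are assumptions, adjust to your environment):</p>
+<pre><code class="language-shell"># JDK 1.8+ present and JAVA_HOME set
+java -version
+echo $JAVA_HOME
+# database server reachable (MySQL example)
+mysql --version
+# ZooKeeper answers the four-letter "ruok" command on its client port (expects "imok")
+echo ruok | nc localhost 2181
+</code></pre>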
+<h1>2、Download the binary package.</h1>
+<ul>
+<li>Please download the latest version of the default installation package to the server deployment directory. For example, use /opt/dolphinscheduler as the installation and deployment directory. Download address: <a href="https://dist.apache.org/repos/dist/dev/incubator/dolphinscheduler/1.3.0/apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin.tar.gz">Download</a>. Move the package to the installation and deployment directory and unzip it.</li>
+</ul>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Create the deployment directory. Do not choose a high-privilege directory such as /root or /home as the deployment directory.</span>
+mkdir -p /opt/dolphinscheduler;
+cd /opt/dolphinscheduler;
+<span class="hljs-meta">#</span><span class="bash"> unzip</span>
+tar -zxvf apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin.tar.gz -C /opt/dolphinscheduler;
+
+mv apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin  dolphinscheduler-bin
+</code></pre>
+<h1>3、Create deployment user and hosts mapping</h1>
+<ul>
+<li>Create a deployment user on <strong>all</strong> deployment machines, and be sure to configure passwordless sudo for it. If we plan to deploy DolphinScheduler on 4 machines: ds1, ds2, ds3, and ds4, we first need to create a deployment user on each machine.</li>
+</ul>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> To create a user, you need to <span class="hljs-built_in">log</span> <span class="hljs-keyword">in</span> as root and <span class="hljs-built_in">set</span> the deployment user name. Please modify it yourself. The following uses dolphinscheduler as an example.</span>
+useradd dolphinscheduler;
+<span class="hljs-meta">
+#</span><span class="bash"> Set the user password, please modify it yourself. The following takes dolphinscheduler123 as an example.</span>
+echo "dolphinscheduler123" | passwd --stdin dolphinscheduler
+<span class="hljs-meta">
+#</span><span class="bash"> Configure sudo passwordless</span>
+echo 'dolphinscheduler  ALL=(ALL)  NOPASSWD: ALL' &gt;&gt; /etc/sudoers
+sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
+
+</code></pre>
+<pre><code> Notes:
+ - Because the task execution service uses 'sudo -u {linux-user}' to switch between different Linux users to run multi-tenant jobs, the deployment user needs passwordless sudo permissions. First-time learners can ignore this note if they don't understand it yet.
+ - If the line &quot;Defaults requiretty&quot; appears in the &quot;/etc/sudoers&quot; file, comment it out as well.
+ - If you need to use resource upload, grant the deployment user permission to operate on the local file system, HDFS, or MinIO.
+</code></pre>
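+<p>To verify the user and sudo configuration above (a minimal check, assuming the example dolphinscheduler user), the following should print &quot;root&quot; without asking for a password:</p>
+<pre><code class="language-shell">su - dolphinscheduler
+sudo -n whoami    # -n fails instead of prompting, so a misconfiguration shows up immediately
+</code></pre>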
+<h1>4、Configure the hosts mapping, SSH access, and directory permissions</h1>
+<ul>
+<li>
+<p>Use the first machine (hostname is ds1) as the deployment machine, configure the hosts of all machines to be deployed on ds1, and login as root on ds1.</p>
+<pre><code class="language-shell">vi /etc/hosts
+<span class="hljs-meta">
+#</span><span class="bash">add ip hostname</span>
+192.168.xxx.xxx ds1
+192.168.xxx.xxx ds2
+192.168.xxx.xxx ds3
+192.168.xxx.xxx ds4
+</code></pre>
+<p><em>Note: Please delete or comment out the line 127.0.0.1</em></p>
+</li>
+<li>
+<p>Sync /etc/hosts on ds1 to all deployment machines</p>
+<pre><code class="language-shell">for ip in ds2 ds3;     # Please replace ds2 ds3 here with the hostname of machines you want to deploy
+do
+    sudo scp -r /etc/hosts  $ip:/etc/          # Need to enter root password during operation
+done
+</code></pre>
+<p><em>Note: you can use <code>sshpass -p xxx sudo scp -r /etc/hosts $ip:/etc/</code> to avoid typing the password.</em></p>
+<blockquote>
+<p>Install sshpass on CentOS:</p>
+<ol>
+<li>
+<p>Install epel</p>
+<p>yum install -y epel-release</p>
+<p>yum repolist</p>
+</li>
+<li>
+<p>After installing epel, you can install sshpass</p>
+<p>yum install -y sshpass</p>
+</li>
+</ol>
+</blockquote>
+</li>
+<li>
+<p>On ds1, switch to the deployment user and configure ssh passwordless login</p>
+<pre><code class="language-shell"> su dolphinscheduler;
+
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub &gt;&gt; ~/.ssh/authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+</code></pre>
+</li>
+</ul>
+<p>Note: <em>If the configuration succeeded, the dolphinscheduler user will not be prompted for a password when executing the command <code>ssh localhost</code></em></p>
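+<p>A quick way to confirm this (a minimal check, run as the dolphinscheduler user):</p>
+<pre><code class="language-shell"># should print the hostname without any password prompt
+# BatchMode makes ssh fail fast instead of prompting
+ssh -o BatchMode=yes localhost hostname
+</code></pre>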
+<ul>
+<li>
+<p>On ds1, configure SSH so that the deployment user dolphinscheduler can connect to the other machines to be deployed.</p>
+<pre><code class="language-shell">su dolphinscheduler;
+for ip in ds2 ds3;     # Please replace ds2 ds3 here with the hostname of the machine you want to deploy.
+do
+    ssh-copy-id  $ip   # You need to manually enter the password of the dolphinscheduler user during the operation.
+done
+<span class="hljs-meta">#</span><span class="bash"> You can use `sshpass -p xxx ssh-copy-id <span class="hljs-variable">$ip</span>` to avoid typing the password.</span>
+</code></pre>
+</li>
+<li>
+<p>On ds1, modify the directory permissions so that the deployment user has operation permissions on the dolphinscheduler-backend directory.</p>
+<pre><code class="language-shell">sudo chown -R dolphinscheduler:dolphinscheduler dolphinscheduler-backend
+</code></pre>
+</li>
+</ul>
+<h1>5、Database initialization</h1>
+<ul>
+<li>Log in to the database. The default database is PostgreSQL. If you select MySQL, you need to add the mysql-connector-java driver package to the lib directory of DolphinScheduler.</li>
+</ul>
+<pre><code>mysql -uroot -p
+</code></pre>
+<ul>
+<li>After entering the database command line window, execute the database initialization command and set the user and password. <strong>Note: {user} and {password} need to be replaced with a specific database username and password</strong></li>
+</ul>
+<pre><code class="language-mysql">   mysql&gt; CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+   mysql&gt; GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
+   mysql&gt; GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY '{password}';
+   mysql&gt; flush privileges;
+</code></pre>
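+<p>Before moving on, you can verify the new account works (a sketch; replace {user} and {password} with the values you just granted):</p>
+<pre><code class="language-shell">mysql -u{user} -p'{password}' -e "SHOW DATABASES LIKE 'dolphinscheduler';"
+</code></pre>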
+<ul>
+<li>
+<p>Create tables and import basic data</p>
+<ul>
+<li>Modify the following configuration in datasource.properties under the conf directory</li>
+</ul>
+<pre><code class="language-shell">  vi conf/datasource.properties
+</code></pre>
+<ul>
+<li>If you choose MySQL, please comment out the PostgreSQL configuration (and vice versa). You also need to manually add the <a href="https://downloads.mysql.com/archives/c-j/">mysql-connector-java driver jar</a> package to the lib directory, and then configure the database connection information correctly.</li>
+</ul>
+<pre><code class="language-properties"><span class="hljs-comment">  #postgre</span>
+<span class="hljs-comment">  #spring.datasource.driver-class-name=org.postgresql.Driver</span>
+<span class="hljs-comment">  #spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler</span>
+<span class="hljs-comment">  # mysql</span>
+  <span class="hljs-meta">spring.datasource.driver-class-name</span>=<span class="hljs-string">com.mysql.jdbc.Driver</span>
+  <span class="hljs-meta">spring.datasource.url</span>=<span class="hljs-string">jdbc:mysql://xxx:3306/dolphinscheduler?useUnicode=true&amp;characterEncoding=UTF-8&amp;allowMultiQueries=true     # Replace the correct IP address</span>
+  <span class="hljs-meta">spring.datasource.username</span>=<span class="hljs-string">xxx						# replace the correct {user} value</span>
+  <span class="hljs-meta">spring.datasource.password</span>=<span class="hljs-string">xxx						# replace the correct {password} value</span>
+</code></pre>
+<ul>
+<li>After modifying and saving, execute the create table and import data script in the script directory.</li>
+</ul>
+<pre><code class="language-shell">sh script/create-dolphinscheduler.sh
+</code></pre>
+</li>
+</ul>
+<p><em>Note: If executing the above script reports a &quot;/bin/java: No such file or directory&quot; error, please configure the JAVA_HOME and PATH variables in /etc/profile</em></p>
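+<p>If they are missing, a minimal /etc/profile entry could look like the following (assuming the JDK lives under /opt/soft/java, as in the next section):</p>
+<pre><code class="language-shell"># append to /etc/profile, then re-login or source it
+export JAVA_HOME=/opt/soft/java
+export PATH=$JAVA_HOME/bin:$PATH
+source /etc/profile
+</code></pre>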
+<h1>6、Modify runtime parameters.</h1>
+<ul>
+<li>
+<p>Modify the environment variables in the <code>dolphinscheduler_env.sh</code> file in the 'conf/env' directory (taking software installed under '/opt/soft' as an example)</p>
+<pre><code class="language-shell">    export HADOOP_HOME=/opt/soft/hadoop
+    export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+    #export SPARK_HOME1=/opt/soft/spark1
+    export SPARK_HOME2=/opt/soft/spark2
+    export PYTHON_HOME=/opt/soft/python
+    export JAVA_HOME=/opt/soft/java
+    export HIVE_HOME=/opt/soft/hive
+    export FLINK_HOME=/opt/soft/flink
+    export DATAX_HOME=/opt/soft/datax/bin/datax.py
+    export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
+</code></pre>
+<p><code>Note: This step is very important. For example, JAVA_HOME and PATH must be configured. Those that are not used can be ignored or commented out.</code></p>
+</li>
+<li>
+<p>Create a soft link from the JDK to /usr/bin/java (still using JAVA_HOME=/opt/soft/java as an example)</p>
+<pre><code class="language-shell">sudo ln -s /opt/soft/java/bin/java /usr/bin/java
+</code></pre>
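+<p>A quick sanity check that the link resolves to the expected JDK:</p>
+<pre><code class="language-shell">ls -l /usr/bin/java
+/usr/bin/java -version
+</code></pre>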
+</li>
+<li>
+<p>Modify the parameters in the one-click deployment config file <code>conf/config/install_config.conf</code>, pay special attention to the configuration of the following parameters.</p>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> choose mysql or postgresql</span>
+dbtype="mysql"
+<span class="hljs-meta">
+#</span><span class="bash"> Database connection address and port</span>
+dbhost="192.168.xx.xx:3306"
+<span class="hljs-meta">
+#</span><span class="bash"> database name</span>
+dbname="dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> database username</span>
+username="xxx"
+<span class="hljs-meta">
+#</span><span class="bash"> database password</span>
+<span class="hljs-meta">#</span><span class="bash"> NOTICE: <span class="hljs-keyword">if</span> there are special characters, please use the \ to escape, <span class="hljs-keyword">for</span> example, `[` escape to `\[`</span>
+passowrd="xxx"
+<span class="hljs-meta">
+#</span><span class="bash">Zookeeper cluster</span>
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+<span class="hljs-meta">
+#</span><span class="bash"> Note: the target installation path for dolphinscheduler; please do not configure it to be the same as the current path (pwd)</span>
+installPath="/opt/soft/dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> deployment user</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: the deployment user needs sudo privileges and permission to operate hdfs. If hdfs is enabled, the root directory needs to be created manually</span>
+deployUser="dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> alert config,take QQ email <span class="hljs-keyword">for</span> example</span>
+<span class="hljs-meta">#</span><span class="bash"> mail protocol</span>
+mailProtocol="SMTP"
+<span class="hljs-meta">
+#</span><span class="bash"> mail server host</span>
+mailServerHost="smtp.qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail server port</span>
+<span class="hljs-meta">#</span><span class="bash"> note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.</span>
+mailServerPort="25"
+<span class="hljs-meta">
+#</span><span class="bash"> mail sender</span>
+mailSender="xxx@qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail user</span>
+mailUser="xxx@qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail sender password</span>
+<span class="hljs-meta">#</span><span class="bash"> note: the mail password is the email service authorization code, not the email login password.</span>
+mailPassword="xxx"
+<span class="hljs-meta">
+#</span><span class="bash"> Whether the TLS mail protocol is supported, true is supported and false is not supported</span>
+starttlsEnable="true"
+<span class="hljs-meta">
+#</span><span class="bash"> Whether the SSL mail protocol is supported, true is supported and false is not supported.</span>
+<span class="hljs-meta">#</span><span class="bash"> note: only one of TLS and SSL can be in the true state.</span>
+sslEnable="false"
+<span class="hljs-meta">
+#</span><span class="bash"> note: sslTrust is the same as mailServerHost</span>
+sslTrust="smtp.qq.com"
+<span class="hljs-meta">
+
+#</span><span class="bash"> resource storage <span class="hljs-built_in">type</span>:HDFS,S3,NONE</span>
+resourceStorageType="HDFS"
+<span class="hljs-meta">
+#</span><span class="bash"> if resourceStorageType is HDFS, set defaultFS to the namenode address; for HA you need to put core-site.xml and hdfs-site.xml in the conf directory.</span>
+<span class="hljs-meta">#</span><span class="bash"> if S3, write the S3 address, for example: s3a://dolphinscheduler</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: for S3, be sure to create the root directory /dolphinscheduler</span>
+defaultFS="hdfs://mycluster:8020"
+<span class="hljs-meta">
+
+#</span><span class="bash"> if you do not use the hadoop resourcemanager, keep the default value; if resourcemanager HA is enabled, type the HA ips; if the resourcemanager is single, leave this value empty</span>
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+<span class="hljs-meta">
+#</span><span class="bash"> if resourcemanager HA is enabled or you do not use the resourcemanager, skip this setting; if the resourcemanager is single, just replace yarnIp1 with the actual resourcemanager hostname.</span>
+singleYarnIp="yarnIp1"
+<span class="hljs-meta">
+#</span><span class="bash"> resource storage path on HDFS/S3; resource files will be stored under this path. Please make sure the directory exists on hdfs and has read/write permissions. /dolphinscheduler is recommended</span>
+resourceUploadPath="/dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> the user who has permission to create directories under the HDFS/S3 root path</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: <span class="hljs-keyword">if</span> kerberos is enabled, please config hdfsRootUser=</span>
+hdfsRootUser="hdfs"
+<span class="hljs-meta">
+
+
+#</span><span class="bash"> install hosts</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: the hostname list of machines on which DolphinScheduler will be installed. For a pseudo-distributed deployment, just write the single pseudo-distributed hostname</span>
+ips="ds1,ds2,ds3,ds4"
+<span class="hljs-meta">
+#</span><span class="bash"> ssh port, default 22</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: <span class="hljs-keyword">if</span> ssh port is not default, modify here</span>
+sshPort="22"
+<span class="hljs-meta">
+#</span><span class="bash"> run master machine</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: list of hosts hostname <span class="hljs-keyword">for</span> deploying master</span>
+masters="ds1,ds2"
+<span class="hljs-meta">
+#</span><span class="bash"> run worker machine</span>
+<span class="hljs-meta">#</span><span class="bash"> note: need to write the worker group name of each worker, the default value is <span class="hljs-string">"default"</span></span>
+workers="ds3:default,ds4:default"
+<span class="hljs-meta">
+#</span><span class="bash"> run alert machine</span>
+<span class="hljs-meta">#</span><span class="bash"> note: list of machine hostnames <span class="hljs-keyword">for</span> deploying alert server</span>
+alertServer="ds2"
+<span class="hljs-meta">
+#</span><span class="bash"> run api machine</span>
+<span class="hljs-meta">#</span><span class="bash"> note: list of machine hostnames <span class="hljs-keyword">for</span> deploying api server</span>
+apiServers="ds1"
+
+</code></pre>
+<p><em>Attention:</em></p>
+<ul>
+<li>If you need to upload resources to the Hadoop cluster and the NameNode of the Hadoop cluster is configured with HA, you need to enable HDFS resource upload and copy core-site.xml and hdfs-site.xml from the Hadoop cluster to /opt/dolphinscheduler/conf, as sketched below. If the NameNode is not HA, skip this step.</li>
+</ul>
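+<p>A sketch of that copy step for the NameNode-HA case (the Hadoop configuration path is an assumption; adjust it to your cluster layout):</p>
+<pre><code class="language-shell">cp /opt/soft/hadoop/etc/hadoop/core-site.xml /opt/dolphinscheduler/conf/
+cp /opt/soft/hadoop/etc/hadoop/hdfs-site.xml /opt/dolphinscheduler/conf/
+</code></pre>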
+</li>
+</ul>
+<h1>7、Automated Deployment</h1>
+<ul>
+<li>
+<p>Switch to the deployment user and execute the one-click deployment script</p>
+<p><code>sh install.sh</code></p>
+<pre><code>Note:
+For the first deployment, the following message may appear during the `3, stop server` step of the script; it can be safely ignored:
+sh: bin/dolphinscheduler-daemon.sh: No such file or directory
+</code></pre>
+</li>
+<li>
+<p>After the script completes, the following 5 services will be started. Use the <code>jps</code> command to check whether the services are running (<code>jps</code> ships with the JDK)</p>
+</li>
+</ul>
+<pre><code class="language-aidl">    MasterServer         ----- master service
+    WorkerServer         ----- worker service
+    LoggerServer         ----- logger service
+    ApiApplicationServer ----- api service
+    AlertServer          ----- alert service
+</code></pre>
+<p>If the above services are started normally, the automatic deployment is successful.</p>
+<p>After the deployment is successful, you can view the logs. The logs are stored in the logs folder.</p>
+<pre><code class="language-log"> logs/
+    ├── dolphinscheduler-alert-server.log
+    ├── dolphinscheduler-master-server.log
+    ├── dolphinscheduler-worker-server.log
+    ├── dolphinscheduler-api-server.log
+    └── dolphinscheduler-logger-server.log
+</code></pre>
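+<p>For example, to follow the master log while verifying a deployment (any of the files above works the same way):</p>
+<pre><code class="language-shell">tail -f logs/dolphinscheduler-master-server.log
+</code></pre>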
+<h1>8、Login</h1>
+<ul>
+<li>
+<p>Access the front-end page at the following address, replacing the interface IP with your own:
+<a href="http://192.168.xx.xx:12345/dolphinscheduler">http://192.168.xx.xx:12345/dolphinscheduler</a></p>
+ <p align="center">
+   <img src="/img/login.png" width="60%" />
+ </p>
+</li>
+</ul>
+<h1>9、Start and stop service</h1>
+<ul>
+<li>
+<p>Stop all services</p>
+<p><code>sh ./bin/stop-all.sh</code></p>
+</li>
+<li>
+<p>Start all services</p>
+<p><code>sh ./bin/start-all.sh</code></p>
+</li>
+<li>
+<p>Start and stop master service</p>
+</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start master-server
+sh ./bin/dolphinscheduler-daemon.sh stop master-server
+</code></pre>
+<ul>
+<li>Start and stop worker service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start worker-server
+sh ./bin/dolphinscheduler-daemon.sh stop worker-server
+</code></pre>
+<ul>
+<li>Start and stop api service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start api-server
+sh ./bin/dolphinscheduler-daemon.sh stop api-server
+</code></pre>
+<ul>
+<li>Start and stop logger service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start logger-server
+sh ./bin/dolphinscheduler-daemon.sh stop logger-server
+</code></pre>
+<ul>
+<li>Start and stop alert service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start alert-server
+sh ./bin/dolphinscheduler-daemon.sh stop alert-server
+</code></pre>
+<p><code>Note: Please refer to the &quot;Architecture Design&quot; section for service usage</code></p>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/1.2.0/user_doc/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href=" [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.0/user_doc/cluster-deployment.json b/en-us/docs/1.3.0/user_doc/cluster-deployment.json
new file mode 100644
index 0000000..1a68173
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/cluster-deployment.json
@@ -0,0 +1,6 @@
+{
+  "filename": "cluster-deployment.md",
+  "__html": "<h1>Cluster Deployment</h1>\n<h1>1、Before you begin (please install requirement basic software by yourself)</h1>\n<ul>\n<li>PostgreSQL (8.2.15+) or MySQL (5.7)  :  Choose One</li>\n<li><a href=\"https://www.oracle.com/technetwork/java/javase/downloads/index.html\">JDK</a> (1.8+) :  Required. Double-check configure JAVA_HOME and PATH environment variables in /etc/profile</li>\n<li>ZooKeeper (3.4.6+) :Required</li>\n<li>Hadoop (2.6+) or MinIO :Optional. If you need to upload a [...]
+  "link": "/en-us/docs/1.3.0/user_doc/cluster-deployment.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/en-us/docs/1.3.0/user_doc/hardware-environment.html b/en-us/docs/1.3.0/user_doc/hardware-environment.html
new file mode 100644
index 0000000..c706f83
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/hardware-environment.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="hardware-environment" />
+	<meta name="description" content="hardware-environment" />
+	<!-- page tab title -->
+	<title>hardware-environment</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>
+<h2>1. Linux operating system version requirements</h2>
+<table>
+<thead>
+<tr>
+<th style="text-align:left">OS</th>
+<th style="text-align:center">Version</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:left">Red Hat Enterprise Linux</td>
+<td style="text-align:center">7.0 and above</td>
+</tr>
+<tr>
+<td style="text-align:left">CentOS</td>
+<td style="text-align:center">7.0 and above</td>
+</tr>
+<tr>
+<td style="text-align:left">Oracle Enterprise Linux</td>
+<td style="text-align:center">7.0 and above</td>
+</tr>
+<tr>
+<td style="text-align:left">Ubuntu LTS</td>
+<td style="text-align:center">16.04 and above</td>
+</tr>
+</tbody>
+</table>
+<blockquote>
+<p><strong>Attention:</strong>
+The above Linux operating systems can run on physical servers and mainstream virtualization environments such as VMware, KVM, and XEN.</p>
+</blockquote>
+<h2>2. Recommended server configuration</h2>
+<p>DolphinScheduler supports 64-bit hardware platforms with Intel x86-64 architecture. The following recommendation is made for server hardware configuration in a production environment:</p>
+<h3>Production Environment</h3>
+<table>
+<thead>
+<tr>
+<th><strong>CPU</strong></th>
+<th><strong>MEM</strong></th>
+<th><strong>HD</strong></th>
+<th><strong>NIC</strong></th>
+<th><strong>Num</strong></th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>4 core+</td>
+<td>8 GB+</td>
+<td>SAS</td>
+<td>GbE</td>
+<td>1+</td>
+</tr>
+</tbody>
+</table>
+<blockquote>
+<p><strong>Attention:</strong></p>
+<ul>
+<li>The above recommended configuration is the minimum for deploying DolphinScheduler; higher configurations are strongly recommended for production environments.</li>
+<li>A hard disk of more than 50 GB is recommended, with the system disk and data disk separated.</li>
+</ul>
+</blockquote>
+<h2>3. Network requirements</h2>
+<p>DolphinScheduler uses the following network ports for normal operation:</p>
+<table>
+<thead>
+<tr>
+<th>Server</th>
+<th>Port</th>
+<th>Desc</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>MasterServer</td>
+<td>5566</td>
+<td>Not a communication port; only requires that the local port does not conflict</td>
+</tr>
+<tr>
+<td>WorkerServer</td>
+<td>7788</td>
+<td>Not a communication port; only requires that the local port does not conflict</td>
+</tr>
+<tr>
+<td>ApiApplicationServer</td>
+<td>12345</td>
+<td>Backend communication port</td>
+</tr>
+<tr>
+<td>nginx</td>
+<td>8888</td>
+<td>The port for DolphinScheduler UI</td>
+</tr>
+</tbody>
+</table>
+<blockquote>
+<p><strong>Attention:</strong></p>
+<ul>
+<li>MasterServer and WorkerServer do not need to communicate with each other over the network; they only require that their local ports do not conflict.</li>
+<li>Administrators can adjust relevant ports on the network side and host-side according to the deployment plan of DolphinScheduler components in the actual environment.</li>
+</ul>
+</blockquote>
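+<p>For example, on a CentOS 7 host running firewalld, opening the API and UI ports from the table above could look like this (a sketch; open only the ports your deployment actually exposes):</p>
+<pre><code class="language-shell">firewall-cmd --permanent --add-port=12345/tcp
+firewall-cmd --permanent --add-port=8888/tcp
+firewall-cmd --reload
+</code></pre>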
+<h2>4. Browser requirements</h2>
+<p>DolphinScheduler recommends Chrome, or the latest browsers using the Chrome kernel, for accessing the front-end visual operator page.</p>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/1.2.0/user_doc/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href=" [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.0/user_doc/hardware-environment.json b/en-us/docs/1.3.0/user_doc/hardware-environment.json
new file mode 100644
index 0000000..0e68fb4
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/hardware-environment.json
@@ -0,0 +1,6 @@
+{
+  "filename": "hardware-environment.md",
+  "__html": "<h1>Hareware Environment</h1>\n<p>DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.</p>\n<h2>1. Linux operating system version requirements</h2>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:left\">OS</th>\n<th style=\"text-align:center\">Version</th>\n</tr>\n</thead>\n [...]
+  "link": "/en-us/docs/1.3.0/user_doc/hardware-environment.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/en-us/docs/1.3.0/user_doc/standalone-deployment.html b/en-us/docs/1.3.0/user_doc/standalone-deployment.html
new file mode 100644
index 0000000..17ca3bf
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/standalone-deployment.html
@@ -0,0 +1,393 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="standalone-deployment" />
+	<meta name="description" content="standalone-deployment" />
+	<!-- page tab title -->
+	<title>standalone-deployment</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/en-us/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">中</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass ant [...]
+<h1>1、Before you begin (please install the required basic software yourself)</h1>
+<ul>
+<li>PostgreSQL (8.2.15+) or MySQL (5.7) : Choose one</li>
+<li><a href="https://www.oracle.com/technetwork/java/javase/downloads/index.html">JDK</a> (1.8+) : Required. Double-check that the JAVA_HOME and PATH environment variables are configured in /etc/profile</li>
+<li>ZooKeeper (3.4.6+) : Required</li>
+<li>Hadoop (2.6+) or MinIO : Optional. If you need the resource upload function, a single machine can use a local file directory as the upload folder (this does not require deploying Hadoop). Of course, you can also choose to upload to Hadoop or MinIO.</li>
+</ul>
+<pre><code class="language-markdown"> Tip: DolphinScheduler itself does not rely on Hadoop, Hive, or Spark; it only uses their clients to run the corresponding tasks.
+</code></pre>
+<h1>2、Download the binary package.</h1>
+<ul>
+<li>Please download the latest version of the default installation package to the server deployment directory. For example, use /opt/dolphinscheduler as the installation and deployment directory. Download address: <a href="https://dist.apache.org/repos/dist/dev/incubator/dolphinscheduler/1.3.0/apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin.tar.gz">Download</a>. Move the package to the installation and deployment directory and unzip it.</li>
+</ul>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> Create the deployment directory. Do not choose a high-privilege directory such as /root or /home as the deployment directory.</span>
+mkdir -p /opt/dolphinscheduler;
+cd /opt/dolphinscheduler;
+<span class="hljs-meta">#</span><span class="bash"> unzip</span>
+tar -zxvf apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin.tar.gz -C /opt/dolphinscheduler;
+
+mv apache-dolphinscheduler-incubating-1.3.0-dolphinscheduler-bin  dolphinscheduler-bin
+</code></pre>
+<h1>3、Create deployment user and hosts mapping</h1>
+<ul>
+<li>Create a deployment user on <strong>all</strong> deployment machines, and be sure to configure passwordless sudo for it. If we plan to deploy DolphinScheduler on 4 machines: ds1, ds2, ds3, and ds4, we first need to create a deployment user on each machine.</li>
+</ul>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> To create a user, you need to <span class="hljs-built_in">log</span> <span class="hljs-keyword">in</span> as root and <span class="hljs-built_in">set</span> the deployment user name. Please modify it yourself. The following uses dolphinscheduler as an example.</span>
+useradd dolphinscheduler;
+<span class="hljs-meta">
+#</span><span class="bash"> Set the user password, please modify it yourself. The following takes dolphinscheduler123 as an example.</span>
+echo "dolphinscheduler123" | passwd --stdin dolphinscheduler
+<span class="hljs-meta">
+#</span><span class="bash"> Configure sudo passwordless</span>
+echo 'dolphinscheduler  ALL=(ALL)  NOPASSWD: ALL' &gt;&gt; /etc/sudoers
+sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
+
+</code></pre>
+<pre><code> Notes:
+ - Because the task execution service uses 'sudo -u {linux-user}' to switch between different Linux users to run multi-tenant jobs, the deployment user needs passwordless sudo permissions. First-time learners can ignore this note if they don't understand it yet.
+ - If the line &quot;Defaults requiretty&quot; appears in the &quot;/etc/sudoers&quot; file, comment it out as well.
+ - If you need to use resource upload, grant the deployment user permission to operate on the local file system, HDFS, or MinIO.
+</code></pre>
+<h1>4、Configure the hosts mapping, SSH access, and directory permissions</h1>
+<ul>
+<li>
+<p>Use the first machine (hostname is ds1) as the deployment machine, configure the hosts of all machines to be deployed on ds1, and login as root on ds1.</p>
+<pre><code class="language-shell">vi /etc/hosts
+<span class="hljs-meta">
+#</span><span class="bash">add ip hostname</span>
+192.168.xxx.xxx ds1
+192.168.xxx.xxx ds2
+192.168.xxx.xxx ds3
+192.168.xxx.xxx ds4
+</code></pre>
+<p><em>Note: Please delete or comment out the line 127.0.0.1</em></p>
+</li>
+<li>
+<p>Sync /etc/hosts on ds1 to all deployment machines</p>
+<pre><code class="language-shell">for ip in ds2 ds3;     # Please replace ds2 ds3 here with the hostname of machines you want to deploy
+do
+    sudo scp -r /etc/hosts  $ip:/etc/          # Need to enter root password during operation
+done
+</code></pre>
+<p><em>Note: you can use <code>sshpass -p xxx sudo scp -r /etc/hosts $ip:/etc/</code> to avoid typing the password.</em></p>
+<blockquote>
+<p>Install sshpass on CentOS:</p>
+<ol>
+<li>
+<p>Install epel</p>
+<p>yum install -y epel-release</p>
+<p>yum repolist</p>
+</li>
+<li>
+<p>After installing epel, you can install sshpass</p>
+<p>yum install -y sshpass</p>
+</li>
+</ol>
+</blockquote>
+</li>
+<li>
+<p>On ds1, switch to the deployment user and configure ssh passwordless login</p>
+<pre><code class="language-shell"> su dolphinscheduler;
+
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub &gt;&gt; ~/.ssh/authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+</code></pre>
+</li>
+</ul>
+<p>Note: <em>If the configuration succeeded, the dolphinscheduler user will not be prompted for a password when executing the command <code>ssh localhost</code></em></p>
+<ul>
+<li>
+<p>On ds1, configure SSH so that the deployment user dolphinscheduler can connect to the other machines to be deployed.</p>
+<pre><code class="language-shell">su dolphinscheduler;
+for ip in ds2 ds3;     # Please replace ds2 ds3 here with the hostname of the machine you want to deploy.
+do
+    ssh-copy-id  $ip   # You need to manually enter the password of the dolphinscheduler user during the operation.
+done
+<span class="hljs-meta">#</span><span class="bash"> You can use `sshpass -p xxx ssh-copy-id <span class="hljs-variable">$ip</span>` to avoid typing the password.</span>
+</code></pre>
+</li>
+<li>
+<p>On ds1, modify the directory permissions so that the deployment user has operation permissions on the dolphinscheduler-backend directory.</p>
+<pre><code class="language-shell">sudo chown -R dolphinscheduler:dolphinscheduler dolphinscheduler-backend
+</code></pre>
+</li>
+</ul>
+<h1>5、Database initialization</h1>
+<ul>
+<li>Log in to the database. The default database is PostgreSQL. If you select MySQL, you need to add the mysql-connector-java driver package to the lib directory of DolphinScheduler.</li>
+</ul>
+<pre><code>mysql -uroot -p
+</code></pre>
+<ul>
+<li>After entering the database command line window, execute the database initialization command and set the user and password. <strong>Note: {user} and {password} need to be replaced with a specific database username and password</strong></li>
+</ul>
+<pre><code class="language-mysql">   mysql&gt; CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+   mysql&gt; GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
+   mysql&gt; GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY '{password}';
+   mysql&gt; flush privileges;
+</code></pre>
+<ul>
+<li>
+<p>Create tables and import basic data</p>
+<ul>
+<li>Modify the following configuration in datasource.properties under the conf directory</li>
+</ul>
+<pre><code class="language-shell">  vi conf/datasource.properties
+</code></pre>
+<ul>
+<li>If you choose MySQL, please comment out the PostgreSQL configuration (and vice versa). You also need to manually add the <a href="https://downloads.mysql.com/archives/c-j/">mysql-connector-java driver jar</a> package to the lib directory, and then configure the database connection information correctly.</li>
+</ul>
+<pre><code class="language-properties"><span class="hljs-comment">  #postgre</span>
+<span class="hljs-comment">  #spring.datasource.driver-class-name=org.postgresql.Driver</span>
+<span class="hljs-comment">  #spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler</span>
+<span class="hljs-comment">  # mysql</span>
+  <span class="hljs-meta">spring.datasource.driver-class-name</span>=<span class="hljs-string">com.mysql.jdbc.Driver</span>
+  <span class="hljs-meta">spring.datasource.url</span>=<span class="hljs-string">jdbc:mysql://xxx:3306/dolphinscheduler?useUnicode=true&amp;characterEncoding=UTF-8&amp;allowMultiQueries=true     # Replace the correct IP address</span>
+  <span class="hljs-meta">spring.datasource.username</span>=<span class="hljs-string">xxx						# replace the correct {user} value</span>
+  <span class="hljs-meta">spring.datasource.password</span>=<span class="hljs-string">xxx						# replace the correct {password} value</span>
+</code></pre>
+<ul>
+<li>After modifying and saving, execute the create table and import data script in the script directory.</li>
+</ul>
+<pre><code class="language-shell">sh script/create-dolphinscheduler.sh
+</code></pre>
+</li>
+</ul>
+<p><em>Note: If executing the above script reports a &quot;/bin/java: No such file or directory&quot; error, please configure the JAVA_HOME and PATH variables in /etc/profile</em></p>
+<h1>6、Modify runtime parameters.</h1>
+<ul>
+<li>
+<p>Modify the environment variables in the <code>dolphinscheduler_env.sh</code> file in the 'conf/env' directory (taking software installed under '/opt/soft' as an example)</p>
+<pre><code class="language-shell">    export HADOOP_HOME=/opt/soft/hadoop
+    export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+    #export SPARK_HOME1=/opt/soft/spark1
+    export SPARK_HOME2=/opt/soft/spark2
+    export PYTHON_HOME=/opt/soft/python
+    export JAVA_HOME=/opt/soft/java
+    export HIVE_HOME=/opt/soft/hive
+    export FLINK_HOME=/opt/soft/flink
+    export DATAX_HOME=/opt/soft/datax/bin/datax.py
+    export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
+</code></pre>
+<p><code>Note: This step is very important. For example, JAVA_HOME and PATH must be configured. Those that are not used can be ignored or commented out.</code></p>
+</li>
+<li>
+<p>Create a soft link from the JDK to /usr/bin/java (still using JAVA_HOME=/opt/soft/java as an example)</p>
+<pre><code class="language-shell">sudo ln -s /opt/soft/java/bin/java /usr/bin/java
+</code></pre>
+</li>
+<li>
+<p>Modify the parameters in the one-click deployment config file <code>conf/config/install_config.conf</code>, pay special attention to the configuration of the following parameters.</p>
+<pre><code class="language-shell"><span class="hljs-meta">#</span><span class="bash"> choose mysql or postgresql</span>
+dbtype="mysql"
+<span class="hljs-meta">
+#</span><span class="bash"> Database connection address and port</span>
+dbhost="localhost:3306"
+<span class="hljs-meta">
+#</span><span class="bash"> database name</span>
+dbname="dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> database username</span>
+username="xxx"
+<span class="hljs-meta">
+#</span><span class="bash"> database password</span>
+<span class="hljs-meta">#</span><span class="bash"> NOTICE: <span class="hljs-keyword">if</span> there are special characters, please use the \ to escape, <span class="hljs-keyword">for</span> example, `[` escape to `\[`</span>
+passowrd="xxx"
+<span class="hljs-meta">
+#</span><span class="bash"> Zookeeper address, e.g. localhost:2181; remember to include the port 2181</span>
+zkQuorum="localhost:2181"
+<span class="hljs-meta">
+#</span><span class="bash"> Note: the target installation path for dolphinscheduler; please do not configure it to be the same as the current path (pwd)</span>
+installPath="/opt/soft/dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> deployment user</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: the deployment user needs sudo privileges and permission to operate hdfs. If hdfs is enabled, the root directory needs to be created manually</span>
+deployUser="dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> alert config,take QQ email <span class="hljs-keyword">for</span> example</span>
+<span class="hljs-meta">#</span><span class="bash"> mail protocol</span>
+mailProtocol="SMTP"
+<span class="hljs-meta">
+#</span><span class="bash"> mail server host</span>
+mailServerHost="smtp.qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail server port</span>
+<span class="hljs-meta">#</span><span class="bash"> note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.</span>
+mailServerPort="25"
+<span class="hljs-meta">
+#</span><span class="bash"> mail sender</span>
+mailSender="xxx@qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail user</span>
+mailUser="xxx@qq.com"
+<span class="hljs-meta">
+#</span><span class="bash"> mail sender password</span>
+<span class="hljs-meta">#</span><span class="bash"> note: mailPassword is the email service authorization code, not the email login password.</span>
+mailPassword="xxx"
+<span class="hljs-meta">
+#</span><span class="bash"> Whether TLS mail protocol is supported,<span class="hljs-literal">true</span> is supported and <span class="hljs-literal">false</span> is not supported</span>
+starttlsEnable="true"
+<span class="hljs-meta">
+#</span><span class="bash"> Whether TLS mail protocol is supported,<span class="hljs-literal">true</span> is supported and <span class="hljs-literal">false</span> is not supported。</span>
+<span class="hljs-meta">#</span><span class="bash"> note: only one of TLS and SSL can be <span class="hljs-keyword">in</span> the <span class="hljs-literal">true</span> state.</span>
+sslEnable="false"
+<span class="hljs-meta">
+#</span><span class="bash"> note: sslTrust is the same as mailServerHost</span>
+sslTrust="smtp.qq.com"
+<span class="hljs-meta">
+
+#</span><span class="bash"> resource storage <span class="hljs-built_in">type</span>:HDFS,S3,NONE</span>
+resourceStorageType="HDFS"
+<span class="hljs-meta">
+#</span><span class="bash"> here is an example of saving to a <span class="hljs-built_in">local</span> file system</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: If you want to upload to HDFS and the NameNode has HA enabled, you need to put core-site.xml and hdfs-site.xml in the conf directory (in this example under /opt/dolphinscheduler/conf) and configure the namenode cluster name; if the NameNode is not HA, modify this to a specific IP or host name.</span>
+defaultFS="file:///data/dolphinscheduler"
+<span class="hljs-meta">
+
+#</span><span class="bash"> <span class="hljs-keyword">if</span> not use hadoop resourcemanager, please keep default value; <span class="hljs-keyword">if</span> resourcemanager HA <span class="hljs-built_in">enable</span>, please <span class="hljs-built_in">type</span> the HA ips ; <span class="hljs-keyword">if</span> resourcemanager is single, make this value empty</span>
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+<span class="hljs-meta">
+#</span><span class="bash"> <span class="hljs-keyword">if</span> resourcemanager HA <span class="hljs-built_in">enable</span> or not use resourcemanager, please skip this value setting; If resourcemanager is single, you only need to replace yarnIp1 to actual resourcemanager hostname.</span>
+singleYarnIp="yarnIp1"
+<span class="hljs-meta">
+#</span><span class="bash"> resource storage root path on HDFS/S3; resource files will be stored under this path. Please make sure the directory exists on hdfs and has <span class="hljs-built_in">read</span>/write permissions. /dolphinscheduler is recommended</span>
+resourceUploadPath="/data/dolphinscheduler"
+<span class="hljs-meta">
+#</span><span class="bash"> specify the user who have permissions to create directory under HDFS/S3 root path</span>
+hdfsRootUser="hdfs"
+<span class="hljs-meta">
+
+
+#</span><span class="bash"> On <span class="hljs-built_in">which</span> machines to deploy the DS service, choose localhost <span class="hljs-keyword">for</span> this machine</span>
+ips="localhost"
+<span class="hljs-meta">
+#</span><span class="bash"> ssh port, default 22</span>
+<span class="hljs-meta">#</span><span class="bash"> Note: <span class="hljs-keyword">if</span> ssh port is not default, modify here</span>
+sshPort="22"
+<span class="hljs-meta">
+#</span><span class="bash"> run master machine</span>
+masters="localhost"
+<span class="hljs-meta">
+#</span><span class="bash"> run worker machine</span>
+workers="localhost"
+<span class="hljs-meta">
+#</span><span class="bash"> run alert machine</span>
+alertServer="localhost"
+<span class="hljs-meta">
+#</span><span class="bash"> run api machine</span>
+apiServers="localhost"
+
+</code></pre>
+<p><em>Attention:</em></p>
+<ul>
+<li>If you need to upload resources to the Hadoop cluster and the NameNode of the Hadoop cluster is configured with HA, you need to enable HDFS resource upload and copy the core-site.xml and hdfs-site.xml from the Hadoop cluster to /opt/dolphinscheduler/conf. If the NameNode is not HA, skip this step. (A pre-flight check sketch for these settings follows this list.)</li>
+</ul>
+</li>
+</ul>
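+<p>Before running the automated deployment, it is worth verifying the values configured above by hand. The following is a minimal pre-flight sketch, assuming the example values used in this section (MySQL on localhost:3306, ZooKeeper on localhost:2181, and the /usr/bin/java soft link created earlier); adjust it to your actual settings:</p>
+<pre><code class="language-shell"># the java soft link resolves and runs
+/usr/bin/java -version
+
+# the database is reachable with the configured credentials
+mysql -h localhost -P 3306 -u xxx -p -e "use dolphinscheduler;"
+
+# ZooKeeper answers on the configured quorum address ("imok" means healthy)
+echo ruok | nc localhost 2181
+</code></pre>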
+<h1>7、Automated Deployment</h1>
+<ul>
+<li>
+<p>Switch to the deployment user and execute the one-click deployment script</p>
+<p><code>sh install.sh</code></p>
+<pre><code>Note:
+For the first deployment, the following message may appear during the `3, stop server` step; it can be safely ignored, since there are no services to stop yet:
+sh: bin/dolphinscheduler-daemon.sh: No such file or directory
+</code></pre>
+</li>
+<li>
+<p>After the script completes, the following 5 services will be started. Use the <code>jps</code> command to check whether they are running (<code>jps</code> ships with the JDK)</p>
+</li>
+</ul>
+<pre><code class="language-aidl">    MasterServer         ----- master service
+    WorkerServer         ----- worker service
+    LoggerServer         ----- logger service
+    ApiApplicationServer ----- api service
+    AlertServer          ----- alert service
+</code></pre>
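+<p>A quick one-liner to confirm that all five processes are present (a simple check built from standard tools):</p>
+<pre><code class="language-shell">jps | grep -cE 'MasterServer|WorkerServer|LoggerServer|ApiApplicationServer|AlertServer'   # expect: 5
+</code></pre>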
+<p>If the above services are started normally, the automatic deployment is successful.</p>
+<p>After the deployment is successful, you can view the logs. The logs are stored in the logs folder.</p>
+<pre><code class="language-log"> logs/
+    ├── dolphinscheduler-alert-server.log
+    ├── dolphinscheduler-master-server.log
+    ├── dolphinscheduler-worker-server.log
+    ├── dolphinscheduler-api-server.log
+    └── dolphinscheduler-logger-server.log
+</code></pre>
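+<p>For example, to follow the master log while verifying the deployment (the other log files work the same way):</p>
+<pre><code class="language-shell">tail -f logs/dolphinscheduler-master-server.log
+</code></pre>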
+<h1>8、Login</h1>
+<ul>
+<li>
+<p>Access the front-end page at the following address, replacing the IP with that of your own API server:
+<a href="http://192.168.xx.xx:12345/dolphinscheduler">http://192.168.xx.xx:12345/dolphinscheduler</a></p>
+ <p align="center">
+   <img src="/img/login.png" width="60%" />
+ </p>
+</li>
+</ul>
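+<p>If the page does not load, a quick reachability check from the deployment machine helps rule out network or service issues (substitute your own API server address):</p>
+<pre><code class="language-shell">curl -I http://192.168.xx.xx:12345/dolphinscheduler/
+</code></pre>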
+<h1>9、Start and stop service</h1>
+<ul>
+<li>
+<p>Stop all services</p>
+<p><code>sh ./bin/stop-all.sh</code></p>
+</li>
+<li>
+<p>Start all services</p>
+<p><code>sh ./bin/start-all.sh</code></p>
+</li>
+<li>
+<p>Start and stop master service</p>
+</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start master-server
+sh ./bin/dolphinscheduler-daemon.sh stop master-server
+</code></pre>
+<ul>
+<li>Start and stop worker Service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start worker-server
+sh ./bin/dolphinscheduler-daemon.sh stop worker-server
+</code></pre>
+<ul>
+<li>Start and stop api Service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start api-server
+sh ./bin/dolphinscheduler-daemon.sh stop api-server
+</code></pre>
+<ul>
+<li>Start and stop logger Service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start logger-server
+sh ./bin/dolphinscheduler-daemon.sh stop logger-server
+</code></pre>
+<ul>
+<li>Start and stop alert service</li>
+</ul>
+<pre><code class="language-shell">sh ./bin/dolphinscheduler-daemon.sh start alert-server
+sh ./bin/dolphinscheduler-daemon.sh stop alert-server
+</code></pre>
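+<p>Since every service follows the same daemon-script pattern, restarting all five can be scripted in one loop (a sketch; run it from the installation directory as the deployment user):</p>
+<pre><code class="language-shell">for srv in master-server worker-server logger-server api-server alert-server; do
+    sh ./bin/dolphinscheduler-daemon.sh stop  "$srv"
+    sh ./bin/dolphinscheduler-daemon.sh start "$srv"
+done
+</code></pre>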
+<p><code>Note: Please refer to the &quot;Architecture Design&quot; section for service usage</code></p>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>Documentation</dt><dd><a href="/en-us/docs/1.2.0/user_doc/architecture-design.html" target="_self">Overview</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/quick-start.html" target="_self">Quick start</a></dd><dd><a href="/en-us/docs/1.2.0/user_doc/backend-development.html" target="_self">Developer guide</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href=" [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/en-us/docs/1.3.0/user_doc/standalone-deployment.json b/en-us/docs/1.3.0/user_doc/standalone-deployment.json
new file mode 100644
index 0000000..a4ceceb
--- /dev/null
+++ b/en-us/docs/1.3.0/user_doc/standalone-deployment.json
@@ -0,0 +1,6 @@
+{
+  "filename": "standalone-deployment.md",
+  "__html": "<h1>Standalone Deployment</h1>\n<h1>1、Before you begin (please install requirement basic software by yourself)</h1>\n<ul>\n<li>PostgreSQL (8.2.15+) or MySQL (5.7)  :  Choose One</li>\n<li><a href=\"https://www.oracle.com/technetwork/java/javase/downloads/index.html\">JDK</a> (1.8+) :  Required. Double-check configure JAVA_HOME and PATH environment variables in /etc/profile</li>\n<li>ZooKeeper (3.4.6+) :Required</li>\n<li>Hadoop (2.6+) or MinIO :Optional. If you need to uploa [...]
+  "link": "/en-us/docs/1.3.0/user_doc/standalone-deployment.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/metadata-1.3.html b/zh-cn/docs/1.3.0/user_doc/metadata-1.3.html
new file mode 100644
index 0000000..fbba8fb
--- /dev/null
+++ b/zh-cn/docs/1.3.0/user_doc/metadata-1.3.html
@@ -0,0 +1,733 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="metadata-1.3" />
+	<meta name="description" content="metadata-1.3" />
+	<!-- 网页标签标题 -->
+	<title>metadata-1.3</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/zh-cn/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">En</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass an [...]
+<p><a name="25Ald"></a></p>
+<h3>表概览</h3>
+<table>
+<thead>
+<tr>
+<th style="text-align:center">表名</th>
+<th style="text-align:center">表信息</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align:center">t_ds_access_token</td>
+<td style="text-align:center">访问ds后端的token</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_alert</td>
+<td style="text-align:center">告警信息</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_alertgroup</td>
+<td style="text-align:center">告警组</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_command</td>
+<td style="text-align:center">执行命令</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_datasource</td>
+<td style="text-align:center">数据源</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_error_command</td>
+<td style="text-align:center">错误命令</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_process_definition</td>
+<td style="text-align:center">流程定义</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_process_instance</td>
+<td style="text-align:center">流程实例</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_project</td>
+<td style="text-align:center">项目</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_queue</td>
+<td style="text-align:center">队列</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_datasource_user</td>
+<td style="text-align:center">用户关联数据源</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_process_instance</td>
+<td style="text-align:center">子流程</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_project_user</td>
+<td style="text-align:center">用户关联项目</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_resources_user</td>
+<td style="text-align:center">用户关联资源</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_udfs_user</td>
+<td style="text-align:center">用户关联UDF函数</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_relation_user_alertgroup</td>
+<td style="text-align:center">用户关联告警组</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_resources</td>
+<td style="text-align:center">资源文件</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_schedules</td>
+<td style="text-align:center">流程定时调度</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_session</td>
+<td style="text-align:center">用户登录的session</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_task_instance</td>
+<td style="text-align:center">任务实例</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_tenant</td>
+<td style="text-align:center">租户</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_udfs</td>
+<td style="text-align:center">UDF资源</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_user</td>
+<td style="text-align:center">用户</td>
+</tr>
+<tr>
+<td style="text-align:center">t_ds_version</td>
+<td style="text-align:center">ds版本信息</td>
+</tr>
+</tbody>
+</table>
+<p><a name="VNVGr"></a></p>
+<h3>用户	队列	数据源</h3>
+<p><img src="/img/metadata-erd/user-queue-datasource.png" alt="image.png"></p>
+<ul>
+<li>一个租户下可以有多个用户<br /></li>
+<li>t_ds_user中的queue字段存储的是队列表中的queue_name信息,t_ds_tenant下存的是queue_id,在流程定义执行过程中,用户队列优先级最高,用户队列为空则采用租户队列<br /></li>
+<li>t_ds_datasource表中的user_id字段表示创建该数据源的用户,t_ds_relation_datasource_user中的user_id表示,对数据源有权限的用户<br />
+<a name="HHyGV"></a></li>
+</ul>
+<h3>项目	资源	告警</h3>
+<p><img src="/img/metadata-erd/project-resource-alert.png" alt="image.png"></p>
+<ul>
+<li>一个用户可以有多个项目,用户项目授权通过t_ds_relation_project_user表完成project_id和user_id的关系绑定<br /></li>
+<li>t_ds_project表中的user_id表示创建该项目的用户,t_ds_relation_project_user表中的user_id表示对项目有权限的用户<br /></li>
+<li>t_ds_resources表中的user_id表示创建该资源的用户,t_ds_relation_resources_user中的user_id表示对资源有权限的用户<br /></li>
+<li>t_ds_udfs表中的user_id表示创建该UDF的用户,t_ds_relation_udfs_user表中的user_id表示对UDF有权限的用户<br />
+<a name="Bg2Sn"></a></li>
+</ul>
+<h3>命令	流程	任务</h3>
+<p><img src="/img/metadata-erd/command.png" alt="image.png"><br /><img src="/img/metadata-erd/process-task.png" alt="image.png"></p>
+<ul>
+<li>一个项目有多个流程定义,一个流程定义可以生成多个流程实例,一个流程实例可以生成多个任务实例<br /></li>
+<li>t_ds_schedules表存放流程定义的定时调度信息<br /></li>
+<li>t_ds_relation_process_instance表存放的数据用于处理流程定义中含有子流程的情况,parent_process_instance_id表示含有子流程的主流程实例id,process_instance_id表示子流程实例的id,parent_task_instance_id表示子流程节点的任务实例id,流程实例表和任务实例表分别对应t_ds_process_instance表和t_ds_task_instance表
+<a name="Pv25P"></a></li>
+</ul>
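+<p>An illustrative query (a sketch, assuming a MySQL metadata database named dolphinscheduler; table and column names are taken from the schemas below) that walks the relations just described, from process definition to process instance to task instance:</p>
+<pre><code class="language-shell">mysql -u xxx -p dolphinscheduler -e "
+SELECT d.name  AS definition,
+       i.name  AS instance,
+       t.name  AS task,
+       t.state AS task_state
+FROM   t_ds_process_definition d
+JOIN   t_ds_process_instance   i ON i.process_definition_id = d.id
+JOIN   t_ds_task_instance      t ON t.process_instance_id   = i.id
+ORDER  BY i.id, t.id;"
+</code></pre>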
+<h3>核心表Schema</h3>
+<p><a name="32Jzd"></a></p>
+<h4>t_ds_process_definition</h4>
+<table>
+<thead>
+<tr>
+<th>字段</th>
+<th>类型</th>
+<th>注释</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>主键</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>流程定义名称</td>
+</tr>
+<tr>
+<td>version</td>
+<td>int</td>
+<td>流程定义版本</td>
+</tr>
+<tr>
+<td>release_state</td>
+<td>tinyint</td>
+<td>流程定义的发布状态:0 未上线  1已上线</td>
+</tr>
+<tr>
+<td>project_id</td>
+<td>int</td>
+<td>项目id</td>
+</tr>
+<tr>
+<td>user_id</td>
+<td>int</td>
+<td>流程定义所属用户id</td>
+</tr>
+<tr>
+<td>process_definition_json</td>
+<td>longtext</td>
+<td>流程定义json串</td>
+</tr>
+<tr>
+<td>description</td>
+<td>text</td>
+<td>流程定义描述</td>
+</tr>
+<tr>
+<td>global_params</td>
+<td>text</td>
+<td>全局参数</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>流程是否可用:0 不可用,1 可用</td>
+</tr>
+<tr>
+<td>locations</td>
+<td>text</td>
+<td>节点坐标信息</td>
+</tr>
+<tr>
+<td>connects</td>
+<td>text</td>
+<td>节点连线信息</td>
+</tr>
+<tr>
+<td>receivers</td>
+<td>text</td>
+<td>收件人</td>
+</tr>
+<tr>
+<td>receivers_cc</td>
+<td>text</td>
+<td>抄送人</td>
+</tr>
+<tr>
+<td>create_time</td>
+<td>datetime</td>
+<td>创建时间</td>
+</tr>
+<tr>
+<td>timeout</td>
+<td>int</td>
+<td>超时时间</td>
+</tr>
+<tr>
+<td>tenant_id</td>
+<td>int</td>
+<td>租户id</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>更新时间</td>
+</tr>
+</tbody>
+</table>
+<p><a name="e6jfz"></a></p>
+<h4>t_ds_process_instance</h4>
+<table>
+<thead>
+<tr>
+<th>字段</th>
+<th>类型</th>
+<th>注释</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>主键</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>流程实例名称</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>流程定义id</td>
+</tr>
+<tr>
+<td>state</td>
+<td>tinyint</td>
+<td>流程实例状态:0 提交成功,1 正在运行,2 准备暂停,3 暂停,4 准备停止,5 停止,6 失败,7 成功,8 需要容错,9 kill,10 等待线程,11 等待依赖完成</td>
+</tr>
+<tr>
+<td>recovery</td>
+<td>tinyint</td>
+<td>流程实例容错标识:0 正常,1 需要被容错重启</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>流程实例开始时间</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>流程实例结束时间</td>
+</tr>
+<tr>
+<td>run_times</td>
+<td>int</td>
+<td>流程实例运行次数</td>
+</tr>
+<tr>
+<td>host</td>
+<td>varchar</td>
+<td>流程实例所在的机器</td>
+</tr>
+<tr>
+<td>command_type</td>
+<td>tinyint</td>
+<td>命令类型:0 启动工作流,1 从当前节点开始执行,2 恢复被容错的工作流,3 恢复暂停流程,4 从失败节点开始执行,5 补数,6 调度,7 重跑,8 暂停,9 停止,10 恢复等待线程</td>
+</tr>
+<tr>
+<td>command_param</td>
+<td>text</td>
+<td>命令的参数(json格式)</td>
+</tr>
+<tr>
+<td>task_depend_type</td>
+<td>tinyint</td>
+<td>节点依赖类型:0 当前节点,1 向前执行,2 向后执行</td>
+</tr>
+<tr>
+<td>max_try_times</td>
+<td>tinyint</td>
+<td>最大重试次数</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>失败策略 0 失败后结束,1 失败后继续</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>告警组id</td>
+</tr>
+<tr>
+<td>schedule_time</td>
+<td>datetime</td>
+<td>预期运行时间</td>
+</tr>
+<tr>
+<td>command_start_time</td>
+<td>datetime</td>
+<td>开始命令时间</td>
+</tr>
+<tr>
+<td>global_params</td>
+<td>text</td>
+<td>全局参数(固化流程定义的参数)</td>
+</tr>
+<tr>
+<td>process_instance_json</td>
+<td>longtext</td>
+<td>流程实例json(copy的流程定义的json)</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>是否可用,1 可用,0不可用</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>timestamp</td>
+<td>更新时间</td>
+</tr>
+<tr>
+<td>is_sub_process</td>
+<td>int</td>
+<td>是否是子工作流 1 是,0 不是</td>
+</tr>
+<tr>
+<td>executor_id</td>
+<td>int</td>
+<td>命令执行用户</td>
+</tr>
+<tr>
+<td>locations</td>
+<td>text</td>
+<td>节点坐标信息</td>
+</tr>
+<tr>
+<td>connects</td>
+<td>text</td>
+<td>节点连线信息</td>
+</tr>
+<tr>
+<td>history_cmd</td>
+<td>text</td>
+<td>历史命令,记录所有对流程实例的操作</td>
+</tr>
+<tr>
+<td>dependence_schedule_times</td>
+<td>text</td>
+<td>依赖节点的预估时间</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group_id</td>
+<td>int</td>
+<td>任务指定运行的worker分组</td>
+</tr>
+<tr>
+<td>timeout</td>
+<td>int</td>
+<td>超时时间</td>
+</tr>
+<tr>
+<td>tenant_id</td>
+<td>int</td>
+<td>租户id</td>
+</tr>
+</tbody>
+</table>
+<p><a name="IvHEc"></a></p>
+<h4>t_ds_task_instance</h4>
+<table>
+<thead>
+<tr>
+<th>字段</th>
+<th>类型</th>
+<th>注释</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>主键</td>
+</tr>
+<tr>
+<td>name</td>
+<td>varchar</td>
+<td>任务名称</td>
+</tr>
+<tr>
+<td>task_type</td>
+<td>varchar</td>
+<td>任务类型</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>流程定义id</td>
+</tr>
+<tr>
+<td>process_instance_id</td>
+<td>int</td>
+<td>流程实例id</td>
+</tr>
+<tr>
+<td>task_json</td>
+<td>longtext</td>
+<td>任务节点json</td>
+</tr>
+<tr>
+<td>state</td>
+<td>tinyint</td>
+<td>任务实例状态:0 提交成功,1 正在运行,2 准备暂停,3 暂停,4 准备停止,5 停止,6 失败,7 成功,8 需要容错,9 kill,10 等待线程,11 等待依赖完成</td>
+</tr>
+<tr>
+<td>submit_time</td>
+<td>datetime</td>
+<td>任务提交时间</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>任务开始时间</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>任务结束时间</td>
+</tr>
+<tr>
+<td>host</td>
+<td>varchar</td>
+<td>执行任务的机器</td>
+</tr>
+<tr>
+<td>execute_path</td>
+<td>varchar</td>
+<td>任务执行路径</td>
+</tr>
+<tr>
+<td>log_path</td>
+<td>varchar</td>
+<td>任务日志路径</td>
+</tr>
+<tr>
+<td>alert_flag</td>
+<td>tinyint</td>
+<td>是否告警</td>
+</tr>
+<tr>
+<td>retry_times</td>
+<td>int</td>
+<td>重试次数</td>
+</tr>
+<tr>
+<td>pid</td>
+<td>int</td>
+<td>进程pid</td>
+</tr>
+<tr>
+<td>app_link</td>
+<td>varchar</td>
+<td>yarn app id</td>
+</tr>
+<tr>
+<td>flag</td>
+<td>tinyint</td>
+<td>是否可用:0 不可用,1 可用</td>
+</tr>
+<tr>
+<td>retry_interval</td>
+<td>int</td>
+<td>重试间隔</td>
+</tr>
+<tr>
+<td>max_retry_times</td>
+<td>int</td>
+<td>最大重试次数</td>
+</tr>
+<tr>
+<td>task_instance_priority</td>
+<td>int</td>
+<td>任务实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group_id</td>
+<td>int</td>
+<td>任务指定运行的worker分组</td>
+</tr>
+</tbody>
+</table>
+<p><a name="pPQkU"></a></p>
+<h4>t_ds_schedules</h4>
+<table>
+<thead>
+<tr>
+<th>字段</th>
+<th>类型</th>
+<th>注释</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>主键</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>流程定义id</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>调度开始时间</td>
+</tr>
+<tr>
+<td>end_time</td>
+<td>datetime</td>
+<td>调度结束时间</td>
+</tr>
+<tr>
+<td>crontab</td>
+<td>varchar</td>
+<td>crontab 表达式</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>失败策略: 0 结束,1 继续</td>
+</tr>
+<tr>
+<td>user_id</td>
+<td>int</td>
+<td>用户id</td>
+</tr>
+<tr>
+<td>release_state</td>
+<td>tinyint</td>
+<td>状态:0 未上线,1 上线</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>告警组id</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group_id</td>
+<td>int</td>
+<td>任务指定运行的worker分组</td>
+</tr>
+<tr>
+<td>create_time</td>
+<td>datetime</td>
+<td>创建时间</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>更新时间</td>
+</tr>
+</tbody>
+</table>
+<p><a name="TkQzn"></a></p>
+<h4>t_ds_command</h4>
+<table>
+<thead>
+<tr>
+<th>字段</th>
+<th>类型</th>
+<th>注释</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>id</td>
+<td>int</td>
+<td>主键</td>
+</tr>
+<tr>
+<td>command_type</td>
+<td>tinyint</td>
+<td>命令类型:0 启动工作流,1 从当前节点开始执行,2 恢复被容错的工作流,3 恢复暂停流程,4 从失败节点开始执行,5 补数,6 调度,7 重跑,8 暂停,9 停止,10 恢复等待线程</td>
+</tr>
+<tr>
+<td>process_definition_id</td>
+<td>int</td>
+<td>流程定义id</td>
+</tr>
+<tr>
+<td>command_param</td>
+<td>text</td>
+<td>命令的参数(json格式)</td>
+</tr>
+<tr>
+<td>task_depend_type</td>
+<td>tinyint</td>
+<td>节点依赖类型:0 当前节点,1 向前执行,2 向后执行</td>
+</tr>
+<tr>
+<td>failure_strategy</td>
+<td>tinyint</td>
+<td>失败策略:0结束,1继续</td>
+</tr>
+<tr>
+<td>warning_type</td>
+<td>tinyint</td>
+<td>告警类型:0 不发,1 流程成功发,2 流程失败发,3 成功失败都发</td>
+</tr>
+<tr>
+<td>warning_group_id</td>
+<td>int</td>
+<td>告警组</td>
+</tr>
+<tr>
+<td>schedule_time</td>
+<td>datetime</td>
+<td>预期运行时间</td>
+</tr>
+<tr>
+<td>start_time</td>
+<td>datetime</td>
+<td>开始时间</td>
+</tr>
+<tr>
+<td>executor_id</td>
+<td>int</td>
+<td>执行用户id</td>
+</tr>
+<tr>
+<td>dependence</td>
+<td>varchar</td>
+<td>依赖字段</td>
+</tr>
+<tr>
+<td>update_time</td>
+<td>datetime</td>
+<td>更新时间</td>
+</tr>
+<tr>
+<td>process_instance_priority</td>
+<td>int</td>
+<td>流程实例优先级:0 Highest,1 High,2 Medium,3 Low,4 Lowest</td>
+</tr>
+<tr>
+<td>worker_group_id</td>
+<td>int</td>
+<td>任务指定运行的worker分组</td>
+</tr>
+</tbody>
+</table>
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>文档</dt><dd><a href="/zh-cn/docs/1.2.0/user_doc/architecture-design.html" target="_self">概览</a></dd><dd><a href="/zh-cn/docs/1.2.0/user_doc/quick-start.html" target="_self">快速开始</a></dd><dd><a href="/zh-cn/docs/1.2.0/user_doc/backend-development.html" target="_self">开发者指南</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href="http://www.apache.org" target="_se [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/metadata-1.3.json b/zh-cn/docs/1.3.0/user_doc/metadata-1.3.json
new file mode 100644
index 0000000..02008e9
--- /dev/null
+++ b/zh-cn/docs/1.3.0/user_doc/metadata-1.3.json
@@ -0,0 +1,6 @@
+{
+  "filename": "metadata-1.3.md",
+  "__html": "<h1>Dolphin Scheduler 1.2元数据文档</h1>\n<p><a name=\"25Ald\"></a></p>\n<h3>表概览</h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align:center\">表名</th>\n<th style=\"text-align:center\">表信息</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td style=\"text-align:center\">t_ds_access_token</td>\n<td style=\"text-align:center\">访问ds后端的token</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t_ds_alert</td>\n<td style=\"text-align:center\">告警信息</td>\n</tr>\n<tr>\n<td style=\"text-align:center\">t [...]
+  "link": "/zh-cn/docs/1.3.0/user_doc/metadata-1.3.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/standalone-deployment.html b/zh-cn/docs/1.3.0/user_doc/standalone-deployment.html
index 29e8f89..5e9a034 100644
--- a/zh-cn/docs/1.3.0/user_doc/standalone-deployment.html
+++ b/zh-cn/docs/1.3.0/user_doc/standalone-deployment.html
@@ -207,7 +207,7 @@ yarnHaIps="192.168.xx.xx,192.168.xx.xx"
 #</span><span class="bash"> 如果ResourceManager是HA或者没有使用到Yarn保持默认值即可;如果是单ResourceManager,请配置真实的ResourceManager主机名或者ip</span>
 singleYarnIp="yarnIp1"
 <span class="hljs-meta">
-#</span><span class="bash"> 资源上传根路径,主持HDFS和S3,由于hdfs支持本地文件系统,需要确保本地文件夹存在且有读写权限</span>
+#</span><span class="bash"> 资源上传根路径,支持HDFS和S3,由于hdfs支持本地文件系统,需要确保本地文件夹存在且有读写权限</span>
 resourceUploadPath="/data/dolphinscheduler"
 <span class="hljs-meta">
 #</span><span class="bash"> 具备权限创建resourceUploadPath的用户</span>
diff --git a/zh-cn/docs/1.3.0/user_doc/standalone-deployment.json b/zh-cn/docs/1.3.0/user_doc/standalone-deployment.json
index 5085989..199e051 100644
--- a/zh-cn/docs/1.3.0/user_doc/standalone-deployment.json
+++ b/zh-cn/docs/1.3.0/user_doc/standalone-deployment.json
@@ -1,6 +1,6 @@
 {
   "filename": "standalone-deployment.md",
-  "__html": "<h1>单机部署(Standalone)</h1>\n<p>DolphinScheduler单机部署分为后端部署和前端部署两部分:</p>\n<h1>1、基础软件安装(必装项请自行安装)</h1>\n<ul>\n<li>PostgreSQL (8.2.15+) or MySQL (5.7系列)  :  两者任选其一即可</li>\n<li><a href=\"https://www.oracle.com/technetwork/java/javase/downloads/index.html\">JDK</a> (1.8+) :  必装,请安装好后在/etc/profile下配置 JAVA_HOME 及 PATH 变量</li>\n<li>ZooKeeper (3.4.6+) :必装</li>\n<li>Hadoop (2.6+) or MinIO :选装, 如果需要用到资源上传功能,针对单机可以选择本地文件目录作为上传文件夹(此操作不需要部署Hadoop);当然也可以选择上传到Hadoop or MinIO集群上</li>\n</ul>\n< [...]
+  "__html": "<h1>单机部署(Standalone)</h1>\n<p>DolphinScheduler单机部署分为后端部署和前端部署两部分:</p>\n<h1>1、基础软件安装(必装项请自行安装)</h1>\n<ul>\n<li>PostgreSQL (8.2.15+) or MySQL (5.7系列)  :  两者任选其一即可</li>\n<li><a href=\"https://www.oracle.com/technetwork/java/javase/downloads/index.html\">JDK</a> (1.8+) :  必装,请安装好后在/etc/profile下配置 JAVA_HOME 及 PATH 变量</li>\n<li>ZooKeeper (3.4.6+) :必装</li>\n<li>Hadoop (2.6+) or MinIO :选装, 如果需要用到资源上传功能,针对单机可以选择本地文件目录作为上传文件夹(此操作不需要部署Hadoop);当然也可以选择上传到Hadoop or MinIO集群上</li>\n</ul>\n< [...]
   "link": "/zh-cn/docs/1.3.0/user_doc/standalone-deployment.html",
   "meta": {}
 }
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/system-manual.html b/zh-cn/docs/1.3.0/user_doc/system-manual.html
new file mode 100644
index 0000000..1888d7f
--- /dev/null
+++ b/zh-cn/docs/1.3.0/user_doc/system-manual.html
@@ -0,0 +1,976 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8">
+	<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+	<meta name="keywords" content="system-manual" />
+	<meta name="description" content="system-manual" />
+	<!-- 网页标签标题 -->
+	<title>system-manual</title>
+	<link rel="shortcut icon" href="/img/docsite.ico"/>
+	<link rel="stylesheet" href="/build/documentation.css" />
+</head>
+<body>
+	<div id="root"><div class="documentation-page" data-reactroot=""><header class="header-container header-container-normal"><div class="header-body"><a href="/zh-cn/index.html"><img class="logo" src="/img/hlogo_colorful.svg"/></a><div class="search search-normal"><span class="icon-search"></span></div><span class="language-switch language-switch-normal">En</span><div class="header-menu"><img class="header-menu-toggle" src="/img/system/menu_gray.png"/><div><ul class="ant-menu blackClass an [...]
+<h2>快速上手</h2>
+<blockquote>
+<p>请参照<a href="quick-start.html">快速上手</a></p>
+</blockquote>
+<h2>操作指南</h2>
+<h3>1. 首页</h3>
+<p>首页包含用户所有项目的任务状态统计、流程状态统计、工作流定义统计。
+<p align="center">
+<img src="/img/home.png" width="80%" />
+</p></p>
+<h3>2. 项目管理</h3>
+<h4>2.1 创建项目</h4>
+<ul>
+<li>
+<p>点击&quot;项目管理&quot;进入项目管理页面,点击“创建项目”按钮,输入项目名称,项目描述,点击“提交”,创建新的项目。</p>
+<p align="center">
+    <img src="/img/project.png" width="80%" />
+</p>
+</li>
+</ul>
+<h4>2.2 项目首页</h4>
+<ul>
+<li>
+<p>在项目管理页面点击项目名称链接,进入项目首页,如下图所示,项目首页包含该项目的任务状态统计、流程状态统计、工作流定义统计。</p>
+<p align="center">
+   <img src="/img/project-home.png" width="80%" />
+</p>
+</li>
+<li>
+<p>任务状态统计:在指定时间范围内,统计任务实例中状态为提交成功、正在运行、准备暂停、暂停、准备停止、停止、失败、成功、需要容错、kill、等待线程的个数</p>
+</li>
+<li>
+<p>流程状态统计:在指定时间范围内,统计工作流实例中状态为提交成功、正在运行、准备暂停、暂停、准备停止、停止、失败、成功、需要容错、kill、等待线程的个数</p>
+</li>
+<li>
+<p>工作流定义统计:统计用户创建的工作流定义及管理员授予该用户的工作流定义</p>
+</li>
+</ul>
+<h4>2.3 工作流定义</h4>
+<h4><span id=creatDag>2.3.1 创建工作流定义</span></h4>
+<ul>
+<li>点击项目管理-&gt;工作流-&gt;工作流定义,进入工作流定义页面,点击“创建工作流”按钮,进入<strong>工作流DAG编辑</strong>页面,如下图所示:<p align="center">
+    <img src="/img/dag0.png" width="80%" />
+</p>  
+</li>
+<li>工具栏中拖拽<img src="/img/shell.png" width="35"/>到画板中,新增一个Shell任务,如下图所示:<p align="center">
+    <img src="/img/shell_dag.png" width="80%" />
+</p>  
+</li>
+<li><strong>添加shell任务的参数设置:</strong></li>
+</ul>
+<ol>
+<li>填写“节点名称”,“描述”,“脚本”字段;</li>
+<li>“运行标志”勾选“正常”,若勾选“禁止执行”,运行工作流不会执行该任务;</li>
+<li>选择“任务优先级”:当worker线程数不足时,级别高的任务在执行队列中会优先执行,相同优先级的任务按照先进先出的顺序执行;</li>
+<li>超时告警(非必选):勾选超时告警、超时失败,填写“超时时长”,当任务执行时间超过<strong>超时时长</strong>,会发送告警邮件并且任务超时失败;</li>
+<li>资源(非必选)。资源文件是资源中心-&gt;文件管理页面创建或上传的文件,如文件名为<code>test.sh</code>,脚本中调用资源命令为<code>sh test.sh</code>;</li>
+<li>自定义参数(非必填),参考<a href="#UserDefinedParameters">自定义参数</a>;</li>
+<li>点击&quot;确认添加&quot;按钮,保存任务设置。</li>
+</ol>
+<ul>
+<li>
+<p><strong>增加任务执行的先后顺序:</strong> 点击右上角图标<img src="/img/line.png" width="35"/>连接任务;如下图所示,任务2和任务3并行执行,当任务1执行完,任务2、3会同时执行。</p>
+<p align="center">
+   <img src="/img/dag2.png" width="80%" />
+</p>
+</li>
+<li>
+<p><strong>删除依赖关系:</strong> 点击右上角&quot;箭头&quot;图标<img src="/img/arrow.png" width="35"/>,选中连接线,点击右上角&quot;删除&quot;图标<img src="/img/delete.png" width="35"/>,删除任务间的依赖关系。</p>
+<p align="center">
+   <img src="/img/dag3.png" width="80%" />
+</p>
+</li>
+<li>
+<p><strong>保存工作流定义:</strong> 点击”保存“按钮,弹出&quot;设置DAG图名称&quot;弹框,如下图所示,输入工作流定义名称,工作流定义描述,设置全局参数(选填,参考<a href="#UserDefinedParameters">自定义参数</a>),点击&quot;添加&quot;按钮,工作流定义创建成功。</p>
+<p align="center">
+   <img src="/img/dag4.png" width="80%" />
+ </p>
+</li>
+</ul>
+<blockquote>
+<p>其他类型任务,请参考 <a href="#TaskParamers">任务节点类型和参数设置</a>。</p>
+</blockquote>
+<h4>2.3.2  工作流定义操作功能</h4>
+<p>点击项目管理-&gt;工作流-&gt;工作流定义,进入工作流定义页面,如下图所示:
+<p align="center">
+<img src="/img/work_list.png" width="80%" />
+</p>
+工作流定义列表的操作功能如下:</p>
+<ul>
+<li><strong>编辑:</strong> 只能编辑&quot;下线&quot;的工作流定义。工作流DAG编辑同<a href="#creatDag">创建工作流定义</a>。</li>
+<li><strong>上线:</strong> 工作流状态为&quot;下线&quot;时,上线工作流,只有&quot;上线&quot;状态的工作流能运行,但不能编辑。</li>
+<li><strong>下线:</strong> 工作流状态为&quot;上线&quot;时,下线工作流,下线状态的工作流可以编辑,但不能运行。</li>
+<li><strong>运行:</strong> 只有上线的工作流能运行。运行操作步骤见<a href="#runWorkflow">2.3.3 运行工作流</a></li>
+<li><strong>定时:</strong> 只有上线的工作流能设置定时,系统自动定时调度工作流运行。创建定时后的状态为&quot;下线&quot;,需在定时管理页面上线定时才生效。定时操作步骤见<a href="#creatTiming">2.3.4 工作流定时</a>。</li>
+<li><strong>定时管理:</strong> 定时管理页面可编辑、上线/下线、删除定时。</li>
+<li><strong>删除:</strong> 删除工作流定义。</li>
+<li><strong>下载:</strong> 下载工作流定义到本地。</li>
+<li><strong>树形图:</strong> 以树形结构展示任务节点的类型及任务状态,如下图所示:<p align="center">
+    <img src="/img/tree.png" width="80%" />
+</p>  
+</li>
+</ul>
+<h4><span id=runWorkflow>2.3.3 运行工作流</span></h4>
+<ul>
+<li>
+<p>点击项目管理-&gt;工作流-&gt;工作流定义,进入工作流定义页面,如下图所示,点击&quot;上线&quot;按钮<img src="/img/online.png" width="35"/>,上线工作流。</p>
+<p align="center">
+    <img src="/img/work_list.png" width="80%" />
+</p>
+</li>
+<li>
+<p>点击”运行“按钮,弹出启动参数设置弹框,如下图所示,设置启动参数,点击弹框中的&quot;运行&quot;按钮,工作流开始运行,工作流实例页面生成一条工作流实例。</p>
+ <p align="center">
+   <img src="/img/run-work.png" width="80%" />
+ </p>  
+</li>
+</ul>
+<p><span id=runParamers>工作流运行参数说明:</span></p>
+<pre><code>* 失败策略:当某一个任务节点执行失败时,其他并行的任务节点需要执行的策略。”继续“表示:某一任务失败后,其他任务节点正常执行;”结束“表示:终止所有正在执行的任务,并终止整个流程。
+* 通知策略:当流程结束,根据流程状态发送流程执行信息通知邮件,包含任何状态都不发,成功发,失败发,成功或失败都发。
+* 流程优先级:流程运行的优先级,分五个等级:最高(HIGHEST),高(HIGH),中(MEDIUM),低(LOW),最低(LOWEST)。当master线程数不足时,级别高的流程在执行队列中会优先执行,相同优先级的流程按照先进先出的顺序执行。
+* worker分组:该流程只能在指定的worker机器组里执行。默认是Default,可以在任一worker上执行。
+* 通知组:选择通知策略||超时报警||发生容错时,会发送流程信息或邮件到通知组里的所有成员。
+* 收件人:选择通知策略||超时报警||发生容错时,会发送流程信息或告警邮件到收件人列表。
+* 抄送人:选择通知策略||超时报警||发生容错时,会抄送流程信息或告警邮件到抄送人列表。
+* 补数:包括串行补数、并行补数2种模式。串行补数:指定时间范围内,从开始日期至结束日期依次执行补数,只生成一条流程实例;并行补数:指定时间范围内,多天同时进行补数,生成N条流程实例。 
+</code></pre>
+<ul>
+<li>
+<p>补数: 执行指定日期的工作流定义,可以选择补数时间范围(目前只支持针对连续的天进行补数),比如需要补5月1号到5月10号的数据,如下图所示:</p>
+<p align="center">
+    <img src="/img/complement.png" width="80%" />
+</p>
+<blockquote>
+<p>串行模式:补数从5月1号到5月10号依次执行,流程实例页面生成一条流程实例;</p>
+</blockquote>
+<blockquote>
+<p>并行模式:同时执行5月1号到5月10号的任务,流程实例页面生成十条流程实例。</p>
+</blockquote>
+</li>
+</ul>
+<h4><span id=creatTiming>2.3.4 工作流定时</span></h4>
+<ul>
+<li>创建定时:点击项目管理-&gt;工作流-&gt;工作流定义,进入工作流定义页面,上线工作流,点击&quot;定时&quot;按钮<img src="/img/timing.png" width="35"/>,弹出定时参数设置弹框,如下图所示:<p align="center">
+    <img src="/img/time-schedule.png" width="80%" />
+</p>
+</li>
+<li>选择起止时间。在起止时间范围内,定时运行工作流;不在起止时间范围内,不再产生定时工作流实例。</li>
+<li>添加一个每天凌晨5点执行一次的定时,如下图所示:<p align="center">
+    <img src="/img/time-schedule2.png" width="80%" />
+</p>
+</li>
+<li>失败策略、通知策略、流程优先级、Worker分组、通知组、收件人、抄送人同<a href="#runParamers">工作流运行参数</a>。</li>
+<li>点击&quot;创建&quot;按钮,创建定时成功,此时定时状态为&quot;<strong>下线</strong>&quot;,定时需<strong>上线</strong>才生效。</li>
+<li>定时上线:点击&quot;定时管理&quot;按钮<img src="/img/timeManagement.png" width="35"/>,进入定时管理页面,点击&quot;上线&quot;按钮,定时状态变为&quot;上线&quot;,如下图所示,工作流定时生效。<p align="center">
+    <img src="/img/time-schedule3.png" width="80%" />
+</p>
+</li>
+</ul>
+<h4>2.3.5 导入工作流</h4>
+<p>点击项目管理-&gt;工作流-&gt;工作流定义,进入工作流定义页面,点击&quot;导入工作流&quot;按钮,导入本地工作流文件,工作流定义列表显示导入的工作流,状态为下线。</p>
+<h4>2.4 工作流实例</h4>
+<h4>2.4.1 查看工作流实例</h4>
+<ul>
+<li>点击项目管理-&gt;工作流-&gt;工作流实例,进入工作流实例页面,如下图所示:   <p align="center">
+      <img src="/img/instance-list.png" width="80%" />
+   </p>           
+</li>
+<li>点击工作流名称,进入DAG查看页面,查看任务执行状态,如下图所示。<p align="center">
+  <img src="/img/instance-detail.png" width="80%" />
+</p>
+</li>
+</ul>
+<h4>2.4.2 查看任务日志</h4>
+<ul>
+<li>进入工作流实例页面,点击工作流名称,进入DAG查看页面,双击任务节点,如下图所示: <p align="center">
+   <img src="/img/instanceViewLog.png" width="80%" />
+ </p>
+</li>
+<li>点击&quot;查看日志&quot;,弹出日志弹框,如下图所示,任务实例页面也可查看任务日志,参考<a href="#taskLog">任务查看日志</a>。 <p align="center">
+   <img src="/img/task-log.png" width="80%" />
+ </p>
+</li>
+</ul>
+<h4>2.4.3 查看任务历史记录</h4>
+<ul>
+<li>点击项目管理-&gt;工作流-&gt;工作流实例,进入工作流实例页面,点击工作流名称,进入工作流DAG页面;</li>
+<li>双击任务节点,如下图所示,点击&quot;查看历史&quot;,跳转到任务实例页面,并展示该工作流实例运行的任务实例列表 <p align="center">
+   <img src="/img/task_history.png" width="80%" />
+ </p>
+</li>
+</ul>
+<h4>2.4.4 查看运行参数</h4>
+<ul>
+<li>点击项目管理-&gt;工作流-&gt;工作流实例,进入工作流实例页面,点击工作流名称,进入工作流DAG页面;</li>
+<li>点击左上角图标<img src="/img/run_params_button.png" width="35"/>,查看工作流实例的启动参数;点击图标<img src="/img/global_param.png" width="35"/>,查看工作流实例的全局参数和局部参数,如下图所示: <p align="center">
+   <img src="/img/run_params.png" width="80%" />
+ </p>      
+</li>
+</ul>
+<h4>2.4.4 工作流实例操作功能</h4>
+<p>点击项目管理-&gt;工作流-&gt;工作流实例,进入工作流实例页面,如下图所示:<br>
+<p align="center">
+<img src="/img/instance-list.png" width="80%" />
+</p></p>
+<ul>
+<li><strong>编辑:</strong> 只能编辑已终止的流程。点击&quot;编辑&quot;按钮或工作流实例名称进入DAG编辑页面,编辑后点击&quot;保存&quot;按钮,弹出保存DAG弹框,如下图所示,在弹框中勾选&quot;是否更新到工作流定义&quot;,保存后则更新工作流定义;若不勾选,则不更新工作流定义。   <p align="center">
+     <img src="/img/editDag.png" width="80%" />
+   </p>
+</li>
+<li><strong>重跑:</strong> 重新执行已经终止的流程。</li>
+<li><strong>恢复失败:</strong> 针对失败的流程,可以执行恢复失败操作,从失败的节点开始执行。</li>
+<li><strong>停止:</strong> 对正在运行的流程进行<strong>停止</strong>操作,后台会先<code>kill</code>worker进程,再执行<code>kill -9</code>操作</li>
+<li><strong>暂停:</strong> 对正在运行的流程进行<strong>暂停</strong>操作,系统状态变为<strong>等待执行</strong>,会等待正在执行的任务结束,暂停下一个要执行的任务。</li>
+<li><strong>恢复暂停:</strong> 对暂停的流程恢复,直接从<strong>暂停的节点</strong>开始运行</li>
+<li><strong>删除:</strong> 删除工作流实例及工作流实例下的任务实例</li>
+<li><strong>甘特图:</strong> Gantt图纵轴是某个工作流实例下的任务实例的拓扑排序,横轴是任务实例的运行时间,如图示:   <p align="center">
+       <img src="/img/gant-pic.png" width="80%" />
+   </p>
+</li>
+</ul>
+<h4>2.5 任务实例</h4>
+<ul>
+<li>
+<p>点击项目管理-&gt;工作流-&gt;任务实例,进入任务实例页面,如下图所示,点击工作流实例名称,可跳转到工作流实例DAG图查看任务状态。</p>
+   <p align="center">
+      <img src="/img/task-list.png" width="80%" />
+   </p>
+</li>
+<li>
+<p><span id=taskLog>查看日志:</span>点击操作列中的“查看日志”按钮,可以查看任务执行的日志情况。</p>
+   <p align="center">
+      <img src="/img/task-log2.png" width="80%" />
+   </p>
+</li>
+</ul>
+<h3>3. 资源中心</h3>
+<h4>3.1 hdfs资源配置</h4>
+<ul>
+<li>上传资源文件和udf函数,所有上传的文件和资源都会被存储到hdfs上,所以需要以下配置项:</li>
+</ul>
+<pre><code>conf/common/common.properties  
+    # Users who have permission to create directories under the HDFS root path
+    hdfs.root.user=hdfs
+    # data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。&quot;/escheduler&quot; is recommended
+    data.store2hdfs.basepath=/dolphinscheduler
+    # resource upload startup type : HDFS,S3,NONE
+    res.upload.startup.type=HDFS
+    # whether kerberos starts
+    hadoop.security.authentication.startup.state=false
+    # java.security.krb5.conf path
+    java.security.krb5.conf.path=/opt/krb5.conf
+    # loginUserFromKeytab user
+    login.user.keytab.username=hdfs-mycluster@ESZ.COM
+    # loginUserFromKeytab path
+    login.user.keytab.path=/opt/hdfs.headless.keytab
+    
+conf/common/hadoop.properties      
+    # ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
+    # to the conf directory,support s3,for example : s3a://dolphinscheduler
+    fs.defaultFS=hdfs://mycluster:8020    
+    #resourcemanager ha note this need ips , this empty if single
+    yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx    
+    # If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+    yarn.application.status.address=http://xxxx:8088/ws/v1/cluster/apps/%s
+
+</code></pre>
+<ul>
+<li>yarn.resourcemanager.ha.rm.ids与yarn.application.status.address只需配置其中一个地址,另一个地址配置为空。</li>
+<li>需要从Hadoop集群的conf目录下复制core-site.xml、hdfs-site.xml到dolphinscheduler项目的conf目录下,重启api-server服务。</li>
+</ul>
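+<p>A minimal sketch of the copy-and-restart step described above (the Hadoop conf directory and installation path are assumptions; substitute your own):</p>
+<pre><code class="language-shell"># copy the cluster client configs into DolphinScheduler's conf directory
+cp /etc/hadoop/conf/core-site.xml /etc/hadoop/conf/hdfs-site.xml /opt/soft/dolphinscheduler/conf/
+
+# restart the api-server so the new Hadoop settings take effect
+sh ./bin/dolphinscheduler-daemon.sh stop  api-server
+sh ./bin/dolphinscheduler-daemon.sh start api-server
+</code></pre>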
+<h4>3.2 文件管理</h4>
+<blockquote>
+<p>是对各种资源文件的管理,包括创建基本的txt/log/sh/conf/py/java等文件、上传jar包等各种类型文件,可进行编辑、重命名、下载、删除等操作。</p>
+</blockquote>
+  <p align="center">
+   <img src="/img/file-manage.png" width="80%" />
+ </p>
+<ul>
+<li>创建文件</li>
+</ul>
+<blockquote>
+<p>文件格式支持以下几种类型:txt、log、sh、conf、cfg、py、java、sql、xml、hql、properties</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file_create.png" width="80%" />
+ </p>
+<ul>
+<li>上传文件</li>
+</ul>
+<blockquote>
+<p>上传文件:点击&quot;上传文件&quot;按钮进行上传,将文件拖拽到上传区域,文件名会自动以上传的文件名称补全</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file_upload.png" width="80%" />
+ </p>
+<ul>
+<li>文件查看</li>
+</ul>
+<blockquote>
+<p>对可查看的文件类型,点击文件名称,可查看文件详情</p>
+</blockquote>
+<p align="center">
+   <img src="/img/file_detail.png" width="80%" />
+ </p>
+<ul>
+<li>下载文件</li>
+</ul>
+<blockquote>
+<p>点击文件列表的&quot;下载&quot;按钮下载文件或者在文件详情中点击右上角&quot;下载&quot;按钮下载文件</p>
+</blockquote>
+<ul>
+<li>文件重命名</li>
+</ul>
+<p align="center">
+   <img src="/img/file_rename.png" width="80%" />
+ </p>
+<ul>
+<li>删除</li>
+</ul>
+<blockquote>
+<p>文件列表-&gt;点击&quot;删除&quot;按钮,删除指定文件</p>
+</blockquote>
+<h4>3.3 UDF管理</h4>
+<h4>3.3.1 资源管理</h4>
+<blockquote>
+<p>资源管理和文件管理功能类似,不同之处是资源管理上传的是UDF函数,文件管理上传的是用户程序、脚本及配置文件。
+操作功能:重命名、下载、删除。</p>
+</blockquote>
+<ul>
+<li>上传udf资源</li>
+</ul>
+<blockquote>
+<p>和上传文件相同。</p>
+</blockquote>
+<h4>3.3.2 函数管理</h4>
+<ul>
+<li>创建udf函数</li>
+</ul>
+<blockquote>
+<p>点击“创建UDF函数”,输入udf函数参数,选择udf资源,点击“提交”,创建udf函数。</p>
+</blockquote>
+<blockquote>
+<p>目前只支持HIVE的临时UDF函数</p>
+</blockquote>
+<ul>
+<li>UDF函数名称:输入UDF函数时的名称</li>
+<li>包名类名:输入UDF函数的全路径</li>
+<li>UDF资源:设置创建的UDF对应的资源文件</li>
+</ul>
+<p align="center">
+   <img src="/img/udf_edit.png" width="80%" />
+ </p>
+<h3>4. 创建数据源</h3>
+<blockquote>
+<p>数据源中心支持MySQL、POSTGRESQL、HIVE/IMPALA、SPARK、CLICKHOUSE、ORACLE、SQLSERVER等数据源</p>
+</blockquote>
+<h4>4.1 创建/编辑MySQL数据源</h4>
+<ul>
+<li>
+<p>点击“数据源中心-&gt;创建数据源”,根据需求创建不同类型的数据源。</p>
+</li>
+<li>
+<p>数据源:选择MYSQL</p>
+</li>
+<li>
+<p>数据源名称:输入数据源的名称</p>
+</li>
+<li>
+<p>描述:输入数据源的描述</p>
+</li>
+<li>
+<p>IP主机名:输入连接MySQL的IP</p>
+</li>
+<li>
+<p>端口:输入连接MySQL的端口</p>
+</li>
+<li>
+<p>用户名:设置连接MySQL的用户名</p>
+</li>
+<li>
+<p>密码:设置连接MySQL的密码</p>
+</li>
+<li>
+<p>数据库名:输入连接MySQL的数据库名称</p>
+</li>
+<li>
+<p>Jdbc连接参数:用于MySQL连接的参数设置,以JSON形式填写</p>
+</li>
+</ul>
+<p align="center">
+   <img src="/img/mysql_edit.png" width="80%" />
+ </p>
+<blockquote>
+<p>点击“测试连接”,测试数据源是否可以连接成功。</p>
+</blockquote>
+<h4>4.2 创建/编辑POSTGRESQL数据源</h4>
+<ul>
+<li>数据源:选择POSTGRESQL</li>
+<li>数据源名称:输入数据源的名称</li>
+<li>描述:输入数据源的描述</li>
+<li>IP/主机名:输入连接POSTGRESQL的IP</li>
+<li>端口:输入连接POSTGRESQL的端口</li>
+<li>用户名:设置连接POSTGRESQL的用户名</li>
+<li>密码:设置连接POSTGRESQL的密码</li>
+<li>数据库名:输入连接POSTGRESQL的数据库名称</li>
+<li>Jdbc连接参数:用于POSTGRESQL连接的参数设置,以JSON形式填写</li>
+</ul>
+<p align="center">
+   <img src="/img/postgresql_edit.png" width="80%" />
+ </p>
+<h4>4.3 创建/编辑HIVE数据源</h4>
+<p>1.使用HiveServer2方式连接</p>
+ <p align="center">
+    <img src="/img/hive_edit.png" width="80%" />
+  </p>
+<ul>
+<li>数据源:选择HIVE</li>
+<li>数据源名称:输入数据源的名称</li>
+<li>描述:输入数据源的描述</li>
+<li>IP/主机名:输入连接HIVE的IP</li>
+<li>端口:输入连接HIVE的端口</li>
+<li>用户名:设置连接HIVE的用户名</li>
+<li>密码:设置连接HIVE的密码</li>
+<li>数据库名:输入连接HIVE的数据库名称</li>
+<li>Jdbc连接参数:用于HIVE连接的参数设置,以JSON形式填写</li>
+</ul>
+<p>2.使用HiveServer2 HA Zookeeper方式连接</p>
+ <p align="center">
+    <img src="/img/hive_edit2.png" width="80%" />
+  </p>
+<p>注意:如果开启了<strong>kerberos</strong>,则需要填写 <strong>Principal</strong></p>
+<p align="center">
+    <img src="/img/hive_kerberos.png" width="80%" />
+  </p>
+<h4>4.4 创建/编辑Spark数据源</h4>
+<p align="center">
+   <img src="/img/spark_datesource.png" width="80%" />
+ </p>
+<ul>
+<li>数据源:选择Spark</li>
+<li>数据源名称:输入数据源的名称</li>
+<li>描述:输入数据源的描述</li>
+<li>IP/主机名:输入连接Spark的IP</li>
+<li>端口:输入连接Spark的端口</li>
+<li>用户名:设置连接Spark的用户名</li>
+<li>密码:设置连接Spark的密码</li>
+<li>数据库名:输入连接Spark的数据库名称</li>
+<li>Jdbc连接参数:用于Spark连接的参数设置,以JSON形式填写</li>
+</ul>
+<p>注意:如果开启了<strong>kerberos</strong>,则需要填写 <strong>Principal</strong></p>
+<p align="center">
+    <img src="/img/sparksql_kerberos.png" width="80%" />
+  </p>
+<h3>5. 安全中心(权限系统)</h3>
+<pre><code> * 安全中心只有管理员账户才有权限操作,分别有队列管理、租户管理、用户管理、告警组管理、worker分组管理、令牌管理等功能,在用户管理模块可以对资源、数据源、项目等授权
+ * 管理员登录,默认用户名密码:admin/dolphinscheduler123
+</code></pre>
+<h4>5.1 创建队列</h4>
+<ul>
+<li>队列是在执行spark、mapreduce等程序,需要用到“队列”参数时使用的。</li>
+<li>管理员进入安全中心-&gt;队列管理页面,点击“创建队列”按钮,创建队列。</li>
+</ul>
+ <p align="center">
+    <img src="/img/create-queue.png" width="80%" />
+  </p>
+<h4>5.2 添加租户</h4>
+<ul>
+<li>租户对应的是Linux的用户,用于worker提交作业所使用的用户。如果linux没有这个用户,worker会在执行脚本的时候创建这个用户。</li>
+<li>租户编码:<strong>租户编码是Linux上的用户,唯一,不能重复</strong></li>
+<li>管理员进入安全中心-&gt;租户管理页面,点击“创建租户”按钮,创建租户。</li>
+</ul>
+ <p align="center">
+    <img src="/img/addtenant.png" width="80%" />
+  </p>
+<h4>5.3 创建普通用户</h4>
+<ul>
+<li>用户分为<strong>管理员用户</strong>和<strong>普通用户</strong></li>
+</ul>
+<pre><code>* 管理员有授权和用户管理等权限,没有创建项目和工作流定义的权限。
+* 普通用户可以创建项目,以及对工作流定义进行创建、编辑、执行等操作。
+* 注意:如果该用户切换了租户,则该用户所在租户下所有资源将复制到切换的新租户下。
+</code></pre>
+<ul>
+<li>管理员进入安全中心-&gt;用户管理页面,点击“创建用户”按钮,创建用户。</li>
+</ul>
+<p align="center">
+   <img src="/img/useredit2.png" width="80%" />
+ </p>
+<blockquote>
+<p><strong>编辑用户信息</strong></p>
+</blockquote>
+<ul>
+<li>管理员进入安全中心-&gt;用户管理页面,点击&quot;编辑&quot;按钮,编辑用户信息。</li>
+<li>普通用户登录后,点击用户名下拉框中的用户信息,进入用户信息页面,点击&quot;编辑&quot;按钮,编辑用户信息。</li>
+</ul>
+<blockquote>
+<p><strong>修改用户密码</strong></p>
+</blockquote>
+<ul>
+<li>管理员进入安全中心-&gt;用户管理页面,点击&quot;编辑&quot;按钮,编辑用户信息时,输入新密码修改用户密码。</li>
+<li>普通用户登录后,点击用户名下拉框中的用户信息,进入修改密码页面,输入密码并确认密码后点击&quot;编辑&quot;按钮,则修改密码成功。</li>
+</ul>
+<h4>5.4 创建告警组</h4>
+<ul>
+<li>告警组是在启动时设置的参数,在流程结束以后会将流程的状态和其他信息以邮件形式发送给告警组。</li>
+</ul>
+<ul>
+<li>管理员进入安全中心-&gt;告警组管理页面,点击“创建告警组”按钮,创建告警组。</li>
+</ul>
+  <p align="center">
+    <img src="/img/mail_edit.png" width="80%" />
+  </p>
+<h4>5.5 创建worker分组</h4>
+<ul>
+<li>worker分组,提供了一种让任务在指定的worker上运行的机制。管理员创建worker分组,在任务节点和运行参数中可以指定该任务运行的worker分组,如果指定的分组被删除或者没有指定分组,则该任务会在任一worker上运行。</li>
+<li>管理员进入安全中心-&gt;Worker分组管理页面,点击“创建Worker分组”按钮,创建Worker分组。worker分组内有多个ip地址(<strong>不能写别名</strong>),以<strong>英文逗号</strong>分隔。</li>
+</ul>
+  <p align="center">
+    <img src="/img/worker1.png" width="80%" />
+  </p>
+<h4>5.6 令牌管理</h4>
+<blockquote>
+<p>由于后端接口有登录检查,令牌管理提供了一种通过调用接口的方式对系统进行各种操作的途径。</p>
+</blockquote>
+<ul>
+<li>管理员进入安全中心-&gt;令牌管理页面,点击“创建令牌”按钮,选择失效时间与用户,点击&quot;生成令牌&quot;按钮,点击&quot;提交&quot;按钮,则选择用户的token创建成功。</li>
+</ul>
+  <p align="center">
+      <img src="/img/creat_token.png" width="80%" />
+   </p>
+<ul>
+<li>
+<p>普通用户登录后,点击用户名下拉框中的用户信息,进入令牌管理页面,选择失效时间,点击&quot;生成令牌&quot;按钮,点击&quot;提交&quot;按钮,则该用户创建token成功。</p>
+</li>
+<li>
+<p>调用示例:</p>
+</li>
+</ul>
+<pre><code class="language-java">    // requires Apache HttpClient (org.apache.http.*) on the classpath
+    /**
+     * test token
+     */
+    public void doPOSTParam() throws Exception {
+        // create HttpClient
+        CloseableHttpClient httpclient = HttpClients.createDefault();
+
+        // create http post request
+        HttpPost httpPost = new HttpPost(&quot;http://127.0.0.1:12345/escheduler/projects/create&quot;);
+        httpPost.setHeader(&quot;token&quot;, &quot;123&quot;);
+        // set parameters
+        List&lt;NameValuePair&gt; parameters = new ArrayList&lt;NameValuePair&gt;();
+        parameters.add(new BasicNameValuePair(&quot;projectName&quot;, &quot;qzw&quot;));
+        parameters.add(new BasicNameValuePair(&quot;desc&quot;, &quot;qzw&quot;));
+        UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(parameters);
+        httpPost.setEntity(formEntity);
+        CloseableHttpResponse response = null;
+        try {
+            // execute
+            response = httpclient.execute(httpPost);
+            // response status code 200
+            if (response.getStatusLine().getStatusCode() == 200) {
+                String content = EntityUtils.toString(response.getEntity(), &quot;UTF-8&quot;);
+                System.out.println(content);
+            }
+        } finally {
+            if (response != null) {
+                response.close();
+            }
+            httpclient.close();
+        }
+    }
+</code></pre>
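+<p>The same call expressed with curl (a sketch; substitute a real token and host, mirroring the Java example above):</p>
+<pre><code class="language-shell">curl -X POST -H "token: 123" \
+     -d "projectName=qzw&amp;desc=qzw" \
+     http://127.0.0.1:12345/escheduler/projects/create
+</code></pre>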
+<h4>5.7 授予权限</h4>
+<pre><code>* 授予权限包括项目权限,资源权限,数据源权限,UDF函数权限。
+* 管理员可以对普通用户进行非其创建的项目、资源、数据源和UDF函数的授权。因为项目、资源、数据源和UDF函数的授权方式都是一样的,所以以项目授权为例介绍。
+* 注意:对于用户自己创建的项目,该用户拥有所有的权限,因此这些项目不会在项目列表和已选项目列表中显示。
+</code></pre>
+<ul>
+<li>管理员进入安全中心-&gt;用户管理页面,点击需授权用户的“授权”按钮,如下图所示:</li>
+</ul>
+  <p align="center">
+   <img src="/img/auth_user.png" width="80%" />
+ </p>
+<ul>
+<li>选择项目,进行项目授权。</li>
+</ul>
+<p align="center">
+   <img src="/img/auth_project.png" width="80%" />
+ </p>
+<ul>
+<li>资源、数据源、UDF函数授权同项目授权。</li>
+</ul>
+<h3>6. 监控中心</h3>
+<h4>6.1 服务管理</h4>
+<ul>
+<li>服务管理主要是对系统中的各个服务的健康状况和基本信息的监控和显示</li>
+</ul>
+<h4>6.1.1 master监控</h4>
+<ul>
+<li>主要是master的相关信息。</li>
+</ul>
+<p align="center">
+   <img src="/img/master-jk.png" width="80%" />
+ </p>
+<h4>6.1.2 worker监控</h4>
+<ul>
+<li>主要是worker的相关信息。</li>
+</ul>
+<p align="center">
+   <img src="/img/worker-jk.png" width="80%" />
+ </p>
+<h4>6.1.3 Zookeeper监控</h4>
+<ul>
+<li>主要是zookeeper中各个worker和master的相关配置信息。</li>
+</ul>
+<p align="center">
+   <img src="/img/zk-jk.png" width="80%" />
+ </p>
+<h4>6.1.4 DB监控</h4>
+<ul>
+<li>主要是DB的健康状况</li>
+</ul>
+<p align="center">
+   <img src="/img/mysql-jk.png" width="80%" />
+ </p>
+<h4>6.2 统计管理</h4>
+<p align="center">
+   <img src="/img/Statistics.png" width="80%" />
+ </p>
+<ul>
+<li>待执行命令数:统计t_ds_command表的数据</li>
+<li>执行失败的命令数:统计t_ds_error_command表的数据</li>
+<li>待运行任务数:统计Zookeeper中task_queue的数据</li>
+<li>待杀死任务数:统计Zookeeper中task_kill的数据</li>
+</ul>
+<h3>7. <span id=TaskParamers>任务节点类型和参数设置</span></h3>
+<h4>7.1 Shell节点</h4>
+<blockquote>
+<p>shell节点,在worker执行的时候,会生成一个临时shell脚本,使用租户同名的linux用户执行这个脚本。</p>
+</blockquote>
+<ul>
+<li>
+<p>点击项目管理-项目名称-工作流定义,点击&quot;创建工作流&quot;按钮,进入DAG编辑页面。</p>
+</li>
+<li>
+<p>工具栏中拖动<img src="/img/shell.png" width="35"/>到画板中,如下图所示:</p>
+<p align="center">
+    <img src="/img/shell_dag.png" width="80%" />
+</p> 
+</li>
+<li>
+<p>节点名称:一个工作流定义中的节点名称是唯一的。</p>
+</li>
+<li>
+<p>运行标志:标识这个节点是否能正常调度,如果不需要执行,可以打开禁止执行开关。</p>
+</li>
+<li>
+<p>描述信息:描述该节点的功能。</p>
+</li>
+<li>
+<p>任务优先级:worker线程数不足时,根据优先级从高到低依次执行,优先级一样时根据先进先出原则执行。</p>
+</li>
+<li>
+<p>Worker分组:任务分配给worker组的机器执行,选择Default,会随机选择一台worker机执行。</p>
+</li>
+<li>
+<p>失败重试次数:任务失败重新提交的次数,支持下拉和手填。</p>
+</li>
+<li>
+<p>失败重试间隔:任务失败重新提交任务的时间间隔,支持下拉和手填。</p>
+</li>
+<li>
+<p>超时告警:勾选超时告警、超时失败,当任务超过&quot;超时时长&quot;后,会发送告警邮件并且任务执行失败.</p>
+</li>
+<li>
+<p>脚本:用户开发的SHELL程序。</p>
+</li>
+<li>
+<p>资源:是指脚本中需要调用的资源文件列表,资源中心-文件管理上传或创建的文件。</p>
+</li>
+<li>
+<p>自定义参数:是SHELL局部的用户自定义参数,会替换脚本中以${变量}的内容。</p>
+</li>
+</ul>
+<h4>7.2 子流程节点</h4>
+<ul>
+<li>子流程节点,就是把外部的某个工作流定义当做一个任务节点去执行。</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SUB_PROCESS.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/subprocess_edit.png" width="80%" />
+ </p>
+<ul>
+<li>节点名称:一个工作流定义中的节点名称是唯一的</li>
+<li>运行标志:标识这个节点是否能正常调度</li>
+<li>描述信息:描述该节点的功能</li>
+<li>超时告警:勾选超时告警、超时失败,当任务超过&quot;超时时长&quot;后,会发送告警邮件并且任务执行失败.</li>
+<li>子节点:是选择子流程的工作流定义,右上角进入该子节点可以跳转到所选子流程的工作流定义</li>
+</ul>
+<h4>7.3 依赖(DEPENDENT)节点</h4>
+<ul>
+<li>依赖节点,就是<strong>依赖检查节点</strong>。比如A流程依赖昨天的B流程执行成功,依赖节点会去检查B流程在昨天是否有执行成功的实例。</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/dependent_edit.png" width="80%" />
+ </p>
+<blockquote>
+<p>依赖节点提供了逻辑判断功能,比如检查昨天的B流程是否成功,或者C流程是否执行成功。</p>
+</blockquote>
+  <p align="center">
+   <img src="/img/depend-node.png" width="80%" />
+ </p>
+<blockquote>
+<p>例如,A流程为周报任务,B、C流程为天任务,A任务需要B、C任务在上周的每一天都执行成功,如图示:</p>
+</blockquote>
+ <p align="center">
+   <img src="/img/depend-node2.png" width="80%" />
+ </p>
+<blockquote>
+<p>假如,周报A同时还需要自身在上周二执行成功:</p>
+</blockquote>
+ <p align="center">
+   <img src="/img/depend-node3.png" width="80%" />
+ </p>
+<h4>7.4 存储过程节点</h4>
+<ul>
+<li>根据选择的数据源,执行存储过程。</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/procedure_edit.png" width="80%" />
+ </p>
+<ul>
+<li>数据源:存储过程的数据源类型支持MySQL和POSTGRESQL两种,选择对应的数据源</li>
+<li>方法:是存储过程的方法名称</li>
+<li>自定义参数:存储过程的自定义参数类型支持IN、OUT两种,数据类型支持VARCHAR、INTEGER、LONG、FLOAT、DOUBLE、DATE、TIME、TIMESTAMP、BOOLEAN九种数据类型</li>
+</ul>
+<h4>7.5 SQL节点</h4>
+<ul>
+<li>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SQL.png" alt="PNG">任务节点到画板中</li>
+<li>非查询SQL功能:编辑非查询SQL任务信息,sql类型选择非查询,如下图所示:</li>
+</ul>
+  <p align="center">
+   <img src="/img/sql-node.png" width="80%" />
+ </p>
+<ul>
+<li>查询SQL功能:编辑查询SQL任务信息,sql类型选择查询,选择表格或附件形式发送邮件到指定的收件人,如下图所示。</li>
+</ul>
+<p align="center">
+   <img src="/img/sql-node2.png" width="80%" />
+ </p>
+<ul>
+<li>数据源:选择对应的数据源</li>
+<li>sql类型:支持查询和非查询两种,查询是select类型的查询,是有结果集返回的,可以指定邮件通知为表格、附件或表格附件三种模板。非查询是没有结果集返回的,是针对update、delete、insert三种类型的操作。</li>
+<li>sql参数:输入参数格式为key1=value1;key2=value2…</li>
+<li>sql语句:SQL语句</li>
+<li>UDF函数:对于HIVE类型的数据源,可以引用资源中心中创建的UDF函数,其他类型的数据源暂不支持UDF函数。</li>
+<li>自定义参数:自定义参数类型和数据类型与存储过程任务类型一样(存储过程是按自定义参数的顺序给方法设置值)。区别在于SQL任务类型的自定义参数会替换sql语句中的${变量}。</li>
+<li>前置sql:前置sql在sql语句之前执行。</li>
+<li>后置sql:后置sql在sql语句之后执行。</li>
+</ul>
+<h4>7.6 SPARK节点</h4>
+<ul>
+<li>通过SPARK节点,可以直接执行SPARK程序,对于spark节点,worker会使用<code>spark-submit</code>方式提交任务</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SPARK.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/spark_edit.png" width="80%" />
+ </p>
+<ul>
+<li>程序类型:支持JAVA、Scala和Python三种语言</li>
+<li>主函数的class:是Spark程序的入口Main Class的全路径</li>
+<li>主jar包:是Spark的jar包</li>
+<li>部署方式:支持yarn-cluster、yarn-client和local三种模式</li>
+<li>Driver内核数:可以设置Driver内核数及内存数</li>
+<li>Executor数量:可以设置Executor数量、Executor内存数和Executor内核数</li>
+<li>命令行参数:是设置Spark程序的输入参数,支持自定义参数变量的替换。</li>
+<li>其他参数:支持 --jars、--files、--archives、--conf格式</li>
+<li>资源:如果其他参数中引用了资源文件,需要在资源中选择指定</li>
+<li>自定义参数:是SPARK局部的用户自定义参数,会替换脚本中以${变量}的内容</li>
+</ul>
+<p>注意:JAVA和Scala只是用来标识,没有区别,如果是Python开发的Spark则没有主函数的class,其他都是一样</p>
+<h4>7.7 MapReduce(MR)节点</h4>
+<ul>
+<li>使用MR节点,可以直接执行MR程序。对于mr节点,worker会使用<code>hadoop jar</code>方式提交任务</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_MR.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<ol>
+<li>JAVA程序</li>
+</ol>
+ <p align="center">
+   <img src="/img/mr_java.png" width="80%" />
+ </p>
+<ul>
+<li>主函数的class:是MR程序的入口Main Class的全路径</li>
+<li>程序类型:选择JAVA语言</li>
+<li>主jar包:是MR的jar包</li>
+<li>命令行参数:是设置MR程序的输入参数,支持自定义参数变量的替换</li>
+<li>其他参数:支持 -D、-files、-libjars、-archives格式</li>
+<li>资源: 如果其他参数中引用了资源文件,需要在资源中选择指定</li>
+<li>自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容</li>
+</ul>
+<ol start="2">
+<li>Python程序</li>
+</ol>
+<p align="center">
+   <img src="/img/mr_edit.png" width="80%" />
+ </p>
+<ul>
+<li>程序类型:选择Python语言</li>
+<li>主jar包:是运行MR的Python jar包</li>
+<li>其他参数:支持 -D、-mapper、-reducer、-input、-output格式,这里可以设置用户自定义参数的输入,比如:</li>
+<li>-mapper &quot;mapper.py 1&quot; -file mapper.py -reducer reducer.py -file reducer.py -input /journey/words.txt -output /journey/out/mr/${currentTimeMillis}</li>
+<li>其中 -mapper 后的 mapper.py 1 是两个参数,第一个参数是mapper.py,第二个参数是1</li>
+<li>资源: 如果其他参数中引用了资源文件,需要在资源中选择指定</li>
+<li>自定义参数:是MR局部的用户自定义参数,会替换脚本中以${变量}的内容</li>
+</ul>
+<h4>7.8 Python节点</h4>
+<ul>
+<li>使用python节点,可以直接执行python脚本,对于python节点,worker会使用<code>python **</code>方式提交任务。</li>
+</ul>
+<blockquote>
+<p>拖动工具栏中的<img src="https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PYTHON.png" alt="PNG">任务节点到画板中,如下图所示:</p>
+</blockquote>
+<p align="center">
+   <img src="/img/python_edit.png" width="80%" />
+ </p>
+<ul>
+<li>脚本:用户开发的Python程序</li>
+<li>资源:是指脚本中需要调用的资源文件列表</li>
+<li>自定义参数:是Python局部的用户自定义参数,会替换脚本中以${变量}的内容</li>
+</ul>
+<h4>7.9 Flink节点</h4>
+<ul>
+<li>拖动工具栏中的<img src="/img/flink.png" width="35"/>任务节点到画板中,如下图所示:</li>
+</ul>
+<p align="center">
+  <img src="/img/flink_edit.png" width="80%" />
+</p>
+<ul>
+<li>Program type: supports JAVA, Scala, and Python</li>
+<li>Class of the main function: the full path of the Main Class, the entry point of the Flink program</li>
+<li>Main jar package: the Flink jar package</li>
+<li>Deployment mode: supports the cluster and local modes</li>
+<li>Slot number: the number of slots can be set</li>
+<li>taskManager number: the number of taskManagers can be set</li>
+<li>jobManager memory: the jobManager memory size can be set</li>
+<li>taskManager memory: the taskManager memory size can be set</li>
+<li>Command-line parameters: the input parameters of the Flink program; substitution of custom parameter variables is supported.</li>
+<li>Other parameters: supports the --jars, --files, --archives, and --conf formats</li>
+<li>Resources: if resource files are referenced in other parameters, they must be selected and specified in Resources</li>
+<li>Custom parameters: user-defined parameters local to the Flink task that replace the ${variable} contents in the script</li>
+</ul>
+<p>Note: JAVA and Scala are only used for identification and there is no difference between them. A Flink program developed in Python has no class of the main function; everything else is the same.</p>
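+<p>For intuition, the submission roughly resembles the following <code>flink run</code> command (a sketch only; the class and jar names are placeholders, and the exact flags depend on the Flink version in use):</p>
+<pre><code># placeholders: com.example.StreamingJob, main-application.jar, arg1
+flink run -m yarn-cluster -ys 1 -yn 2 -yjm 1G -ytm 2G \
+  -c com.example.StreamingJob main-application.jar arg1
+</code></pre>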
+<h4>7.10 http节点</h4>
+<ul>
+<li>Drag the <img src="/img/http.png" width="35"/> task node from the toolbar onto the canvas, as shown in the figure below:</li>
+</ul>
+<p align="center">
+   <img src="/img/http_edit.png" width="80%" />
+ </p>
+<ul>
+<li>Node name: node names within a workflow definition must be unique.</li>
+<li>Run flag: indicates whether this node can be scheduled normally; if it does not need to be executed, turn on the &quot;prohibit execution&quot; switch.</li>
+<li>Description: describes the function of this node.</li>
+<li>Task priority: when the number of worker threads is insufficient, tasks are executed from high to low priority; tasks with the same priority are executed first in, first out.</li>
+<li>Worker group: the task is assigned to a machine in the worker group for execution; if Default is selected, a worker machine is chosen at random.</li>
+<li>Number of failure retries: the number of times a failed task is resubmitted; can be selected from a drop-down or filled in manually.</li>
+<li>Failure retry interval: the interval between resubmissions of a failed task; can be selected from a drop-down or filled in manually.</li>
+<li>Timeout alarm: when timeout alarm and timeout failure are checked and the task runs longer than the &quot;timeout duration&quot;, an alarm email is sent and the task execution fails.</li>
+<li>Request address: the http request URL.</li>
+<li>Request type: supports GET, POST, HEAD, PUT, and DELETE.</li>
+<li>Request parameters: supports Parameter, Body, and Headers.</li>
+<li>Verification conditions: supports default response code, custom response code, content included, and content not included.</li>
+<li>Verification content: when the verification condition is custom response code, content included, or content not included, the verification content must be filled in.</li>
+<li>Custom parameters: user-defined parameters local to the http task that replace the ${variable} contents in the script.</li>
+</ul>
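+<p>For a quick sanity check outside the scheduler, a GET request with a Parameter entry <code>id=1</code> can be reproduced with <code>curl</code> (the URL below is a placeholder); the printed status code is what the default response-code condition checks against:</p>
+<pre><code># print only the HTTP status code of the response
+curl -s -o /dev/null -w &quot;%{http_code}&quot; &quot;http://your-service/api/resource?id=1&quot;
+</code></pre>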
+<h4>8. Parameters</h4>
+<h4>8.1 System parameters</h4>
+<table>
+    <tr><th>Variable</th><th>Meaning</th></tr>
+    <tr>
+        <td>${system.biz.date}</td>
+        <td>The day before the scheduled time of the routine scheduling instance, in yyyyMMdd format; when complementing (backfilling) data, this date + 1</td>
+    </tr>
+    <tr>
+        <td>${system.biz.curdate}</td>
+        <td>The scheduled time of the routine scheduling instance, in yyyyMMdd format; when complementing (backfilling) data, this date + 1</td>
+    </tr>
+    <tr>
+        <td>${system.datetime}</td>
+        <td>The scheduled time of the routine scheduling instance, in yyyyMMddHHmmss format; when complementing (backfilling) data, this date + 1</td>
+    </tr>
+</table>
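+<p>As a worked example, for an instance whose scheduled time is 2020-06-16 09:54:00, ${system.biz.date} is 20200615, ${system.biz.curdate} is 20200616, and ${system.datetime} is 20200616095400.</p>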
+<h4>8.2 Time custom parameters</h4>
+<ul>
+<li>
+<p>Custom variable names are supported in code, declared as ${variable name}. They can reference a &quot;system parameter&quot; or specify a &quot;constant&quot;.</p>
+</li>
+<li>
+<p>We define these benchmark variables in the $[...] format; [yyyyMMddHHmmss] can be decomposed and combined arbitrarily, e.g. $[yyyyMMdd], $[HHmmss], $[yyyy-MM-dd], etc.</p>
+</li>
+<li>
+<p>The following formats can also be used:</p>
+<pre><code>* N years later: $[add_months(yyyyMMdd,12*N)]
+* N years earlier: $[add_months(yyyyMMdd,-12*N)]
+* N months later: $[add_months(yyyyMMdd,N)]
+* N months earlier: $[add_months(yyyyMMdd,-N)]
+* N weeks later: $[yyyyMMdd+7*N]
+* N weeks earlier: $[yyyyMMdd-7*N]
+* N days later: $[yyyyMMdd+N]
+* N days earlier: $[yyyyMMdd-N]
+* N hours later: $[HHmmss+N/24]
+* N hours earlier: $[HHmmss-N/24]
+* N minutes later: $[HHmmss+N/24/60]
+* N minutes earlier: $[HHmmss-N/24/60]
+</code></pre>
+</li>
+</ul>
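+<p>As a worked example, if the benchmark time is 20200616, then $[yyyyMMdd+7*1] yields 20200623 (1 week later), $[yyyyMMdd-1] yields 20200615 (1 day earlier), and $[add_months(yyyyMMdd,-12*1)] yields 20190616 (1 year earlier).</p>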
+<h4>8.3 <span id=UserDefinedParameters>User-defined parameters</span></h4>
+<ul>
+<li>User-defined parameters are divided into global parameters and local parameters. Global parameters are passed when the workflow definition and workflow instance are saved; a global parameter can be referenced by the local parameters of any task node in the whole workflow.
+For example:</li>
+</ul>
+<p align="center">
+   <img src="/img/local_parameter.png" width="80%" />
+ </p>
+<ul>
+<li>global_bizdate is a global parameter that references a system parameter.</li>
+</ul>
+<p align="center">
+   <img src="/img/global_parameter.png" width="80%" />
+ </p>
+<ul>
+<li>In the task, local_param_bizdate references the global parameter via ${global_bizdate}; in a script, the value of the global variable global_bizdate can be referenced via ${local_param_bizdate}, or the value of local_param_bizdate can be set directly via JDBC, as sketched below</li>
+</ul>
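+<p>A minimal shell sketch following the figures above: with the global parameter <code>global_bizdate</code> referenced by the node-level parameter <code>local_param_bizdate</code>, a script can read the value as follows:</p>
+<pre><code>#!/bin/bash
+# ${local_param_bizdate} is substituted with the value of global_bizdate at runtime
+echo &quot;business date is ${local_param_bizdate}&quot;
+</code></pre>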
+</div></section><footer class="footer-container"><div class="footer-body"><img src="/img/ds_gray.svg"/><div class="cols-container"><div class="col col-12"><h3>Disclaimer</h3><p>Apache DolphinScheduler (incubating) is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by Incubator. 
+Incubation is required of all newly accepted projects until a further review indicates 
+that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. 
+While incubation status is not necessarily a reflection of the completeness or stability of the code, 
+it does indicate that the project has yet to be fully endorsed by the ASF.</p></div><div class="col col-6"><dl><dt>文档</dt><dd><a href="/zh-cn/docs/1.2.0/user_doc/architecture-design.html" target="_self">概览</a></dd><dd><a href="/zh-cn/docs/1.2.0/user_doc/quick-start.html" target="_self">快速开始</a></dd><dd><a href="/zh-cn/docs/1.2.0/user_doc/backend-development.html" target="_self">开发者指南</a></dd></dl></div><div class="col col-6"><dl><dt>ASF</dt><dd><a href="http://www.apache.org" target="_se [...]
+	<script src="https://f.alicdn.com/react/15.4.1/react-with-addons.min.js"></script>
+	<script src="https://f.alicdn.com/react/15.4.1/react-dom.min.js"></script>
+	<script>
+		window.rootPath = '';
+  </script>
+	<script src="/build/documentation.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/system-manual.json b/zh-cn/docs/1.3.0/user_doc/system-manual.json
new file mode 100644
index 0000000..d3483d9
--- /dev/null
+++ b/zh-cn/docs/1.3.0/user_doc/system-manual.json
@@ -0,0 +1,6 @@
+{
+  "filename": "system-manual.md",
+  "__html": "<h1>系统使用手册</h1>\n<h2>快速上手</h2>\n<blockquote>\n<p>请参照<a href=\"quick-start.html\">快速上手</a></p>\n</blockquote>\n<h2>操作指南</h2>\n<h3>1. 首页</h3>\n<p>首页包含用户所有项目的任务状态统计、流程状态统计、工作流定义统计。\n<p align=\"center\">\n<img src=\"/img/home.png\" width=\"80%\" />\n</p></p>\n<h3>2. 项目管理</h3>\n<h4>2.1 创建项目</h4>\n<ul>\n<li>\n<p>点击&quot;项目管理&quot;进入项目管理页面,点击“创建项目”按钮,输入项目名称,项目描述,点击“提交”,创建新的项目。</p>\n<p align=\"center\">\n    <img src=\"/img/project.png\" width=\"80%\" />\n</p>\n</li>\n</ul>\n<h4>2.2 [...]
+  "link": "/zh-cn/docs/1.3.0/user_doc/system-manual.html",
+  "meta": {}
+}
\ No newline at end of file
diff --git a/zh-cn/docs/1.3.0/user_doc/upgrade.html b/zh-cn/docs/1.3.0/user_doc/upgrade.html
index 8808261..91704c1 100644
--- a/zh-cn/docs/1.3.0/user_doc/upgrade.html
+++ b/zh-cn/docs/1.3.0/user_doc/upgrade.html
@@ -18,7 +18,7 @@
 <p><code>sh ./script/stop-all.sh</code></p>
 <h2>3. Download the new version installation package</h2>
 <ul>
-<li><a href="https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html">Download</a>: download the latest frontend and backend installation packages (dolphinscheduler-backend, dolphinscheduler-ui)</li>
+<li><a href="https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html">Download</a>: download the latest binary installation package</li>
 <li>All of the following upgrade operations must be performed in the directory of the new version</li>
 </ul>
 <h2>4. Database upgrade</h2>
diff --git a/zh-cn/docs/1.3.0/user_doc/upgrade.json b/zh-cn/docs/1.3.0/user_doc/upgrade.json
index b3d2279..48081f2 100644
--- a/zh-cn/docs/1.3.0/user_doc/upgrade.json
+++ b/zh-cn/docs/1.3.0/user_doc/upgrade.json
@@ -1,6 +1,6 @@
 {
   "filename": "upgrade.md",
-  "__html": "<h1>DolphinScheduler升级文档</h1>\n<h2>1. 备份上一版本文件和数据库</h2>\n<h2>2. 停止dolphinscheduler所有服务</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. 下载新版本的安装包</h2>\n<ul>\n<li><a href=\"https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html\">下载</a>, 下载最新版本的前后端安装包(dolphinscheduler-backend、dolphinscheduler-ui)</li>\n<li>以下升级操作都需要在新版本的目录进行</li>\n</ul>\n<h2>4. 数据库升级</h2>\n<ul>\n<li>\n<p>修改conf/datasource.properties中的下列属性</p>\n</li>\n<li>\n<p>如果选择 MySQL,请注释掉 PostgreSQL [...]
+  "__html": "<h1>DolphinScheduler升级文档</h1>\n<h2>1. 备份上一版本文件和数据库</h2>\n<h2>2. 停止dolphinscheduler所有服务</h2>\n<p><code>sh ./script/stop-all.sh</code></p>\n<h2>3. 下载新版本的安装包</h2>\n<ul>\n<li><a href=\"https://dolphinscheduler.apache.org/en-us/docs/user_doc/download.html\">下载</a>, 下载最新版本的二进制安装包</li>\n<li>以下升级操作都需要在新版本的目录进行</li>\n</ul>\n<h2>4. 数据库升级</h2>\n<ul>\n<li>\n<p>修改conf/datasource.properties中的下列属性</p>\n</li>\n<li>\n<p>如果选择 MySQL,请注释掉 PostgreSQL 相关配置(反之同理), 还需要手动添加 [<a href=\"https://downlo [...]
   "link": "/zh-cn/docs/1.3.0/user_doc/upgrade.html",
   "meta": {}
 }
\ No newline at end of file