Posted to commits@dolphinscheduler.apache.org by zh...@apache.org on 2022/05/24 15:12:00 UTC

[dolphinscheduler] branch 3.0.0-beta-prepare updated (69a76e8658 -> eedf2ba001)

This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a change to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git


    from 69a76e8658 [refactor][perf] cache compiled regex pattern (#10099)
     new b598b27c08 [Doc] Add compile supporting system (#10188)
     new 232a7bdb60 [Fix][UI] Fix data-quality component multi-language support. (#10202)
     new 1f8ee855f0 [doc] Update delete workflow description (#10203)
     new 9a966e4cb2 [Fix-10199] [Workflow/Workflow Definition] After deleting messages in batches, the error message is repeated (#10201)
     new 03e5ac6964 [Bug][Deploy]Fix worker-server path in script scp-hosts.sh (#10208) (#10209)
     new 424ef27257 fix #10214 Delete online workflows in batches, prompt information is accurate (#10215)
     new 80ebe4a334 [python] Fix process definition attr release state not work (#10151)
     new 844ce4cdd7 [Fix][UI] add assignment of totalPage (#10165)
     new f7f3305569 [doc] Update task DataX document (#10218)
     new 944be34177 [Feature-9474] [CI] Add cluster test script verify on shell script (#9997)
     new 5d8679fba9 [Fix-10103][k8s]Fix k8s Change DataSource Error (#10128)
     new f28890e139 [Fix][UI] Fix the problem of multi-language support of workflow instance host in Chinese state. (#10223)
     new 00e1162286 [doc] Update kubernetes' FAQ (#10221)
     new b320743980 [doc] Refactor local parameter document (#10119)
     new 9eaac1e1b4 [fix-10207] When the registered alarm plugin is empty, the error message is not clear (#10224)
     new eedf2ba001 [Fix][UI] Fix issue with treemap depth in workflow relationships. (#10229)

The 16 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/workflows/backend.yml                      |  31 ++++-
 .github/workflows/cluster-test/mysql/Dockerfile    |  47 ++++++++
 .../workflows/cluster-test/mysql/deploy.sh         |  42 ++++---
 .../cluster-test/mysql/docker-compose-base.yaml    |  61 +++++-----
 .../cluster-test/mysql/docker-compose-cluster.yaml |  20 ++--
 .../cluster-test/mysql}/dolphinscheduler_env.sh    |  14 +--
 .../workflows/cluster-test/mysql}/install_env.sh   |  12 +-
 .../workflows/cluster-test/mysql/running_test.sh   |  81 +++++++++++++
 .../development/development-environment-setup.md   |   6 +-
 docs/docs/en/guide/installation/kubernetes.md      |  89 +++++++--------
 docs/docs/en/guide/installation/pseudo-cluster.md  |   6 +-
 docs/docs/en/guide/parameter/local.md              |  25 ++--
 docs/docs/en/guide/project/workflow-definition.md  |   2 +-
 docs/docs/en/guide/task/datax.md                   | 126 ++++++++++-----------
 .../development/development-environment-setup.md   |   6 +-
 docs/docs/zh/guide/installation/kubernetes.md      |  91 +++++++--------
 docs/docs/zh/guide/installation/pseudo-cluster.md  |   6 +-
 docs/docs/zh/guide/parameter/local.md              |  25 ++--
 docs/docs/zh/guide/project/workflow-definition.md  |   2 +-
 docs/docs/zh/guide/task/datax.md                   | 122 ++++++++++----------
 docs/img/datax_edit.png                            | Bin 478215 -> 0 bytes
 docs/img/global_parameter_en.png                   | Bin 219165 -> 0 bytes
 .../img/new_ui/dev/parameter/local_parameter01.png | Bin 0 -> 642098 bytes
 .../img/new_ui/dev/parameter/local_parameter02.png | Bin 0 -> 303057 bytes
 docs/img/supplement_local_parameter.png            | Bin 228552 -> 0 bytes
 docs/img/supplement_local_parameter_en.png         | Bin 246045 -> 0 bytes
 .../controller/ProcessDefinitionController.java    |  14 +--
 .../apache/dolphinscheduler/api/enums/Status.java  |   2 +-
 .../dolphinscheduler/api/python/PythonGateway.java |   3 +-
 .../service/impl/ProcessDefinitionServiceImpl.java |   2 +-
 .../pydolphinscheduler/docs/source/config.rst      |  68 +++++------
 .../src/pydolphinscheduler/constants.py            |   7 --
 .../src/pydolphinscheduler/core/configuration.py   |   3 +
 .../pydolphinscheduler/core/default_config.yaml    |   6 +
 .../pydolphinscheduler/core/process_definition.py  |  26 ++++-
 .../tests/core/test_process_definition.py          |  41 ++++++-
 .../tests/utils/test_yaml_parser.py                |   9 +-
 .../src/main/bin/create-schema.sh                  |  28 -----
 .../src/main/bin/upgrade-schema.sh                 |   5 +-
 .../tools/datasource/CreateDolphinScheduler.java   |  55 ---------
 .../tools/datasource/InitDolphinScheduler.java     |   2 +
 .../tools/datasource/UpgradeDolphinScheduler.java  |  11 +-
 .../src/main/resources/application.yaml            |   9 +-
 .../src/components/chart/modules/Tree.tsx          |   2 +-
 dolphinscheduler-ui/src/locales/modules/en_US.ts   |   9 +-
 dolphinscheduler-ui/src/locales/modules/zh_CN.ts   |  11 +-
 .../src/views/data-quality/rule/use-table.ts       |   1 +
 .../views/data-quality/task-result/use-table.ts    |   1 +
 .../monitor/statistics/audit-log/use-table.ts      |   1 +
 .../task/components/node/fields/use-rules.ts       |   5 +-
 .../src/main/resources/application.yaml            |  10 ++
 script/scp-hosts.sh                                |   4 +-
 script/start-all.sh                                |   8 +-
 script/stop-all.sh                                 |   8 +-
 54 files changed, 675 insertions(+), 490 deletions(-)
 create mode 100644 .github/workflows/cluster-test/mysql/Dockerfile
 copy script/install.sh => .github/workflows/cluster-test/mysql/deploy.sh (52%)
 mode change 100755 => 100644
 copy dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/resources/docker/datasource-mysql/docker-compose.yaml => .github/workflows/cluster-test/mysql/docker-compose-base.yaml (58%)
 copy dolphinscheduler-alert/dolphinscheduler-alert-plugins/dolphinscheduler-alert-script/src/test/script/shell/test.sh => .github/workflows/cluster-test/mysql/docker-compose-cluster.yaml (80%)
 mode change 100755 => 100644
 copy {script/env => .github/workflows/cluster-test/mysql}/dolphinscheduler_env.sh (83%)
 copy {script/env => .github/workflows/cluster-test/mysql}/install_env.sh (92%)
 create mode 100644 .github/workflows/cluster-test/mysql/running_test.sh
 delete mode 100644 docs/img/datax_edit.png
 delete mode 100644 docs/img/global_parameter_en.png
 create mode 100644 docs/img/new_ui/dev/parameter/local_parameter01.png
 create mode 100644 docs/img/new_ui/dev/parameter/local_parameter02.png
 delete mode 100644 docs/img/supplement_local_parameter.png
 delete mode 100644 docs/img/supplement_local_parameter_en.png
 delete mode 100755 dolphinscheduler-tools/src/main/bin/create-schema.sh
 delete mode 100644 dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/CreateDolphinScheduler.java


[dolphinscheduler] 02/16: [Fix][UI] Fix data-quality component multi-language support. (#10202)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 232a7bdb606488377c3852d1509367671823cd6a
Author: songjianet <17...@qq.com>
AuthorDate: Mon May 23 18:17:30 2022 +0800

    [Fix][UI] Fix data-quality component multi-language support. (#10202)
    
    * [Fix][UI] Fix data-quality component multi-language support.
    
    (cherry picked from commit c8a4c694eaac41ce9249b0cb41ad0da04b6d117c)
---
 dolphinscheduler-ui/src/locales/modules/en_US.ts                 | 9 ++++++++-
 dolphinscheduler-ui/src/locales/modules/zh_CN.ts                 | 9 ++++++++-
 .../src/views/projects/task/components/node/fields/use-rules.ts  | 5 ++---
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/dolphinscheduler-ui/src/locales/modules/en_US.ts b/dolphinscheduler-ui/src/locales/modules/en_US.ts
index f52932aa12..8223e4de8c 100644
--- a/dolphinscheduler-ui/src/locales/modules/en_US.ts
+++ b/dolphinscheduler-ui/src/locales/modules/en_US.ts
@@ -947,7 +947,14 @@ const project = {
     sql_parameter: 'SQL Parameter',
     format_tips: 'Please enter format',
     udf_function: 'UDF Function',
-    unlimited: 'unlimited'
+    unlimited: 'unlimited',
+    please_select_source_connector_type: 'Please select source connector type',
+    please_select_source_datasource_id: 'Please select source datasource id',
+    please_enter_source_table_name: 'Please select source table name',
+    please_enter_filter_expression: 'Please enter filter expression',
+    please_enter_column_only_single_column_is_supported: 'Please select column, only single column is supported',
+    please_enter_threshold_number_is_needed: 'Please enter threshold number is needed',
+    please_enter_comparison_title: 'please select comparison title'
   }
 }
 
diff --git a/dolphinscheduler-ui/src/locales/modules/zh_CN.ts b/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
index b39d9df0d3..2f4177a99b 100644
--- a/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
+++ b/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
@@ -936,7 +936,14 @@ const project = {
     sql_parameter: 'sql参数',
     format_tips: '请输入格式为',
     udf_function: 'UDF函数',
-    unlimited: '不限制'
+    unlimited: '不限制',
+    please_select_source_connector_type: '请选择源数据类型',
+    please_select_source_datasource_id: '请选择源数据源',
+    please_enter_source_table_name: '请选择源数据表',
+    please_enter_filter_expression: '请输入源表过滤条件',
+    please_enter_column_only_single_column_is_supported: '请选择源表检测列',
+    please_enter_threshold_number_is_needed: '请输入阈值',
+    please_enter_comparison_title: '请选择期望值类型'
   }
 }
 
diff --git a/dolphinscheduler-ui/src/views/projects/task/components/node/fields/use-rules.ts b/dolphinscheduler-ui/src/views/projects/task/components/node/fields/use-rules.ts
index 607aaf4ba5..78c9592a6c 100644
--- a/dolphinscheduler-ui/src/views/projects/task/components/node/fields/use-rules.ts
+++ b/dolphinscheduler-ui/src/views/projects/task/components/node/fields/use-rules.ts
@@ -67,9 +67,7 @@ export function useRules(
     if (ruleLoading.value) return
     ruleLoading.value = true
     const result = await getRuleFormCreateJson(ruleId)
-    const items = JSON.parse(result).map((item: IResponseJsonItem) =>
-      formatResponseJson(item)
-    )
+    const items = JSON.parse(result).map((item: IResponseJsonItem) => formatResponseJson(item))
     updateRules(items, preItemLen)
     preItemLen = items.length
     ruleLoading.value = false
@@ -78,6 +76,7 @@ export function useRules(
   const formatResponseJson = (
     responseItem: IResponseJsonItem
   ): IJsonItemParams => {
+    responseItem.props.placeholder = t('project.node.' + responseItem.props.placeholder.split(' ').join('_').split(',').join('').toLowerCase())
     const item: IJsonItemParams = {
       field: responseItem.field,
       options: responseItem.options,

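The placeholder fix above derives each locale key from the server-supplied placeholder text: spaces become underscores, commas are dropped, and the result is lower-cased before the `project.node.` prefix is added. A minimal shell sketch of the same transform (illustration only; the value is one of the en_US entries above, not part of the commit):

    # mirror of .split(' ').join('_').split(',').join('').toLowerCase() in use-rules.ts
    placeholder="Please select source connector type"
    key=$(echo "$placeholder" | tr ' ' '_' | tr -d ',' | tr '[:upper:]' '[:lower:]')
    echo "project.node.$key"   # -> project.node.please_select_source_connector_type
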

[dolphinscheduler] 11/16: [Fix-10103][k8s]Fix k8s Change DataSource Error (#10128)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 5d8679fba9ac5f03e60daff8e4c85b32fbd9aedc
Author: 旺阳 <qi...@cisco.com>
AuthorDate: Tue May 24 13:59:42 2022 +0800

    [Fix-10103][k8s]Fix k8s Change DataSource Error (#10128)
    
    (cherry picked from commit de5507fb1965fd993ed5e07f31f2167642c87130)
---
 dolphinscheduler-tools/src/main/bin/upgrade-schema.sh       |  6 ++----
 dolphinscheduler-tools/src/main/resources/application.yaml  |  9 ++++++++-
 dolphinscheduler-worker/src/main/resources/application.yaml | 10 ++++++++++
 3 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh b/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
index 355ebfe379..055157bc2b 100755
--- a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
+++ b/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
@@ -19,13 +19,11 @@
 BIN_DIR=$(dirname $0)
 DOLPHINSCHEDULER_HOME=${DOLPHINSCHEDULER_HOME:-$(cd $BIN_DIR/../..; pwd)}
 
-if [ "$DOCKER" != "true" ]; then
-  source "$DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh"
-fi
+source "$DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh"
 
 JAVA_OPTS=${JAVA_OPTS:-"-server -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms1g -Xmx1g -Xmn512m -XX:+PrintGCDetails -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=dump.hprof"}
 
 java $JAVA_OPTS \
   -cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
-  -Dspring.profiles.active=upgrade \
+  -Dspring.profiles.active=upgrade,${DATABASE} \
   org.apache.dolphinscheduler.tools.datasource.UpgradeDolphinScheduler
diff --git a/dolphinscheduler-tools/src/main/resources/application.yaml b/dolphinscheduler-tools/src/main/resources/application.yaml
index 2c26aed0f3..45327b5848 100644
--- a/dolphinscheduler-tools/src/main/resources/application.yaml
+++ b/dolphinscheduler-tools/src/main/resources/application.yaml
@@ -43,4 +43,11 @@ spring:
       on-profile: mysql
   datasource:
     driver-class-name: com.mysql.jdbc.Driver
-    url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+
+---
+spring:
+  config:
+    activate:
+      on-profile: postgresql
+  datasource:
+    driver-class-name: org.postgresql.Driver
diff --git a/dolphinscheduler-worker/src/main/resources/application.yaml b/dolphinscheduler-worker/src/main/resources/application.yaml
index f0be99cef1..78965d237c 100644
--- a/dolphinscheduler-worker/src/main/resources/application.yaml
+++ b/dolphinscheduler-worker/src/main/resources/application.yaml
@@ -89,3 +89,13 @@ management:
 
 metrics:
   enabled: true
+
+# Override by profile
+
+---
+spring:
+  config:
+    activate:
+      on-profile: mysql
+  datasource:
+    driver-class-name: com.mysql.jdbc.Driver


[dolphinscheduler] 06/16: fix #10214 Delete online workflows in batches, prompt information is accurate (#10215)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 424ef272578a10bed57b7d1cd14c6e57ac52017c
Author: Mr.An <42...@users.noreply.github.com>
AuthorDate: Tue May 24 10:18:03 2022 +0800

    fix #10214 Delete online workflows in batches, prompt information is accurate (#10215)
    
    (cherry picked from commit edcb7aad23f73fb166efc3863dfb58e137ca05b4)
---
 .../dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
index 955af7c3ad..ad05c456f5 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
@@ -711,7 +711,7 @@ public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements Pro
 
         // check process definition is already online
         if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
-            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, String.valueOf(code));
+            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinition.getName());
             return result;
         }
         // check process instances is already running


[dolphinscheduler] 01/16: [Doc] Add compile supporting system (#10188)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit b598b27c081f4ad8e39f9b2032a7ab23680ef6d4
Author: xiangzihao <46...@qq.com>
AuthorDate: Mon May 23 15:17:37 2022 +0800

    [Doc] Add compile supporting system (#10188)
    
    (cherry picked from commit 41a318381067b285a03001e9ef43d8eaa107ff6a)
---
 docs/docs/en/development/development-environment-setup.md | 6 +++++-
 docs/docs/zh/development/development-environment-setup.md | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/docs/docs/en/development/development-environment-setup.md b/docs/docs/en/development/development-environment-setup.md
index 102dead641..21bd17f748 100644
--- a/docs/docs/en/development/development-environment-setup.md
+++ b/docs/docs/en/development/development-environment-setup.md
@@ -21,7 +21,11 @@ git clone git@github.com:apache/dolphinscheduler.git
 
 ### compile source code
 
-ii. Run `mvn clean install -Prelease -Dmaven.test.skip=true`
+Supporting system:
+* MacOS
+* Linux
+
+Run `mvn clean install -Prelease -Dmaven.test.skip=true`
 
 ## Docker image build
 
diff --git a/docs/docs/zh/development/development-environment-setup.md b/docs/docs/zh/development/development-environment-setup.md
index af5c4feb92..29ef58a240 100644
--- a/docs/docs/zh/development/development-environment-setup.md
+++ b/docs/docs/zh/development/development-environment-setup.md
@@ -21,7 +21,11 @@ git clone git@github.com:apache/dolphinscheduler.git
 
 ### 编译源码
 
-* 运行 `mvn clean install -Prelease -Dmaven.test.skip=true`
+支持的系统:
+* MacOS
+* Linux
+
+运行 `mvn clean install -Prelease -Dmaven.test.skip=true`
 
 ## Docker镜像构建
 

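Combined with the clone step earlier in the same document, the compile flow on a supported system (macOS or Linux) amounts to the following sketch, assuming a JDK and Maven are already installed:

    git clone git@github.com:apache/dolphinscheduler.git
    cd dolphinscheduler
    mvn clean install -Prelease -Dmaven.test.skip=true
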

[dolphinscheduler] 03/16: [doc] Update delete workflow description (#10203)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 1f8ee855f054a0959a882d252053e58b87c90ca9
Author: Mr.An <42...@users.noreply.github.com>
AuthorDate: Mon May 23 20:59:19 2022 +0800

    [doc] Update delete workflow description (#10203)
    
    (cherry picked from commit 31cb4853129fb01e21be523d1f87c72258d7fe01)
---
 docs/docs/en/guide/project/workflow-definition.md | 2 +-
 docs/docs/zh/guide/project/workflow-definition.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/docs/en/guide/project/workflow-definition.md b/docs/docs/en/guide/project/workflow-definition.md
index f06965c1eb..0f15cffcbb 100644
--- a/docs/docs/en/guide/project/workflow-definition.md
+++ b/docs/docs/en/guide/project/workflow-definition.md
@@ -48,7 +48,7 @@ The following are the operation functions of the workflow definition list:
 - **Run:** Only workflow in the online state can run. See [2.3.3 Run Workflow](#run-the-workflow) for the operation steps.
 - **Timing:** Timing can only set to online workflows, and the system automatically schedules to run the workflow on time. The status after creating a timing setting is "offline", and the timing must set online on the timing management page to make effect. See [2.3.4 Workflow Timing](#workflow-timing) for timing operation steps
 - **Timing Management:** The timing management page can edit, online or offline and delete timing
-- **Delete:** Delete the workflow definition
+- **Delete:** Delete the workflow definition. In the same project, only the workflow definition created by yourself can be deleted, and the workflow definition of other users cannot be deleted. If you need to delete it, please contact the user who created it or the administrator.
 - **Download:** Download workflow definition to local
 - **Tree Diagram:** Display the task node type and task status in a tree structure, as shown in the figure below:
 
diff --git a/docs/docs/zh/guide/project/workflow-definition.md b/docs/docs/zh/guide/project/workflow-definition.md
index a14decf10b..da04be2867 100644
--- a/docs/docs/zh/guide/project/workflow-definition.md
+++ b/docs/docs/zh/guide/project/workflow-definition.md
@@ -48,7 +48,7 @@
 - **运行:** 只有上线的工作流能运行。运行操作步骤见运行工作流
 - **定时:** 只有上线的工作流能设置定时,系统自动定时调度工作流运行。创建定时后的状态为"下线",需在定时管理页面上线定时才生效。定时操作步骤见工作流定时
 - **定时管理:** 定时管理页面可编辑、上线/下线、删除定时。
-- **删除:** 删除工作流定义。
+- **删除:** 删除工作流定义。在同一个项目中,只能删除自己创建的工作流定义,其他用户的工作流定义不能进行删除,如果需要删除请联系创建用户或者管理员。
 - **下载:** 下载工作流定义到本地。
 - **树形图:** 以树形结构展示任务节点的类型及任务状态,如下图所示:
 


[dolphinscheduler] 09/16: [doc] Update task DataX document (#10218)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit f7f3305569c961d8df3d09c87c26b45b36150570
Author: Jiajie Zhong <zh...@hotmail.com>
AuthorDate: Tue May 24 13:45:26 2022 +0800

    [doc] Update task DataX document (#10218)
    
    (cherry picked from commit f4b7754952f16f60f1850d672e25e05a2088c49d)
---
 docs/docs/en/guide/task/datax.md | 126 +++++++++++++++++++--------------------
 docs/docs/zh/guide/task/datax.md | 122 +++++++++++++++++++------------------
 docs/img/datax_edit.png          | Bin 478215 -> 0 bytes
 3 files changed, 126 insertions(+), 122 deletions(-)

diff --git a/docs/docs/en/guide/task/datax.md b/docs/docs/en/guide/task/datax.md
index 20cdec8588..2413d360c9 100644
--- a/docs/docs/en/guide/task/datax.md
+++ b/docs/docs/en/guide/task/datax.md
@@ -1,63 +1,63 @@
-# DataX
-
-## Overview
-
-DataX task type for executing DataX programs. For DataX nodes, the worker will execute `${DATAX_HOME}/bin/datax.py` to analyze the input json file.
-
-## Create Task
-
-- Click `Project -> Management-Project -> Name-Workflow Definition`, and click the `Create Workflow` button to enter the DAG editing page.
-- Drag from the toolbar <img src="/img/tasks/icons/datax.png" width="15"/> task node to canvas.
-
-## Task Parameter
-
-- **Node name**: The node name in a workflow definition is unique.
-- **Run flag**: Identifies whether this node schedules normally, if it does not need to execute, select the `prohibition execution`.
-- **Descriptive information**: Describe the function of the node.
-- **Task priority**: When the number of worker threads is insufficient, execute in the order of priority from high to low, and tasks with the same priority will execute in a first-in first-out order.
-- **Worker grouping**: Assign tasks to the machines of the worker group to execute. If `Default` is selected, randomly select a worker machine for execution.
-- **Environment Name**: Configure the environment name in which run the script.
-- **Times of failed retry attempts**: The number of times the task failed to resubmit.
-- **Failed retry interval**: The time interval (unit minute) for resubmitting the task after a failed task.
-- **Delayed execution time**: The time (unit minute) that a task delays in execution.
-- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task runs exceed the "timeout", an alarm email will send and the task execution will fail.
-- **Custom template**: Customize the content of the DataX node's JSON profile when the default DataSource provided does not meet the requirements.
-- **JSON**: JSON configuration file for DataX synchronization.
-- **Custom parameters**: SQL task type, and stored procedure is a custom parameter order, to set customized parameter type and data type for the method is the same as the stored procedure task type. The difference is that the custom parameter of the SQL task type replaces the `${variable}` in the SQL statement.
-- **Data source**: Select the data source to extract data.
-- **SQL statement**: The SQL statement used to extract data from the target database, the SQL query column name is automatically parsed when execute the node, and mapped to the target table to synchronize column name. When the column names of the source table and the target table are inconsistent, they can be converted by column alias (as)
-- **Target library**: Select the target library for data synchronization.
-- **Pre-SQL**: Pre-SQL executes before the SQL statement (executed by the target database).
-- **Post-SQL**: Post-SQL executes after the SQL statement (executed by the target database).
-- **Stream limit (number of bytes)**: Limit the number of bytes for a query.
-- **Limit flow (number of records)**: Limit the number of records for a query.
-- **Running memory**: Set the minimum and maximum memory required, which can be set according to the actual production environment.
-- **Predecessor task**: Selecting a predecessor task for the current task, will set the selected predecessor task as upstream of the current task.
-
-## Task Example
-
-This example demonstrates how to import data from Hive into MySQL.
-
-### Configure the DataX environment in DolphinScheduler
-
-If you are using the DataX task type in a production environment, it is necessary to configure the required environment first. The following is the configuration file: `bin/env/dolphinscheduler_env.sh`.
-
-![datax_task01](/img/tasks/demo/datax_task01.png)
-
-After finish the environment configuration, need to restart DolphinScheduler.
-
-### Configure DataX Task Node
-
-As the default DataSource does not contain data read from Hive, require a custom JSON, refer to: [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note: Partition directories exist on the HDFS path, when importing data in real world situations, partitioning is recommended to be passed as a parameter, using custom parameters.
-
-After finish the required JSON file, you can configure the node by following the steps in the diagram below:
-
-![datax_task02](/img/tasks/demo/datax_task02.png)
-
-### View Execution Result
-
-![datax_task03](/img/tasks/demo/datax_task03.png)
-
-### Notice
-
-If the default DataSource provided does not meet your needs, you can configure the writer and reader of the DataX according to the actual usage environment in the custom template options, available at [DataX](https://github.com/alibaba/DataX).
+# DataX
+
+## Overview
+
+DataX task type for executing DataX programs. For DataX nodes, the worker will execute `${DATAX_HOME}/bin/datax.py` to analyze the input json file.
+
+## Create Task
+
+- Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag the <img src="/img/tasks/icons/datax.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- **Node name**: The node name in a workflow definition is unique.
+- **Run flag**: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- **Descriptive information**: describe the function of the node.
+- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- **Environment Name**: Configure the environment name in which to run the script.
+- **Number of failed retry attempts**: The number of times the task failed to be resubmitted.
+- **Failed retry interval**: The time interval, in minutes, for resubmitting the task after a failed task.
+- **Delayed execution time**: The time, in minutes, that a task is delayed in execution.
+- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- **Custom template**: Customize the content of the DataX node's json profile when the default data source provided does not meet the requirements.
+- **json**: json configuration file for DataX synchronization.
+- **Custom parameters**: SQL task type, and stored procedure is a custom parameter order to set values for the method. The custom parameter type and data type are the same as the stored procedure task type. The difference is that the SQL task type custom parameter will replace the \${variable} in the SQL statement.
+- **Data source**: Select the data source from which the data will be extracted.
+- **sql statement**: the sql statement used to extract data from the target database, the sql query column name is automatically parsed when the node is executed, and mapped to the target table synchronization column name. When the source table and target table column names are inconsistent, they can be converted by column alias.
+- **Target library**: Select the target library for data synchronization.
+- **Pre-sql**: Pre-sql is executed before the sql statement (executed by the target library).
+- **Post-sql**: Post-sql is executed after the sql statement (executed by the target library).
+- **Stream limit (number of bytes)**: Limits the number of bytes in the query.
+- **Limit flow (number of records)**: Limit the number of records for a query.
+- **Running memory**: the minimum and maximum memory required can be configured to suit the actual production environment.
+- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
+
+## Task Example
+
+This example demonstrates importing data from Hive into MySQL.
+
+### Configuring the DataX environment in DolphinScheduler
+
+If you are using the DataX task type in a production environment, it is necessary to configure the required environment first. The configuration file is as follows: `/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
+
+![datax_task01](/img/tasks/demo/datax_task01.png)
+
+After the environment has been configured, DolphinScheduler needs to be restarted.
+
+### Configuring DataX Task Node
+
+As the default data source does not contain data to be read from Hive, a custom json is required, refer to: [HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md). Note: Partition directories exist on the HDFS path, when importing data in real world situations, partitioning is recommended to be passed as a parameter, using custom parameters.
+
+After writing the required json file, you can configure the node content by following the steps in the diagram below.
+
+![datax_task02](/img/tasks/demo/datax_task02.png)
+
+### View run results
+
+![datax_task03](/img/tasks/demo/datax_task03.png)
+
+### Notice
+
+If the default data source provided does not meet your needs, you can configure the writer and reader of DataX according to the actual usage environment in the custom template option, available at https://github.com/alibaba/DataX.
diff --git a/docs/docs/zh/guide/task/datax.md b/docs/docs/zh/guide/task/datax.md
index 5a9e167980..fa1d62a42c 100644
--- a/docs/docs/zh/guide/task/datax.md
+++ b/docs/docs/zh/guide/task/datax.md
@@ -1,59 +1,63 @@
-# DATAX 节点
-
-## 综述
-
-DataX 任务类型,用于执行 DataX 程序。对于 DataX 节点,worker 会通过执行 `${DATAX_HOME}/bin/datax.py` 来解析传入的 json 文件。
-
-## 创建任务
-
-- 点击项目管理 -> 项目名称 -> 工作流定义,点击“创建工作流”按钮,进入 DAG 编辑页面;
-- 拖动工具栏的<img src="/img/tasks/icons/datax.png" width="15"/> 任务节点到画板中。
-
-## 任务参数
-
-- 节点名称:设置任务节点的名称。一个工作流定义中的节点名称是唯一的。
-- 运行标志:标识这个结点是否能正常调度,如果不需要执行,可以打开禁止执行开关。
-- 描述:描述该节点的功能。
-- 任务优先级:worker 线程数不足时,根据优先级从高到低依次执行,优先级一样时根据先进先出原则执行。
-- Worker 分组:任务分配给 worker 组的机器执行,选择 Default ,会随机选择一台 worker 机执行。
-- 环境名称:配置运行脚本的环境。
-- 失败重试次数:任务失败重新提交的次数。
-- 失败重试间隔:任务失败重新提交任务的时间间隔,以分为单位。
-- 延时执行时间:任务延迟执行的时间,以分为单位。
-- 超时警告:勾选超时警告、超时失败,当任务超过“超时时长”后,会发送告警邮件并且任务执行失败。
-- 自定义模板:当默认提供的数据源不满足所需要求的时,可自定义 datax 节点的 json 配置文件内容。
-- json:DataX 同步的 json 配置文件。
-- 自定义参数:sql 任务类型,而存储过程是自定义参数顺序的给方法设置值自定义参数类型和数据类型同存储过程任务类型一样。区别在于SQL任务类型自定义参数会替换 sql 语句中 ${变量}。
-- 数据源:选择抽取数据的数据源。
-- sql 语句:目标库抽取数据的 sql 语句,节点执行时自动解析 sql 查询列名,映射为目标表同步列名,源表和目标表列名不一致时,可以通过列别名(as)转换。
-- 目标库:选择数据同步的目标库。
-- 目标库前置 sql:前置 sql 在 sql 语句之前执行(目标库执行)。
-- 目标库后置 sql:后置 sql 在 sql 语句之后执行(目标库执行)。
-- 限流(字节数):限制查询的字节数。
-- 限流(记录数):限制查询的记录数。
-- 运行内存:可根据实际生产环境配置所需的最小和最大内存。
-- 前置任务:选择当前任务的前置任务,会将被选择的前置任务设置为当前任务的上游。
-
-## 任务样例
-
-该样例演示为从 Hive 数据导入到 MySQL 中。
-
-### 在 DolphinScheduler 中配置 DataX 环境
-
-若生产环境中要是使用到 DataX 任务类型,则需要先配置好所需的环境。配置文件如下:`bin/env/dolphinscheduler_env.sh`。
-
-![datax_task01](/img/tasks/demo/datax_task01.png)
-
-  <p align="center">
-   <img src="/img/datax_edit.png" width="80%" />
-  </p>
-
-- 自定义模板:打开自定义模板开关时,可以自定义datax节点的json配置文件内容(适用于控件配置不满足需求时)
-- 数据源:选择抽取数据的数据源
-- sql语句:目标库抽取数据的sql语句,节点执行时自动解析sql查询列名,映射为目标表同步列名,源表和目标表列名不一致时,可以通过列别名(as)转换
-- 目标库:选择数据同步的目标库
-- 目标表:数据同步的目标表名
-- 前置sql:前置sql在sql语句之前执行(目标库执行)。
-- 后置sql:后置sql在sql语句之后执行(目标库执行)。
-- json:datax同步的json配置文件
-- 自定义参数:SQL任务类型,而存储过程是自定义参数顺序的给方法设置值自定义参数类型和数据类型同存储过程任务类型一样。区别在于SQL任务类型自定义参数会替换sql语句中${变量}。
\ No newline at end of file
+# DATAX 节点
+
+## 综述
+
+DataX 任务类型,用于执行 DataX 程序。对于 DataX 节点,worker 会通过执行 `${DATAX_HOME}/bin/datax.py` 来解析传入的 json 文件。
+
+## 创建任务
+
+- 点击项目管理 -> 项目名称 -> 工作流定义,点击“创建工作流”按钮,进入 DAG 编辑页面;
+- 拖动工具栏的<img src="/img/tasks/icons/datax.png" width="15"/> 任务节点到画板中。
+
+## 任务参数
+
+- 节点名称:设置任务节点的名称。一个工作流定义中的节点名称是唯一的。
+- 运行标志:标识这个结点是否能正常调度,如果不需要执行,可以打开禁止执行开关。
+- 描述:描述该节点的功能。
+- 任务优先级:worker 线程数不足时,根据优先级从高到低依次执行,优先级一样时根据先进先出原则执行。
+- Worker 分组:任务分配给 worker 组的机器执行,选择 Default ,会随机选择一台 worker 机执行。
+- 环境名称:配置运行脚本的环境。
+- 失败重试次数:任务失败重新提交的次数。
+- 失败重试间隔:任务失败重新提交任务的时间间隔,以分为单位。
+- 延时执行时间:任务延迟执行的时间,以分为单位。
+- 超时警告:勾选超时警告、超时失败,当任务超过“超时时长”后,会发送告警邮件并且任务执行失败。
+- 自定义模板:当默认提供的数据源不满足所需要求的时,可自定义 datax 节点的 json 配置文件内容。
+- json:DataX 同步的 json 配置文件。
+- 自定义参数:sql 任务类型,而存储过程是自定义参数顺序的给方法设置值自定义参数类型和数据类型同存储过程任务类型一样。区别在于SQL任务类型自定义参数会替换 sql 语句中 ${变量}。
+- 数据源:选择抽取数据的数据源。
+- sql 语句:目标库抽取数据的 sql 语句,节点执行时自动解析 sql 查询列名,映射为目标表同步列名,源表和目标表列名不一致时,可以通过列别名(as)转换。
+- 目标库:选择数据同步的目标库。
+- 目标库前置 sql:前置 sql 在 sql 语句之前执行(目标库执行)。
+- 目标库后置 sql:后置 sql 在 sql 语句之后执行(目标库执行)。
+- 限流(字节数):限制查询的字节数。
+- 限流(记录数):限制查询的记录数。
+- 运行内存:可根据实际生产环境配置所需的最小和最大内存。
+- 前置任务:选择当前任务的前置任务,会将被选择的前置任务设置为当前任务的上游。
+
+## 任务样例
+
+该样例演示为从 Hive 数据导入到 MySQL 中。
+
+### 在 DolphinScheduler 中配置 DataX 环境
+
+若生产环境中要是使用到 DataX 任务类型,则需要先配置好所需的环境。配置文件如下:`/dolphinscheduler/conf/env/dolphinscheduler_env.sh`。
+
+![datax_task01](/img/tasks/demo/datax_task01.png)
+
+当环境配置完成之后,需要重启 DolphinScheduler。
+
+### 配置 DataX 任务节点
+
+由于默认的的数据源中并不包含从 Hive 中读取数据,所以需要自定义 json,可参考:[HDFS Writer](https://github.com/alibaba/DataX/blob/master/hdfswriter/doc/hdfswriter.md)。其中需要注意的是 HDFS 路径上存在分区目录,在实际情况导入数据时,分区建议进行传参,即使用自定义参数。
+
+在编写好所需的 json 之后,可按照下图步骤进行配置节点内容。
+
+![datax_task02](/img/tasks/demo/datax_task02.png)
+
+### 查看运行结果
+
+![datax_task03](/img/tasks/demo/datax_task03.png)
+
+## 注意事项:
+
+若默认提供的数据源不满足需求,可在自定义模板选项中,根据实际使用环境来配置 DataX 的 writer 和 reader,可参考:https://github.com/alibaba/DataX
\ No newline at end of file
diff --git a/docs/img/datax_edit.png b/docs/img/datax_edit.png
deleted file mode 100644
index fbda73419d..0000000000
Binary files a/docs/img/datax_edit.png and /dev/null differ

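The updated DataX doc points users at dolphinscheduler_env.sh for the required environment. A minimal sketch of the relevant exports, reusing the illustrative default paths from the sample env file later in this digest:

    # the worker resolves ${DATAX_HOME}/bin/datax.py for DataX task nodes
    export DATAX_HOME=${DATAX_HOME:-/opt/soft/datax}
    export PYTHON_HOME=${PYTHON_HOME:-/opt/soft/python}
    export PATH=$PYTHON_HOME/bin:$DATAX_HOME/bin:$PATH
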

[dolphinscheduler] 08/16: [Fix][UI] add assignment of totalPage (#10165)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 844ce4cdd7b4b1725ad275777747186c2f5aff54
Author: rockfang <65...@qq.com>
AuthorDate: Tue May 24 10:48:34 2022 +0800

    [Fix][UI] add assignment of totalPage (#10165)
    
    (cherry picked from commit 7fc3664ec366ffc7abbd8732d47e56f0f313bab7)
---
 dolphinscheduler-ui/src/views/data-quality/rule/use-table.ts            | 1 +
 dolphinscheduler-ui/src/views/data-quality/task-result/use-table.ts     | 1 +
 dolphinscheduler-ui/src/views/monitor/statistics/audit-log/use-table.ts | 1 +
 3 files changed, 3 insertions(+)

diff --git a/dolphinscheduler-ui/src/views/data-quality/rule/use-table.ts b/dolphinscheduler-ui/src/views/data-quality/rule/use-table.ts
index fbc10a7b7b..dac1f557eb 100644
--- a/dolphinscheduler-ui/src/views/data-quality/rule/use-table.ts
+++ b/dolphinscheduler-ui/src/views/data-quality/rule/use-table.ts
@@ -110,6 +110,7 @@ export function useTable(viewRuleEntry = (unusedRuleJson: string): void => {}) {
 
     const { state } = useAsyncState(
       queryRuleListPaging(data).then((res: RuleRes) => {
+        variables.totalPage = res.totalPage
         variables.tableData = res.totalList.map((item, unused) => {
           const ruleName =
             'data_quality.rule.' + item.name.substring(3, item.name.length - 1)
diff --git a/dolphinscheduler-ui/src/views/data-quality/task-result/use-table.ts b/dolphinscheduler-ui/src/views/data-quality/task-result/use-table.ts
index e5472e842f..5919d74eed 100644
--- a/dolphinscheduler-ui/src/views/data-quality/task-result/use-table.ts
+++ b/dolphinscheduler-ui/src/views/data-quality/task-result/use-table.ts
@@ -210,6 +210,7 @@ export function useTable() {
 
     const { state } = useAsyncState(
       queryExecuteResultListPaging(data).then((res: ResultListRes) => {
+        variables.totalPage = res.totalPage
         variables.tableData = res.totalList.map((item, unused) => {
           return {
             ...item
diff --git a/dolphinscheduler-ui/src/views/monitor/statistics/audit-log/use-table.ts b/dolphinscheduler-ui/src/views/monitor/statistics/audit-log/use-table.ts
index f45b2ee3dc..87e5215030 100644
--- a/dolphinscheduler-ui/src/views/monitor/statistics/audit-log/use-table.ts
+++ b/dolphinscheduler-ui/src/views/monitor/statistics/audit-log/use-table.ts
@@ -88,6 +88,7 @@ export function useTable() {
 
     const { state } = useAsyncState(
       queryAuditLogListPaging(data).then((res: AuditListRes) => {
+        variables.totalPage = res.totalPage
         variables.tableData = res.totalList.map((item, unused) => {
           return {
             ...item


[dolphinscheduler] 15/16: [fix-10207] When the registered alarm plugin is empty, the error message is not clear (#10224)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 9eaac1e1b41e92532480aa9240d914e398abf6f0
Author: Mr.An <42...@users.noreply.github.com>
AuthorDate: Tue May 24 18:03:04 2022 +0800

    [fix-10207] When the registered alarm plugin is empty, the error message is not clear (#10224)
    
    * Improve the problem that the error message is not clear when the registered alarm plugin is empty
    
    * update error info
    
    * Update Status.java
    
    fix typo
    
    Co-authored-by: xiangzihao <46...@qq.com>
    (cherry picked from commit 14e80ab5fd3a6c068fe168fb76c5b9c836edf113)
---
 .../src/main/java/org/apache/dolphinscheduler/api/enums/Status.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
index 0847d6920d..3315ef671e 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
@@ -329,7 +329,7 @@ public enum Status {
 
     //plugin
     PLUGIN_NOT_A_UI_COMPONENT(110001, "query plugin error, this plugin has no UI component", "查询插件错误,此插件无UI组件"),
-    QUERY_PLUGINS_RESULT_IS_NULL(110002, "query plugins result is null", "查询插件为空"),
+    QUERY_PLUGINS_RESULT_IS_NULL(110002, "query alarm plugins result is empty, please check the startup status of the alarm component and confirm that the relevant alarm plugin is successfully registered", "查询告警插件为空, 请检查告警组件启动状态并确认相关告警插件已注册成功"),
     QUERY_PLUGINS_ERROR(110003, "query plugins error", "查询插件错误"),
     QUERY_PLUGIN_DETAIL_RESULT_IS_NULL(110004, "query plugin detail result is null", "查询插件详情结果为空"),
 


[dolphinscheduler] 10/16: [Feature-9474] [CI] Add cluster test script verify on shell script (#9997)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 944be34177c5d42d0acdf794332b76cddf0bdd00
Author: xiangzihao <46...@qq.com>
AuthorDate: Sat May 14 12:30:57 2022 +0800

    [Feature-9474] [CI] Add cluster test script verify on shell script (#9997)
    
    * cluster test
    
    * fix init db failed
    
    * add github actions
    
    * add sudo
    
    * remove cluster-test workflows
    
    * refactor test to docker mode
    
    * remove create schema logic
    
    * fix running cluster test error
    
    * add cluster test timeout
    
    * add cluster start test loop check
    
    * optimize cluster start test loop check
    
    (cherry picked from commit baf654cb47d60a753dd6372698ebb9230fca4859)
---
 .github/workflows/backend.yml                      | 31 ++++++++-
 .github/workflows/cluster-test/mysql/Dockerfile    | 47 +++++++++++++
 .../workflows/cluster-test/mysql/deploy.sh         | 32 ++++++---
 .../cluster-test/mysql/docker-compose-base.yaml    | 65 +++++++++++++++++
 .../cluster-test/mysql/docker-compose-cluster.yaml | 21 +++---
 .../cluster-test/mysql/dolphinscheduler_env.sh     | 48 +++++++++++++
 .../workflows/cluster-test/mysql/install_env.sh    | 61 ++++++++++++++++
 .../workflows/cluster-test/mysql/running_test.sh   | 81 ++++++++++++++++++++++
 docs/docs/en/guide/installation/pseudo-cluster.md  |  6 +-
 docs/docs/zh/guide/installation/pseudo-cluster.md  |  6 +-
 .../src/main/bin/upgrade-schema.sh                 |  1 +
 .../tools/datasource/CreateDolphinScheduler.java   | 55 ---------------
 .../tools/datasource/InitDolphinScheduler.java     |  2 +
 .../tools/datasource/UpgradeDolphinScheduler.java  | 11 ++-
 script/start-all.sh                                |  8 +--
 script/stop-all.sh                                 |  8 +--
 16 files changed, 391 insertions(+), 92 deletions(-)

diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml
index fa7495aa40..502ae4c62f 100644
--- a/.github/workflows/backend.yml
+++ b/.github/workflows/backend.yml
@@ -78,11 +78,38 @@ jobs:
                  -Dmaven.wagon.httpconnectionManager.ttlSeconds=120
       - name: Check dependency license
         run: tools/dependencies/check-LICENSE.sh
+      - uses: actions/upload-artifact@v2
+        name: Upload Binary Package
+        with:
+          name: binary-package
+          path: ./dolphinscheduler-dist/target/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz
+          retention-days: 1
+  cluster-test:
+    name: Cluster-Test
+    needs: build
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: true
+      - uses: actions/download-artifact@v2
+        name: Download Binary Package
+        with:
+          name: binary-package
+          path: ./
+      - name: Running cluster test(Mysql)
+        run: |
+          docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-base.yaml up -d
+          docker build -t jdk8:ds_cluster -f .github/workflows/cluster-test/mysql/Dockerfile .
+          docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-cluster.yaml up -d
+          /bin/bash .github/workflows/cluster-test/mysql/running_test.sh
+          docker rm -f $(docker ps -aq)
   result:
     name: Build
     runs-on: ubuntu-latest
     timeout-minutes: 30
-    needs: [ build, paths-filter ]
+    needs: [ build, paths-filter, cluster-test ]
     if: always()
     steps:
       - name: Status
@@ -91,7 +118,7 @@ jobs:
             echo "Skip Build!"
             exit 0
           fi
-          if [[ ${{ needs.build.result }} != 'success' ]]; then
+          if [[ ${{ needs.build.result }} != 'success' || ${{ needs.cluster-test.result }} != 'success' ]]; then
             echo "Build Failed!"
             exit -1
           fi
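
The new cluster-test job can be reproduced outside CI with the same commands. A local sketch, assuming the binary tarball produced by the build job (apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz) is already in the repository root:

    docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-base.yaml up -d
    docker build -t jdk8:ds_cluster -f .github/workflows/cluster-test/mysql/Dockerfile .
    docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-cluster.yaml up -d
    /bin/bash .github/workflows/cluster-test/mysql/running_test.sh
    docker rm -f $(docker ps -aq)   # removes all containers afterwards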
diff --git a/.github/workflows/cluster-test/mysql/Dockerfile b/.github/workflows/cluster-test/mysql/Dockerfile
new file mode 100644
index 0000000000..e117b86dc6
--- /dev/null
+++ b/.github/workflows/cluster-test/mysql/Dockerfile
@@ -0,0 +1,47 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM openjdk:8-jre-slim-buster
+
+RUN apt update ; \
+    apt install -y curl wget default-mysql-client sudo openssh-server netcat-traditional ;
+
+#COPY ./dolphinscheduler-dist/target/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz /root/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz
+COPY ./apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz /root/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz
+RUN tar -zxvf /root/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz -C ~
+
+ENV DOLPHINSCHEDULER_HOME /root/apache-dolphinscheduler-dev-SNAPSHOT-bin
+
+#Setting install.sh
+COPY .github/workflows/cluster-test/mysql/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
+
+#Setting dolphinscheduler_env.sh
+COPY .github/workflows/cluster-test/mysql/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
+
+#Download mysql jar
+ENV MYSQL_URL "https://repo.maven.apache.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar"
+ENV MYSQL_DRIVER "mysql-connector-java-8.0.16.jar"
+RUN wget -O $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $MYSQL_URL ; \
+cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/api-server/libs/$MYSQL_DRIVER ; \
+cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/master-server/libs/$MYSQL_DRIVER ; \
+cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/worker-server/libs/$MYSQL_DRIVER ; \
+cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/tools/libs/$MYSQL_DRIVER
+
+#Deploy
+COPY .github/workflows/cluster-test/mysql/deploy.sh /root/deploy.sh
+
+CMD [ "/bin/bash", "/root/deploy.sh" ]
diff --git a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh b/.github/workflows/cluster-test/mysql/deploy.sh
old mode 100755
new mode 100644
similarity index 52%
copy from dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
copy to .github/workflows/cluster-test/mysql/deploy.sh
index 33d4e06db8..75b23d08a2
--- a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
+++ b/.github/workflows/cluster-test/mysql/deploy.sh
@@ -15,16 +15,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+set -euox pipefail
 
-BIN_DIR=$(dirname $0)
-DOLPHINSCHEDULER_HOME=${DOLPHINSCHEDULER_HOME:-$(cd $BIN_DIR/../..; pwd)}
 
-if [ "$DOCKER" != "true" ]; then
-  source "$DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh"
-fi
+USER=root
+DOLPHINSCHEDULER_HOME=/root/apache-dolphinscheduler-dev-SNAPSHOT-bin
 
-JAVA_OPTS=${JAVA_OPTS:-"-server -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms1g -Xmx1g -Xmn512m -XX:+PrintGCDetails -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=dump.hprof"}
+#Create database
+mysql -hmysql -P3306 -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;"
 
-java $JAVA_OPTS \
-  -cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
-  org.apache.dolphinscheduler.tools.datasource.UpgradeDolphinScheduler
+#Sudo
+sed -i '$a'$USER'  ALL=(ALL)  NOPASSWD: ALL' /etc/sudoers
+sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
+
+#SSH
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+service ssh start
+
+#Init schema
+/bin/bash $DOLPHINSCHEDULER_HOME/tools/bin/upgrade-schema.sh
+
+#Start Cluster
+/bin/bash $DOLPHINSCHEDULER_HOME/bin/start-all.sh
+
+#Keep running
+tail -f /dev/null
diff --git a/.github/workflows/cluster-test/mysql/docker-compose-base.yaml b/.github/workflows/cluster-test/mysql/docker-compose-base.yaml
new file mode 100644
index 0000000000..251b72672d
--- /dev/null
+++ b/.github/workflows/cluster-test/mysql/docker-compose-base.yaml
@@ -0,0 +1,65 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: "3"
+
+services:
+  mysql:
+    container_name: mysql
+    image: mysql:5.7.36
+    command: --default-authentication-plugin=mysql_native_password
+    restart: always
+    environment:
+      MYSQL_ROOT_PASSWORD: 123456
+    ports:
+      - "3306:3306"
+    healthcheck:
+      test: mysqladmin ping -h 127.0.0.1 -u root --password=$$MYSQL_ROOT_PASSWORD
+      interval: 5s
+      timeout: 60s
+      retries: 120
+
+  zoo1:
+    image: zookeeper:3.8.0
+    restart: always
+    hostname: zoo1
+    ports:
+      - "2181:2181"
+    environment:
+      ZOO_MY_ID: 1
+      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
+
+  zoo2:
+    image: zookeeper:3.8.0
+    restart: always
+    hostname: zoo2
+    ports:
+      - "2182:2181"
+    environment:
+      ZOO_MY_ID: 2
+      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
+
+  zoo3:
+    image: zookeeper:3.8.0
+    restart: always
+    hostname: zoo3
+    ports:
+      - "2183:2181"
+    environment:
+      ZOO_MY_ID: 3
+      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
+
diff --git a/dolphinscheduler-tools/src/main/bin/create-schema.sh b/.github/workflows/cluster-test/mysql/docker-compose-cluster.yaml
old mode 100755
new mode 100644
similarity index 57%
rename from dolphinscheduler-tools/src/main/bin/create-schema.sh
rename to .github/workflows/cluster-test/mysql/docker-compose-cluster.yaml
index 29cc5d7711..1b2d9e91ab
--- a/dolphinscheduler-tools/src/main/bin/create-schema.sh
+++ b/.github/workflows/cluster-test/mysql/docker-compose-cluster.yaml
@@ -1,4 +1,3 @@
-#!/bin/bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -16,13 +15,15 @@
 # limitations under the License.
 #
 
-BIN_DIR=$(dirname $0)
-DOLPHINSCHEDULER_HOME=${DOLPHINSCHEDULER_HOME:-$(cd $BIN_DIR/../..; pwd)}
+version: "3"
 
-source "$DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh"
-
-JAVA_OPTS=${JAVA_OPTS:-"-server -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms1g -Xmx1g -Xmn512m -XX:+PrintGCDetails -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=dump.hprof"}
-
-java $JAVA_OPTS \
-  -cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
-  org.apache.dolphinscheduler.tools.datasource.CreateDolphinScheduler
+services:
+  ds:
+    container_name: ds
+    image: jdk8:ds_cluster
+    restart: always
+    ports:
+      - "12345:12345"
+      - "5679:5679"
+      - "1235:1235"
+      - "50053:50053"
diff --git a/.github/workflows/cluster-test/mysql/dolphinscheduler_env.sh b/.github/workflows/cluster-test/mysql/dolphinscheduler_env.sh
new file mode 100755
index 0000000000..07d5a8dbba
--- /dev/null
+++ b/.github/workflows/cluster-test/mysql/dolphinscheduler_env.sh
@@ -0,0 +1,48 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# JAVA_HOME, will use it to start DolphinScheduler server
+export JAVA_HOME=${JAVA_HOME:-/usr/local/openjdk-8}
+
+# Database related configuration, set database type, username and password
+export DATABASE=${DATABASE:-mysql}
+export SPRING_PROFILES_ACTIVE=${DATABASE}
+export SPRING_DATASOURCE_DRIVER_CLASS_NAME=com.mysql.cj.jdbc.Driver
+export SPRING_DATASOURCE_URL="jdbc:mysql://mysql:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
+export SPRING_DATASOURCE_USERNAME=root
+export SPRING_DATASOURCE_PASSWORD=123456
+
+# DolphinScheduler server related configuration
+export SPRING_CACHE_TYPE=${SPRING_CACHE_TYPE:-none}
+export SPRING_JACKSON_TIME_ZONE=${SPRING_JACKSON_TIME_ZONE:-UTC}
+export MASTER_FETCH_COMMAND_NUM=${MASTER_FETCH_COMMAND_NUM:-10}
+
+# Registry center configuration, determines the type and link of the registry center
+export REGISTRY_TYPE=${REGISTRY_TYPE:-zookeeper}
+export REGISTRY_ZOOKEEPER_CONNECT_STRING=${REGISTRY_ZOOKEEPER_CONNECT_STRING:-zoo1:2181,zoo2:2182,zoo3:2183}
+
+# Task related configurations; change them if you use the related tasks.
+export HADOOP_HOME=${HADOOP_HOME:-/opt/soft/hadoop}
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/opt/soft/hadoop/etc/hadoop}
+export SPARK_HOME1=${SPARK_HOME1:-/opt/soft/spark1}
+export SPARK_HOME2=${SPARK_HOME2:-/opt/soft/spark2}
+export PYTHON_HOME=${PYTHON_HOME:-/opt/soft/python}
+export HIVE_HOME=${HIVE_HOME:-/opt/soft/hive}
+export FLINK_HOME=${FLINK_HOME:-/opt/soft/flink}
+export DATAX_HOME=${DATAX_HOME:-/opt/soft/datax}
+
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH
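
Every assignment above uses the shell's `${VAR:-default}` idiom, so any value exported before the file is sourced takes precedence over the file's default; a minimal sketch:

```bash
# Minimal sketch of the ${VAR:-default} precedence used throughout this file.
export SPRING_JACKSON_TIME_ZONE=Asia/Shanghai   # set before sourcing
source ./dolphinscheduler_env.sh
echo "$SPRING_JACKSON_TIME_ZONE"                # Asia/Shanghai, not the UTC default
```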
diff --git a/.github/workflows/cluster-test/mysql/install_env.sh b/.github/workflows/cluster-test/mysql/install_env.sh
new file mode 100644
index 0000000000..b7c9797e20
--- /dev/null
+++ b/.github/workflows/cluster-test/mysql/install_env.sh
@@ -0,0 +1,61 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# ---------------------------------------------------------
+# INSTALL MACHINE
+# ---------------------------------------------------------
+# A comma separated list of machine hostnames or IPs where DolphinScheduler will be installed,
+# including master, worker, api, alert. If you want to deploy in pseudo-distributed
+# mode, just write a single hostname
+# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
+ips=${ips:-"localhost"}
+
+# Port of the SSH protocol, default value is 22. For now we only support the same port on all `ips` machines;
+# modify it if you use a different SSH port
+sshPort=${sshPort:-"22"}
+
+# A comma separated list of machine hostnames or IPs where the Master server will be installed; it
+# must be a subset of configuration `ips`.
+# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
+masters=${masters:-"localhost"}
+
+# A comma separated list of machine <hostname>:<workerGroup> or <IP>:<workerGroup>. All hostnames or IPs must be a
+# subset of configuration `ips`. The workerGroup has a default value of `default`, but we recommend declaring it after the hosts
+# Example for hostnames: workers="ds1:default,ds2:default,ds3:default", Example for IPs: workers="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
+workers=${workers:-"localhost:default"}
+
+# A comma separated list of machine hostnames or IPs where the Alert server will be installed; it
+# must be a subset of configuration `ips`.
+# Example for hostname: alertServer="ds3", Example for IP: alertServer="192.168.8.3"
+alertServer=${alertServer:-"localhost"}
+
+# A comma separated list of machine hostnames or IPs where the API server will be installed; it
+# must be a subset of configuration `ips`.
+# Example for hostname: apiServers="ds1", Example for IP: apiServers="192.168.8.1"
+apiServers=${apiServers:-"localhost"}
+
+# The directory to install DolphinScheduler on all machines configured above. It will be created automatically by the `install.sh` script if it does not exist.
+# Do not set this configuration to the current path (pwd)
+installPath=${installPath:-"/root/apache-dolphinscheduler-dev-SNAPSHOT-bin"}
+
+# The user to deploy DolphinScheduler on all machines configured above. For now the user must be created manually before running the `install.sh`
+# script. The user needs sudo privileges and permission to operate HDFS. If HDFS is enabled, the root directory needs
+# to be created by this user
+deployUser=${deployUser:-"dolphinscheduler"}
+
+# The root of zookeeper, for now DolphinScheduler default registry server is zookeeper.
+zkRoot=${zkRoot:-"/dolphinscheduler"}
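
Because every setting falls back to a localhost default, the same file can describe a real multi-node layout purely through the environment; a hedged sketch with placeholder hostnames:

```bash
# Hedged sketch: ds1-ds3 are placeholder hosts; export before sourcing install_env.sh.
export ips="ds1,ds2,ds3"
export masters="ds1"
export workers="ds2:default,ds3:default"
export alertServer="ds3"
export apiServers="ds1"
source ./install_env.sh
```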
diff --git a/.github/workflows/cluster-test/mysql/running_test.sh b/.github/workflows/cluster-test/mysql/running_test.sh
new file mode 100644
index 0000000000..c812d05402
--- /dev/null
+++ b/.github/workflows/cluster-test/mysql/running_test.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+set -x
+
+
+API_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:12345/dolphinscheduler/actuator/health"
+MASTER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:5679/actuator/health"
+WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:1235/actuator/health"
+
+#Cluster start health check
+TIMEOUT=120
+START_HEALTHCHECK_EXITCODE=0
+
+for ((i=1; i<=TIMEOUT; i++))
+do
+  MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
+  WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
+  API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
+  if [[ $MASTER_HTTP_STATUS -eq 200 && $WORKER_HTTP_STATUS -eq 200 && $API_HTTP_STATUS -eq 200 ]];then
+    START_HEALTHCHECK_EXITCODE=0
+  else
+    START_HEALTHCHECK_EXITCODE=2
+  fi
+
+  if [[ $START_HEALTHCHECK_EXITCODE -eq 0 ]];then
+    echo "cluster start health check success"
+    break
+  fi
+
+  if [[ $i -eq $TIMEOUT ]];then
+    docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-dev-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
+    echo "cluster start health check failed"
+    exit $START_HEALTHCHECK_EXITCODE
+  fi
+
+  sleep 1
+done
+
+#Stop Cluster
+docker exec -u root ds bash -c "/root/apache-dolphinscheduler-dev-SNAPSHOT-bin/bin/stop-all.sh"
+
+#Cluster stop health check
+sleep 5
+MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
+if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
+  echo "master stop health check success"
+else
+  echo "master stop health check failed"
+  exit 3
+fi
+
+WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
+if [[ $WORKER_HTTP_STATUS -ne 200 ]];then
+  echo "worker stop health check success"
+else
+  echo "worker stop health check failed"
+  exit 3
+fi
+
+API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
+if [[ $API_HTTP_STATUS -ne 200 ]];then
+  echo "api stop health check success"
+else
+  echo "api stop health check failed"
+  exit 3
+fi
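
The three stop checks are copies of one pattern; a hedged consolidation sketch (not part of the commit) that reuses the healthcheck commands defined at the top of the script:

```bash
# Hedged refactor sketch: one helper instead of three copied if/else blocks.
assert_stopped() {
  local name=$1 healthcheck=$2
  if [[ $(eval "$healthcheck") -ne 200 ]]; then
    echo "$name stop health check success"
  else
    echo "$name stop health check failed"
    exit 3
  fi
}
assert_stopped master "$MASTER_HEALTHCHECK_COMMAND"
assert_stopped worker "$WORKER_HEALTHCHECK_COMMAND"
assert_stopped api "$API_HEALTHCHECK_COMMAND"
```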
diff --git a/docs/docs/en/guide/installation/pseudo-cluster.md b/docs/docs/en/guide/installation/pseudo-cluster.md
index b96679a63b..d6ff451175 100644
--- a/docs/docs/en/guide/installation/pseudo-cluster.md
+++ b/docs/docs/en/guide/installation/pseudo-cluster.md
@@ -115,7 +115,7 @@ export JAVA_HOME=${JAVA_HOME:-/opt/soft/java}
 export DATABASE=${DATABASE:-postgresql}
 export SPRING_PROFILES_ACTIVE=${DATABASE}
 export SPRING_DATASOURCE_DRIVER_CLASS_NAME=org.postgresql.Driver
-export SPRING_DATASOURCE_URL=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
+export SPRING_DATASOURCE_URL="jdbc:postgresql://127.0.0.1:5432/dolphinscheduler"
 export SPRING_DATASOURCE_USERNAME={user}
 export SPRING_DATASOURCE_PASSWORD={password}
 
@@ -180,7 +180,7 @@ Then, modify `./bin/env/dolphinscheduler_env.sh` to use mysql, change {user} and
 export DATABASE=${DATABASE:-mysql}
 export SPRING_PROFILES_ACTIVE=${DATABASE}
 export SPRING_DATASOURCE_DRIVER_CLASS_NAME=com.mysql.cj.jdbc.Driver
-export SPRING_DATASOURCE_URL=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+export SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
 export SPRING_DATASOURCE_USERNAME={user}
 export SPRING_DATASOURCE_PASSWORD={password}
 ```
@@ -188,7 +188,7 @@ export SPRING_DATASOURCE_PASSWORD={password}
 After the above steps done you would create a new database for DolphinScheduler, then run the Shell script to init database:
 
 ```shell
-sh tools/bin/create-schema.sh
+sh tools/bin/upgrade-schema.sh
 ```
 
 ## Start DolphinScheduler
diff --git a/docs/docs/zh/guide/installation/pseudo-cluster.md b/docs/docs/zh/guide/installation/pseudo-cluster.md
index 2e0570cce2..107d27b2f1 100644
--- a/docs/docs/zh/guide/installation/pseudo-cluster.md
+++ b/docs/docs/zh/guide/installation/pseudo-cluster.md
@@ -112,7 +112,7 @@ export JAVA_HOME=${JAVA_HOME:-/opt/soft/java}
 export DATABASE=${DATABASE:-postgresql}
 export SPRING_PROFILES_ACTIVE=${DATABASE}
 export SPRING_DATASOURCE_DRIVER_CLASS_NAME=org.postgresql.Driver
-export SPRING_DATASOURCE_URL=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
+export SPRING_DATASOURCE_URL="jdbc:postgresql://127.0.0.1:5432/dolphinscheduler"
 export SPRING_DATASOURCE_USERNAME={user}
 export SPRING_DATASOURCE_PASSWORD={password}
 
@@ -177,7 +177,7 @@ mysql> FLUSH PRIVILEGES;
 export DATABASE=${DATABASE:-mysql}
 export SPRING_PROFILES_ACTIVE=${DATABASE}
 export SPRING_DATASOURCE_DRIVER_CLASS_NAME=com.mysql.cj.jdbc.Driver
-export SPRING_DATASOURCE_URL=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+export SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
 export SPRING_DATASOURCE_USERNAME={user}
 export SPRING_DATASOURCE_PASSWORD={password}
 ```  
@@ -185,7 +185,7 @@ export SPRING_DATASOURCE_PASSWORD={password}
 完成上述步骤后,您已经为 DolphinScheduler 创建一个新数据库,现在你可以通过快速的 Shell 脚本来初始化数据库
 
 ```shell
-sh tools/bin/create-schema.sh
+sh tools/bin/upgrade-schema.sh
 ```
 
 ## 启动 DolphinScheduler
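
With the runner change further down (`UpgradeRunner` now initializes an empty schema and upgrades an existing one), `upgrade-schema.sh` is effectively idempotent, which is why both docs can point at a single script; a minimal sketch with the same placeholder credentials the docs use:

```bash
# Minimal sketch: {user}/{password} are placeholders, as in the docs above.
export DATABASE=mysql
export SPRING_DATASOURCE_DRIVER_CLASS_NAME=com.mysql.cj.jdbc.Driver
export SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
export SPRING_DATASOURCE_USERNAME={user}
export SPRING_DATASOURCE_PASSWORD={password}
sh tools/bin/upgrade-schema.sh   # empty database: initializes the schema
sh tools/bin/upgrade-schema.sh   # existing database: upgrades, safe to re-run
```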
diff --git a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh b/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
index 33d4e06db8..355ebfe379 100755
--- a/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
+++ b/dolphinscheduler-tools/src/main/bin/upgrade-schema.sh
@@ -27,4 +27,5 @@ JAVA_OPTS=${JAVA_OPTS:-"-server -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms
 
 java $JAVA_OPTS \
   -cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
+  -Dspring.profiles.active=upgrade \
   org.apache.dolphinscheduler.tools.datasource.UpgradeDolphinScheduler
diff --git a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/CreateDolphinScheduler.java b/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/CreateDolphinScheduler.java
deleted file mode 100644
index 204dc5c2cc..0000000000
--- a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/CreateDolphinScheduler.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.dolphinscheduler.tools.datasource;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.boot.CommandLineRunner;
-import org.springframework.boot.SpringApplication;
-import org.springframework.boot.autoconfigure.SpringBootApplication;
-import org.springframework.stereotype.Component;
-
-@SpringBootApplication
-public class CreateDolphinScheduler {
-    public static void main(String[] args) {
-        SpringApplication.run(CreateDolphinScheduler.class, args);
-    }
-
-    @Component
-    static class CreateRunner implements CommandLineRunner {
-        private static final Logger logger = LoggerFactory.getLogger(CreateRunner.class);
-
-        private final DolphinSchedulerManager dolphinSchedulerManager;
-
-        CreateRunner(DolphinSchedulerManager dolphinSchedulerManager) {
-            this.dolphinSchedulerManager = dolphinSchedulerManager;
-        }
-
-        @Override
-        public void run(String... args) throws Exception {
-            if (dolphinSchedulerManager.schemaIsInitialized()) {
-                dolphinSchedulerManager.upgradeDolphinScheduler();
-                logger.info("upgrade DolphinScheduler finished");
-            } else {
-                dolphinSchedulerManager.initDolphinScheduler();
-                logger.info("init DolphinScheduler finished");
-            }
-            logger.info("create DolphinScheduler success");
-        }
-    }
-}
diff --git a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/InitDolphinScheduler.java b/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/InitDolphinScheduler.java
index 18eb310654..6ccd73decd 100644
--- a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/InitDolphinScheduler.java
+++ b/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/InitDolphinScheduler.java
@@ -22,6 +22,7 @@ import org.slf4j.LoggerFactory;
 import org.springframework.boot.CommandLineRunner;
 import org.springframework.boot.SpringApplication;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.context.annotation.Profile;
 import org.springframework.stereotype.Component;
 
 @SpringBootApplication
@@ -31,6 +32,7 @@ public class InitDolphinScheduler {
     }
 
     @Component
+    @Profile("init")
     static class InitRunner implements CommandLineRunner {
         private static final Logger logger = LoggerFactory.getLogger(InitRunner.class);
 
diff --git a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/UpgradeDolphinScheduler.java b/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/UpgradeDolphinScheduler.java
index d09acc6692..22e1338431 100644
--- a/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/UpgradeDolphinScheduler.java
+++ b/dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/datasource/UpgradeDolphinScheduler.java
@@ -22,6 +22,7 @@ import org.slf4j.LoggerFactory;
 import org.springframework.boot.CommandLineRunner;
 import org.springframework.boot.SpringApplication;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.context.annotation.Profile;
 import org.springframework.stereotype.Component;
 
 @SpringBootApplication
@@ -31,6 +32,7 @@ public class UpgradeDolphinScheduler {
     }
 
     @Component
+    @Profile("upgrade")
     static class UpgradeRunner implements CommandLineRunner {
         private static final Logger logger = LoggerFactory.getLogger(UpgradeRunner.class);
 
@@ -42,8 +44,13 @@ public class UpgradeDolphinScheduler {
 
         @Override
         public void run(String... args) throws Exception {
-            dolphinSchedulerManager.upgradeDolphinScheduler();
-            logger.info("upgrade DolphinScheduler success");
+            if (dolphinSchedulerManager.schemaIsInitialized()) {
+                dolphinSchedulerManager.upgradeDolphinScheduler();
+                logger.info("upgrade DolphinScheduler finished");
+            } else {
+                dolphinSchedulerManager.initDolphinScheduler();
+                logger.info("init DolphinScheduler finished");
+            }
         }
     }
 }
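
Both runners are now plain `@Profile`-gated Spring components, so the active profile decides which `CommandLineRunner` the tools application instantiates; a hedged sketch of the equivalent init invocation, mirroring the java command in `upgrade-schema.sh` above (`$DOLPHINSCHEDULER_HOME` as defined there):

```bash
# Hedged sketch: same classpath as upgrade-schema.sh; the init profile selects InitRunner.
java -Dspring.profiles.active=init \
  -cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
  org.apache.dolphinscheduler.tools.datasource.InitDolphinScheduler
```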
diff --git a/script/start-all.sh b/script/start-all.sh
index 35936eea0a..0126a1381b 100755
--- a/script/start-all.sh
+++ b/script/start-all.sh
@@ -33,7 +33,7 @@ mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
   echo "$master master server is starting"
-	ssh -p $sshPort $master  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start master-server;"
+	ssh -o StrictHostKeyChecking=no -p $sshPort $master  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start master-server;"
 
 done
 
@@ -41,16 +41,16 @@ for worker in ${workerNames[@]}
 do
   echo "$worker worker server is starting"
 
-  ssh -p $sshPort $worker  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start worker-server;"
+  ssh -o StrictHostKeyChecking=no -p $sshPort $worker  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start worker-server;"
 done
 
-ssh -p $sshPort $alertServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start alert-server;"
+ssh -o StrictHostKeyChecking=no -p $sshPort $alertServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start alert-server;"
 
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
   echo "$apiServer api server is starting"
-  ssh -p $sshPort $apiServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start api-server;"
+  ssh -o StrictHostKeyChecking=no -p $sshPort $apiServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh start api-server;"
 done
 
 # query server status
diff --git a/script/stop-all.sh b/script/stop-all.sh
index ee3a09cd41..e8eb6da5d1 100755
--- a/script/stop-all.sh
+++ b/script/stop-all.sh
@@ -33,21 +33,21 @@ mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
   echo "$master master server is stopping"
-	ssh -p $sshPort $master  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop master-server;"
+	ssh -o StrictHostKeyChecking=no -p $sshPort $master  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop master-server;"
 
 done
 
 for worker in ${workerNames[@]}
 do
   echo "$worker worker server is stopping"
-  ssh -p $sshPort $worker  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop worker-server;"
+  ssh -o StrictHostKeyChecking=no -p $sshPort $worker  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop worker-server;"
 done
 
-ssh -p $sshPort $alertServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop alert-server;"
+ssh -o StrictHostKeyChecking=no -p $sshPort $alertServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop alert-server;"
 
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
   echo "$apiServer api server is stopping"
-  ssh -p $sshPort $apiServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop api-server;"
+  ssh -o StrictHostKeyChecking=no -p $sshPort $apiServer  "cd $installPath/; bash bin/dolphinscheduler-daemon.sh stop api-server;"
 done
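
`-o StrictHostKeyChecking=no` keeps the non-interactive CI run from hanging on first-contact host-key prompts. If you would rather keep strict checking, a hedged alternative is to pre-seed `known_hosts` (placeholder hosts below):

```bash
# Hedged alternative sketch: collect host keys up front instead of disabling checks.
for host in ds1 ds2 ds3; do
  ssh-keyscan -p 22 "$host" >> ~/.ssh/known_hosts
done
```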


[dolphinscheduler] 07/16: [python] Fix process definition attr release state not work (#10151)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 80ebe4a33431c88c7514b3ad0d9ffdf645e1bba2
Author: Jiajie Zhong <zh...@hotmail.com>
AuthorDate: Tue May 24 10:25:19 2022 +0800

    [python] Fix process definition attr release state not work (#10151)
    
    This patch fixes the release state not working when it is set to
    offline and the workflow is submitted to the Java gateway. It
    failed because we did not pass the attribute to the Java gateway
    function `createOrUpdateProcessDefinition`.
    
    close: #9779
    (cherry picked from commit 56e0ea802d93667de8c48796a9291a775abddd49)
---
 .../dolphinscheduler/api/python/PythonGateway.java |  3 +-
 .../pydolphinscheduler/docs/source/config.rst      | 68 +++++++++++-----------
 .../src/pydolphinscheduler/constants.py            |  7 ---
 .../src/pydolphinscheduler/core/configuration.py   |  3 +
 .../pydolphinscheduler/core/default_config.yaml    |  6 ++
 .../pydolphinscheduler/core/process_definition.py  | 26 ++++++++-
 .../tests/core/test_process_definition.py          | 41 +++++++++++--
 .../tests/utils/test_yaml_parser.py                |  9 +--
 8 files changed, 111 insertions(+), 52 deletions(-)

diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/python/PythonGateway.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/python/PythonGateway.java
index 4fb74f1e66..817f411854 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/python/PythonGateway.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/python/PythonGateway.java
@@ -221,6 +221,7 @@ public class PythonGateway {
                                                 int timeout,
                                                 String workerGroup,
                                                 String tenantCode,
+                                                int releaseState,
                                                 String taskRelationJson,
                                                 String taskDefinitionJson,
                                                 ProcessExecutionTypeEnum executionType) {
@@ -248,7 +249,7 @@ public class PythonGateway {
         if (schedule != null) {
             createOrUpdateSchedule(user, projectCode, processDefinitionCode, schedule, workerGroup, warningType, warningGroupId);
         }
-        processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.ONLINE);
+        processDefinitionService.releaseProcessDefinition(user, projectCode, processDefinitionCode, ReleaseState.getEnum(releaseState));
         return processDefinitionCode;
     }
 
diff --git a/dolphinscheduler-python/pydolphinscheduler/docs/source/config.rst b/dolphinscheduler-python/pydolphinscheduler/docs/source/config.rst
index b7879dd94e..2b804d0c62 100644
--- a/dolphinscheduler-python/pydolphinscheduler/docs/source/config.rst
+++ b/dolphinscheduler-python/pydolphinscheduler/docs/source/config.rst
@@ -78,39 +78,41 @@ All Configurations in Environment Variables
 
 All environment variables as below, and you could modify their value via `Bash <by bash>`_ or `Python OS Module <by python os module>`_
 
-+------------------+------------------------------------+------------------------------------------------------------------------------------------------------------------+
-| Variable Section | Variable Name                      | description                                                                                                      |
-+==================+====================================+==================================================================================================================+
-|                  | ``PYDS_JAVA_GATEWAY_ADDRESS``      | Default Java gateway address, will use its value when it is set.                                                 |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|   Java Gateway   | ``PYDS_JAVA_GATEWAY_PORT``         | Default Java gateway port, will use its value when it is set.                                                    |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_JAVA_GATEWAY_AUTO_CONVERT`` | Default boolean Java gateway auto convert, will use its value when it is set.                                    |
-+------------------+------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_USER_NAME``                 | Default user name, will use when user's ``name`` when does not specify.                                          |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_USER_PASSWORD``             | Default user password, will use when user's ``password`` when does not specify.                                  |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|   Default User   | ``PYDS_USER_EMAIL``                | Default user email, will use when user's ``email`` when does not specify.                                        |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_USER_PHONE``                | Default user phone, will use when user's ``phone`` when does not specify.                                        |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_USER_STATE``                | Default user state, will use when user's ``state`` when does not specify.                                        |
-+------------------+------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_PROJECT``          | Default workflow project name, will use its value when workflow does not specify the attribute ``project``.      |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_TENANT``           | Default workflow tenant, will use its value when workflow does not specify the attribute ``tenant``.             |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-| Default Workflow | ``PYDS_WORKFLOW_USER``             | Default workflow user, will use its value when workflow does not specify the attribute ``user``.                 |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_QUEUE``            | Default workflow queue, will use its value when workflow does not specify the attribute ``queue``.               |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_WORKER_GROUP``     | Default workflow worker group, will use its value when workflow does not specify the attribute ``worker_group``. |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_TIME_ZONE``        | Default workflow worker group, will use its value when workflow does not specify the attribute ``timezone``.     |
-+                  +------------------------------------+------------------------------------------------------------------------------------------------------------------+
-|                  | ``PYDS_WORKFLOW_WARNING_TYPE``     | Default workflow warning type, will use its value when workflow does not specify the attribute ``warning_type``. |
-+------------------+------------------------------------+------------------------------------------------------------------------------------------------------------------+
++------------------+------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+| Variable Section | Variable Name                      | description                                                                                                        |
++==================+====================================+====================================================================================================================+
+|                  | ``PYDS_JAVA_GATEWAY_ADDRESS``      | Default Java gateway address, will use its value when it is set.                                                   |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|   Java Gateway   | ``PYDS_JAVA_GATEWAY_PORT``         | Default Java gateway port, will use its value when it is set.                                                      |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_JAVA_GATEWAY_AUTO_CONVERT`` | Default boolean Java gateway auto convert, will use its value when it is set.                                      |
++------------------+------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_USER_NAME``                 | Default user name, will use when user's ``name`` when does not specify.                                            |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_USER_PASSWORD``             | Default user password, will use when user's ``password`` when does not specify.                                    |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|   Default User   | ``PYDS_USER_EMAIL``                | Default user email, will use when user's ``email`` when does not specify.                                          |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_USER_PHONE``                | Default user phone, will use when user's ``phone`` when does not specify.                                          |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_USER_STATE``                | Default user state, will use when user's ``state`` when does not specify.                                          |
++------------------+------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_PROJECT``          | Default workflow project name, will use its value when workflow does not specify the attribute ``project``.        |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_TENANT``           | Default workflow tenant, will use its value when workflow does not specify the attribute ``tenant``.               |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+| Default Workflow | ``PYDS_WORKFLOW_USER``             | Default workflow user, will use its value when workflow does not specify the attribute ``user``.                   |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_QUEUE``            | Default workflow queue, will use its value when workflow does not specify the attribute ``queue``.                 |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_WORKER_GROUP``     | Default workflow worker group, will use its value when workflow does not specify the attribute ``worker_group``.   |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_RELEASE_STATE``    | Default workflow release state, will use its value when workflow does not specify the attribute ``release_state``. |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_TIME_ZONE``        | Default workflow time zone, will use its value when workflow does not specify the attribute ``timezone``.          |
++                  +------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+|                  | ``PYDS_WORKFLOW_WARNING_TYPE``     | Default workflow warning type, will use its value when workflow does not specify the attribute ``warning_type``.   |
++------------------+------------------------------------+--------------------------------------------------------------------------------------------------------------------+
 
 .. note::
 
diff --git a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/constants.py b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/constants.py
index 3992917310..262469c88f 100644
--- a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/constants.py
+++ b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/constants.py
@@ -18,13 +18,6 @@
 """Constants for pydolphinscheduler."""
 
 
-class ProcessDefinitionReleaseState:
-    """Constants for :class:`pydolphinscheduler.core.process_definition.ProcessDefinition` release state."""
-
-    ONLINE: str = "ONLINE"
-    OFFLINE: str = "OFFLINE"
-
-
 class TaskPriority(str):
     """Constants for task priority."""
 
diff --git a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/configuration.py b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/configuration.py
index 03ac0977e4..860f9869f3 100644
--- a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/configuration.py
+++ b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/configuration.py
@@ -177,6 +177,9 @@ WORKFLOW_USER = os.environ.get(
 WORKFLOW_QUEUE = os.environ.get(
     "PYDS_WORKFLOW_QUEUE", configs.get("default.workflow.queue")
 )
+WORKFLOW_RELEASE_STATE = os.environ.get(
+    "PYDS_WORKFLOW_RELEASE_STATE", configs.get("default.workflow.release_state")
+)
 WORKFLOW_WORKER_GROUP = os.environ.get(
     "PYDS_WORKFLOW_WORKER_GROUP", configs.get("default.workflow.worker_group")
 )
diff --git a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/default_config.yaml b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/default_config.yaml
index e437e55c31..5541af7b79 100644
--- a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/default_config.yaml
+++ b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/default_config.yaml
@@ -48,5 +48,11 @@ default:
     user: userPythonGateway
     queue: queuePythonGateway
     worker_group: default
+    # Release state of the workflow. The default value ``online`` means the workflow is set online when it is
+    # submitted to the Java gateway; set the value to ``offline`` if you want the workflow to stay offline.
+    release_state: online
     time_zone: Asia/Shanghai
+    # Warning type of the workflow. The default value ``NONE`` means users are not warned for any workflow state;
+    # change it to ``FAILURE`` if you want to warn users when the workflow fails. All available enum values are
+    # ``NONE``, ``SUCCESS``, ``FAILURE``, ``ALL``
     warning_type: NONE
diff --git a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py
index dbf2c41795..cef01706df 100644
--- a/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py
+++ b/dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py
@@ -21,7 +21,7 @@ import json
 from datetime import datetime
 from typing import Any, Dict, List, Optional, Set
 
-from pydolphinscheduler.constants import ProcessDefinitionReleaseState, TaskType
+from pydolphinscheduler.constants import TaskType
 from pydolphinscheduler.core import configuration
 from pydolphinscheduler.core.base import Base
 from pydolphinscheduler.exceptions import PyDSParamException, PyDSTaskNoFoundException
@@ -105,7 +105,7 @@ class ProcessDefinition(Base):
         warning_type: Optional[str] = configuration.WORKFLOW_WARNING_TYPE,
         warning_group_id: Optional[int] = 0,
         timeout: Optional[int] = 0,
-        release_state: Optional[str] = ProcessDefinitionReleaseState.ONLINE,
+        release_state: Optional[str] = configuration.WORKFLOW_RELEASE_STATE,
         param: Optional[Dict] = None,
     ):
         super().__init__(name, description)
@@ -126,7 +126,7 @@ class ProcessDefinition(Base):
             self.warning_type = warning_type.strip().upper()
         self.warning_group_id = warning_group_id
         self.timeout = timeout
-        self.release_state = release_state
+        self._release_state = release_state
         self.param = param
         self.tasks: dict = {}
         # TODO how to fix circle import
@@ -197,6 +197,25 @@ class ProcessDefinition(Base):
         """Set attribute end_time."""
         self._end_time = val
 
+    @property
+    def release_state(self) -> int:
+        """Get attribute release_state."""
+        rs_ref = {
+            "online": 1,
+            "offline": 0,
+        }
+        if self._release_state not in rs_ref:
+            raise PyDSParamException(
+                "Parameter release_state only support `online` or `offline` but get %",
+                self._release_state,
+            )
+        return rs_ref[self._release_state]
+
+    @release_state.setter
+    def release_state(self, val: str) -> None:
+        """Set attribute release_state."""
+        self._release_state = val.lower()
+
     @property
     def param_json(self) -> Optional[List[Dict]]:
         """Return param json base on self.param."""
@@ -381,6 +400,7 @@ class ProcessDefinition(Base):
             self.timeout,
             self.worker_group,
             self._tenant,
+            self.release_state,
             # TODO add serialization function
             json.dumps(self.task_relation_json),
             json.dumps(self.task_definition_json),
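
A quick way to see the new mapping end to end, as exercised by the tests below; a hedged smoke test assuming pydolphinscheduler is installed locally:

```bash
# Hedged smoke test: "online" -> 1, "offline" -> 0, and the setter lower-cases input.
python - <<'EOF'
from pydolphinscheduler.core.process_definition import ProcessDefinition

with ProcessDefinition("release-state-demo", release_state="offline") as pd:
    assert pd.release_state == 0
    pd.release_state = "ONLINE"
    assert pd.release_state == 1
EOF
```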
diff --git a/dolphinscheduler-python/pydolphinscheduler/tests/core/test_process_definition.py b/dolphinscheduler-python/pydolphinscheduler/tests/core/test_process_definition.py
index 36e1cb035e..63580de467 100644
--- a/dolphinscheduler-python/pydolphinscheduler/tests/core/test_process_definition.py
+++ b/dolphinscheduler-python/pydolphinscheduler/tests/core/test_process_definition.py
@@ -24,7 +24,6 @@ from unittest.mock import patch
 import pytest
 from freezegun import freeze_time
 
-from pydolphinscheduler.constants import ProcessDefinitionReleaseState
 from pydolphinscheduler.core import configuration
 from pydolphinscheduler.core.process_definition import ProcessDefinition
 from pydolphinscheduler.exceptions import PyDSParamException
@@ -67,7 +66,7 @@ def test_process_definition_key_attr(func):
         ("worker_group", configuration.WORKFLOW_WORKER_GROUP),
         ("warning_type", configuration.WORKFLOW_WARNING_TYPE),
         ("warning_group_id", 0),
-        ("release_state", ProcessDefinitionReleaseState.ONLINE),
+        ("release_state", 1),
     ],
 )
 def test_process_definition_default_value(name, value):
@@ -90,7 +89,6 @@ def test_process_definition_default_value(name, value):
         ("warning_type", str, "FAILURE"),
         ("warning_group_id", int, 1),
         ("timeout", int, 1),
-        ("release_state", str, "OFFLINE"),
         ("param", dict, {"key": "value"}),
     ],
 )
@@ -103,6 +101,41 @@ def test_set_attr(name, cls, expect):
         ), f"ProcessDefinition set attribute `{name}` do not work expect"
 
 
+@pytest.mark.parametrize(
+    "value,expect",
+    [
+        ("online", 1),
+        ("offline", 0),
+    ],
+)
+def test_set_release_state(value, expect):
+    """Test process definition set release_state attributes."""
+    with ProcessDefinition(TEST_PROCESS_DEFINITION_NAME, release_state=value) as pd:
+        assert (
+            getattr(pd, "release_state") == expect
+        ), "ProcessDefinition set attribute release_state do not return expect value."
+
+
+@pytest.mark.parametrize(
+    "value",
+    [
+        "oneline",
+        "offeline",
+        1,
+        0,
+        None,
+    ],
+)
+def test_set_release_state_error(value):
+    """Test process definition set release_state attributes with error."""
+    pd = ProcessDefinition(TEST_PROCESS_DEFINITION_NAME, release_state=value)
+    with pytest.raises(
+        PyDSParamException,
+        match="Parameter release_state only support `online` or `offline` but get.*",
+    ):
+        pd.release_state
+
+
 @pytest.mark.parametrize(
     "set_attr,set_val,get_attr,get_val",
     [
@@ -283,7 +316,7 @@ def test_process_definition_get_define_without_task():
         "warningType": configuration.WORKFLOW_WARNING_TYPE,
         "warningGroupId": 0,
         "timeout": 0,
-        "releaseState": ProcessDefinitionReleaseState.ONLINE,
+        "releaseState": 1,
         "param": None,
         "tasks": {},
         "taskDefinitionJson": [{}],
diff --git a/dolphinscheduler-python/pydolphinscheduler/tests/utils/test_yaml_parser.py b/dolphinscheduler-python/pydolphinscheduler/tests/utils/test_yaml_parser.py
index 4b1b05ed33..ad3aaf7bd1 100644
--- a/dolphinscheduler-python/pydolphinscheduler/tests/utils/test_yaml_parser.py
+++ b/dolphinscheduler-python/pydolphinscheduler/tests/utils/test_yaml_parser.py
@@ -58,10 +58,11 @@ expects = [
         "default.workflow.project": ("project-pydolphin", "project-pydolphinEdit"),
         "default.workflow.tenant": ("tenant_pydolphin", "SmithEdit"),
         "default.workflow.user": ("userPythonGateway", "SmithEdit"),
-        "default.workflow.queue": ("queuePythonGateway", "SmithEdit"),
-        "default.workflow.worker_group": ("default", "SmithEdit"),
-        "default.workflow.time_zone": ("Asia/Shanghai", "SmithEdit"),
-        "default.workflow.warning_type": ("NONE", "SmithEdit"),
+        "default.workflow.queue": ("queuePythonGateway", "queueEdit"),
+        "default.workflow.worker_group": ("default", "wgEdit"),
+        "default.workflow.release_state": ("online", "offline"),
+        "default.workflow.time_zone": ("Asia/Shanghai", "Europe/Amsterdam"),
+        "default.workflow.warning_type": ("NONE", "SUCCESS"),
     },
 ]
 


[dolphinscheduler] 14/16: [doc] Refactor local parameter docment (#10119)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit b32074398099d0b6f943413b98c43aaff775ce9c
Author: QuakeWang <45...@users.noreply.github.com>
AuthorDate: Tue May 24 17:16:08 2022 +0800

    [doc] Refactor local parameter docment (#10119)
    
    (cherry picked from commit 5998c73aac2f2532fca90fb8fe8ba431c322ffe3)
---
 docs/docs/en/guide/parameter/local.md              |  25 ++++++++++++++-------
 docs/docs/zh/guide/parameter/local.md              |  25 ++++++++++++++-------
 docs/img/global_parameter_en.png                   | Bin 219165 -> 0 bytes
 .../img/new_ui/dev/parameter/local_parameter01.png | Bin 0 -> 642098 bytes
 .../img/new_ui/dev/parameter/local_parameter02.png | Bin 0 -> 303057 bytes
 docs/img/supplement_local_parameter.png            | Bin 228552 -> 0 bytes
 docs/img/supplement_local_parameter_en.png         | Bin 246045 -> 0 bytes
 7 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/docs/docs/en/guide/parameter/local.md b/docs/docs/en/guide/parameter/local.md
index a764e7b5fc..3ba290bb36 100644
--- a/docs/docs/en/guide/parameter/local.md
+++ b/docs/docs/en/guide/parameter/local.md
@@ -6,14 +6,23 @@ Parameters configured on the task definition page, the scope of this parameter i
 
 ## Usage
 
-Usage of local parameters is: at the task define page, click the '+' beside the 'Custom Parameters' and fill in the key and value to save:
+To use local parameters: on the task definition page, click the '+' beside 'Custom Parameters', fill in the key and value, and save.
 
-<p align="center">
-     <img src="/img/supplement_local_parameter_en.png" width="80%" />
-</p>
+## Examples
 
-<p align="center">
-     <img src="/img/global_parameter_en.png" width="80%" />
-</p>
+This example shows how to use a local parameter to print the current date. Create a Shell task and write a script with the content `echo ${dt}`. Click **Custom Parameters** in the configuration panel and configure it as shown below:
 
-If you want to call the [built-in parameter](built-in.md) in the local parameters, fill in thevalue of built-in parameters in `value`. As in the above figure, `${biz_date}` and `${curdate}`.
\ No newline at end of file
+![local-parameter01](/img/new_ui/dev/parameter/local_parameter01.png)
+
+Parameters:
+
+- dt: the parameter name
+- IN: IN means the local parameter can only be used on the current node; OUT means it can be passed to downstream tasks
+- DATE: the data type, date
+- $[yyyy-MM-dd]: a built-in parameter derived with a user-defined date format
+
+Save the workflow and run it, then view the Shell task's log.
+
+![local-parameter02](/img/new_ui/dev/parameter/local_parameter02.png)
+
+> Note: A local parameter is scoped to its own task node within the workflow. If it is set to OUT, it can be passed to downstream tasks. See: [Parameter Context](context.md)
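
As a rough illustration (the actual substitution happens inside the worker before the script runs), the task above behaves approximately like:

```bash
# Approximation only: dt declared as DATE with $[yyyy-MM-dd] resolves to the
# current date, so `echo ${dt}` prints something like 2022-05-24.
dt=$(date +%Y-%m-%d)
echo "${dt}"
```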
diff --git a/docs/docs/zh/guide/parameter/local.md b/docs/docs/zh/guide/parameter/local.md
index b5598c81b8..b92c593ecf 100644
--- a/docs/docs/zh/guide/parameter/local.md
+++ b/docs/docs/zh/guide/parameter/local.md
@@ -6,14 +6,23 @@
 
 ## 使用方式
 
-本地参数配置方式如下:在任务定义页面,点击“自定义参数”右边的加号,填写对应的变量名称和对应的值,保存即可
+本地参数配置方式如下:在任务定义页面,点击“自定义参数”右边的加号,填写对应的变量名称和对应的值,保存即可。
 
-<p align="center">
-   <img src="/img/supplement_local_parameter.png" width="80%" />
- </p>
+## 任务样例
 
-<p align="center">
-   <img src="/img/global_parameter.png" width="80%" />
-</p>
+本样例展示了如何使用本地参数,打印输出当前日期。创建一个 Shell 任务,并编写脚本内容为 `echo ${dt}`。点击配置栏中的**自定义参数**,配置如下图所示:
 
-如果想要在本地参数中调用系统内置参数,将内置参数对应的值填到`value`中,如上图中的`${biz_date}`以及`${curdate}`
+![local-parameter01](/img/new_ui/dev/parameter/local_parameter01.png)
+
+参数说明:
+
+- dt:参数名
+- IN:IN 表示局部参数仅能在当前节点使用,OUT 表示局部参数可以向下游传递
+- DATE:数据类型,日期
+- $[yyyy-MM-dd]:自定义格式的衍生内置参数
+
+保存工作流并运行,查看 Shell 任务输出日志。
+
+![local-parameter02](/img/new_ui/dev/parameter/local_parameter02.png)
+
+> 注:本地参数可以在当前任务节点的工作流中,设置其为 OUT 则可以传递给下游的工作流使用,可以参考:[参数传递](context.md)
diff --git a/docs/img/global_parameter_en.png b/docs/img/global_parameter_en.png
deleted file mode 100644
index 32aecf3715..0000000000
Binary files a/docs/img/global_parameter_en.png and /dev/null differ
diff --git a/docs/img/new_ui/dev/parameter/local_parameter01.png b/docs/img/new_ui/dev/parameter/local_parameter01.png
new file mode 100644
index 0000000000..09d38366e8
Binary files /dev/null and b/docs/img/new_ui/dev/parameter/local_parameter01.png differ
diff --git a/docs/img/new_ui/dev/parameter/local_parameter02.png b/docs/img/new_ui/dev/parameter/local_parameter02.png
new file mode 100644
index 0000000000..02572a7036
Binary files /dev/null and b/docs/img/new_ui/dev/parameter/local_parameter02.png differ
diff --git a/docs/img/supplement_local_parameter.png b/docs/img/supplement_local_parameter.png
deleted file mode 100644
index 04588ea99e..0000000000
Binary files a/docs/img/supplement_local_parameter.png and /dev/null differ
diff --git a/docs/img/supplement_local_parameter_en.png b/docs/img/supplement_local_parameter_en.png
deleted file mode 100644
index 1209269fb6..0000000000
Binary files a/docs/img/supplement_local_parameter_en.png and /dev/null differ


[dolphinscheduler] 12/16: [Fix][UI] Fix the problem of multi-language support of workflow instance host in Chinese state. (#10223)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit f28890e139849dedd5f356abc43d0fd7d6e15be1
Author: songjianet <17...@qq.com>
AuthorDate: Tue May 24 16:14:56 2022 +0800

    [Fix][UI] Fix the problem of multi-language support of workflow instance host in Chinese state. (#10223)
    
    (cherry picked from commit 6cebdfdf5045862653a1d05354bdd3f039919f54)
---
 dolphinscheduler-ui/src/locales/modules/zh_CN.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dolphinscheduler-ui/src/locales/modules/zh_CN.ts b/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
index 2f4177a99b..97e0bca96f 100644
--- a/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
+++ b/dolphinscheduler-ui/src/locales/modules/zh_CN.ts
@@ -464,7 +464,7 @@ const project = {
     fault_tolerant_sign: '容错标识',
     dry_run_flag: '空跑标识',
     executor: '执行用户',
-    host: 'Host',
+    host: '主机',
     start_process: '启动工作流',
     execute_from_the_current_node: '从当前节点开始执行',
     recover_tolerance_fault_process: '恢复被容错的工作流',


[dolphinscheduler] 16/16: [Fix][UI] Fix issue with treemap depth in workflow relationships. (#10229)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit eedf2ba001a6e44c876570598797aeb8b37a1dee
Author: songjianet <17...@qq.com>
AuthorDate: Tue May 24 18:33:00 2022 +0800

    [Fix][UI] Fix issue with treemap depth in workflow relationships. (#10229)
    
    * [Fix][UI] Fix issue with treemap depth in workflow relationships.
    
    * [Fix][UI] Fix issue with treemap depth in workflow relationships.
    
    (cherry picked from commit abecec3c28402b9781ae05831228af0f2bfb4a9f)
---
 dolphinscheduler-ui/src/components/chart/modules/Tree.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dolphinscheduler-ui/src/components/chart/modules/Tree.tsx b/dolphinscheduler-ui/src/components/chart/modules/Tree.tsx
index 10948eb201..f94918c4b0 100644
--- a/dolphinscheduler-ui/src/components/chart/modules/Tree.tsx
+++ b/dolphinscheduler-ui/src/components/chart/modules/Tree.tsx
@@ -62,7 +62,7 @@ const TreeChart = defineComponent({
           symbolSize: 18,
           edgeShape: 'polyline',
           edgeForkPosition: '63%',
-          initialTreeDepth: 3,
+          initialTreeDepth: 'auto',
           lineStyle: {
             width: 3
           },


[dolphinscheduler] 13/16: [doc] Update kubernetes' FAQ (#10221)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 00e11622865077391c01026540e3e894bf4faed0
Author: 旺阳 <qi...@cisco.com>
AuthorDate: Tue May 24 17:00:57 2022 +0800

    [doc] Update kubernetes' FAQ (#10221)
    
    (cherry picked from commit c12848a4ff7fa2fd307c0c18acef40c410d4db99)
---
 docs/docs/en/guide/installation/kubernetes.md | 89 ++++++++++++--------------
 docs/docs/zh/guide/installation/kubernetes.md | 91 ++++++++++++---------------
 2 files changed, 79 insertions(+), 101 deletions(-)

diff --git a/docs/docs/en/guide/installation/kubernetes.md b/docs/docs/en/guide/installation/kubernetes.md
index 572bf19e7b..4162b03217 100644
--- a/docs/docs/en/guide/installation/kubernetes.md
+++ b/docs/docs/en/guide/installation/kubernetes.md
@@ -186,24 +186,29 @@ kubectl scale --replicas=6 sts dolphinscheduler-worker -n test # with test names
 
 > Because of the commercial license, we cannot directly use the driver of MySQL.
 >
-> If you want to use MySQL, you can build a new image based on the `apache/dolphinscheduler` image follow the following instructions:
+> If you want to use MySQL, you can build a new image based on the `apache/dolphinscheduler-<service>` image by following the instructions below:
+>
+> Since version 3.0.0, DolphinScheduler has been split into microservices, so changing the metadata store requires rebuilding every service image with the MySQL driver, including dolphinscheduler-tools, dolphinscheduler-master, dolphinscheduler-worker, dolphinscheduler-api and dolphinscheduler-alert-server.
 
 1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar).
 
 2. Create a new `Dockerfile` to add MySQL driver:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-<service>:3.0.0-beta-1
+# For example
+# FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-tools:3.0.0-beta-1
+
 COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 ```
 
 3. Build a new docker image including MySQL driver:
 
 ```
-docker build -t apache/dolphinscheduler:mysql-driver .
+docker build -t apache/dolphinscheduler-<service>:mysql-driver .
 ```
 
-4. Push the docker image `apache/dolphinscheduler:mysql-driver` to a docker registry.
+4. Push the docker image `apache/dolphinscheduler-<service>:mysql-driver` to a docker registry.
 
 5. Modify image `repository` and update `tag` to `mysql-driver` in `values.yaml`.
 
@@ -214,7 +219,6 @@ docker build -t apache/dolphinscheduler:mysql-driver .
 ```yaml
 externalDatabase:
   type: "mysql"
-  driver: "com.mysql.jdbc.Driver"
   host: "localhost"
   port: "3306"
   username: "root"
@@ -225,70 +229,53 @@ externalDatabase:
 
 8. Run a DolphinScheduler release in Kubernetes (See **Install DolphinScheduler**).
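
Steps 1–5 amount to a short download/build/push loop per service image; a hedged sketch, where the registry host is a placeholder and the generated Dockerfile mirrors the one above:

```bash
# Hedged sketch of steps 1-5 for one service; repeat for every microservice image.
# my-registry.example.com is a placeholder registry host.
SERVICE=tools   # also: master, worker, api, alert-server
curl -LO https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar
cat > Dockerfile <<EOF
FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-${SERVICE}:3.0.0-beta-1
COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
EOF
docker build -t my-registry.example.com/apache/dolphinscheduler-${SERVICE}:mysql-driver .
docker push my-registry.example.com/apache/dolphinscheduler-${SERVICE}:mysql-driver
```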
 
-### How to Support MySQL Datasource in `Datasource manage`?
+### How to Support MySQL or Oracle Datasource in `Datasource manage`?
 
-> Because of the commercial license, we cannot directly use the driver of MySQL.
+> Because of the commercial licenses, we cannot directly include the MySQL or Oracle drivers.
 >
-> If you want to add MySQL datasource, you can build a new image based on the `apache/dolphinscheduler` image follow the following instructions:
+> If you want to add a MySQL or Oracle datasource, you can build a new image based on the `apache/dolphinscheduler-<service>` image by following the instructions below:
+>
+> You need to rebuild two service images: dolphinscheduler-worker and dolphinscheduler-api.
 
 1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar).
+or download the Oracle driver [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (such as `ojdbc8-19.9.0.0.jar`)
 
-2. Create a new `Dockerfile` to add MySQL driver:
+2. Create a new `Dockerfile` to add MySQL or Oracle driver:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
-COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
-```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-<service>:3.0.0-beta-1
+# For example
+# FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 
-3. Build a new docker image including MySQL driver:
-
-```
-docker build -t apache/dolphinscheduler:mysql-driver .
-```
-
-4. Push the docker image `apache/dolphinscheduler:mysql-driver` to a docker registry.
-
-5. Modify image `repository` and update `tag` to `mysql-driver` in `values.yaml`.
-
-6. Run a DolphinScheduler release in Kubernetes (See **Install DolphinScheduler**).
-
-7. Add a MySQL datasource in `Datasource manage`.
-
-### How to Support Oracle Datasource in `Datasource manage`?
-
-> Because of the commercial license, we cannot directly use the driver of Oracle.
->
-> If you want to add Oracle datasource, you can build a new image based on the `apache/dolphinscheduler` image follow the following instructions:
-
-1. Download the Oracle driver [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (such as `ojdbc8-19.9.0.0.jar`)
-
-2. Create a new `Dockerfile` to add Oracle driver:
+# If you want to support MySQL Datasource
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 
-```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+# If you want to support Oracle Datasource
 COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 ```
 
-3. Build a new docker image including Oracle driver:
+3. Build a new docker image including MySQL or Oracle driver:
 
 ```
-docker build -t apache/dolphinscheduler:oracle-driver .
+docker build -t apache/dolphinscheduler-<service>:new-driver .
 ```
 
-4. Push the docker image `apache/dolphinscheduler:oracle-driver` to a docker registry.
+4. Push the docker image `apache/dolphinscheduler-<service>:new-driver` to a docker registry.
 
-5. Modify image `repository` and update `tag` to `oracle-driver` in `values.yaml`.
+5. Modify image `repository` and update `tag` to `new-driver` in `values.yaml`.
 
 6. Run a DolphinScheduler release in Kubernetes (See **Install DolphinScheduler**).
 
-7. Add an Oracle datasource in `Datasource manage`.
+7. Add a MySQL or Oracle datasource in `Datasource manage`.
 
 ### How to Support Python 2 pip and Custom requirements.txt?
 
+> Just change the image of the dolphinscheduler-worker service.
+
 1. Create a new `Dockerfile` to install pip:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 COPY requirements.txt /tmp
 RUN apt-get update && \
     apt-get install -y --no-install-recommends python-pip && \
@@ -299,16 +286,16 @@ RUN apt-get update && \
 The command will install the default **pip 18.1**. If you want to upgrade pip, just add the following command.
 
 ```
-    pip install --no-cache-dir -U pip && \
+pip install --no-cache-dir -U pip && \
 ```
 
 2. Build a new docker image including pip:
 
 ```
-docker build -t apache/dolphinscheduler:pip .
+docker build -t apache/dolphinscheduler-worker:pip .
 ```
 
-3. Push the docker image `apache/dolphinscheduler:pip` to a docker registry.
+3. Push the docker image `apache/dolphinscheduler-worker:pip` to a docker registry.
 
 4. Modify image `repository` and update `tag` to `pip` in `values.yaml`.
 
@@ -318,10 +305,12 @@ docker build -t apache/dolphinscheduler:pip .
 
 ### How to Support Python 3?
 
+> Just change the image of the dolphinscheduler-worker service.
+
 1. Create a new `Dockerfile` to install Python 3:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 RUN apt-get update && \
     apt-get install -y --no-install-recommends python3 && \
     rm -rf /var/lib/apt/lists/*
@@ -330,16 +319,16 @@ RUN apt-get update && \
 The command will install the default **Python 3.7.3**. If you also want to install **pip3**, just replace `python3` with `python3-pip`, like this:
 
 ```
-    apt-get install -y --no-install-recommends python3-pip && \
+apt-get install -y --no-install-recommends python3-pip && \
 ```
 
 2. Build a new docker image including Python 3:
 
 ```
-docker build -t apache/dolphinscheduler:python3 .
+docker build -t apache/dolphinscheduler-worker:python3 .
 ```
 
-3. Push the docker image `apache/dolphinscheduler:python3` to a docker registry.
+3. Push the docker image `apache/dolphinscheduler-worker:python3` to a docker registry.
 
 4. Modify image `repository` and update `tag` to `python3` in `values.yaml`.
 
diff --git a/docs/docs/zh/guide/installation/kubernetes.md b/docs/docs/zh/guide/installation/kubernetes.md
index 5a97ed2467..569fdb0d00 100644
--- a/docs/docs/zh/guide/installation/kubernetes.md
+++ b/docs/docs/zh/guide/installation/kubernetes.md
@@ -187,24 +187,29 @@ kubectl scale --replicas=6 sts dolphinscheduler-worker -n test # with test names
 
 > 由于商业许可证的原因,我们不能直接使用 MySQL 的驱动包.
 >
-> 如果你要使用 MySQL, 你可以基于官方镜像 `apache/dolphinscheduler` 进行构建.
+> 如果你要使用 MySQL, 你可以基于官方镜像 `apache/dolphinscheduler-<service>` 进行构建.
+> 
+> 从3.0.0版本起,dolphinscheduler已经微服务化,更改元数据存储需要把所有服务的镜像都加入 MySQL 驱动包,包括 dolphinscheduler-tools, dolphinscheduler-master, dolphinscheduler-worker, dolphinscheduler-api, dolphinscheduler-alert-server。
 
 1. 下载 MySQL 驱动包 [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
 
 2. 创建一个新的 `Dockerfile`,用于添加 MySQL 的驱动包:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-<service>:3.0.0-beta-1
+# For example
+# FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-tools:3.0.0-beta-1
+
 COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 ```
 
 3. 构建一个包含 MySQL 驱动包的新镜像:
 
 ```
-docker build -t apache/dolphinscheduler:mysql-driver .
+docker build -t apache/dolphinscheduler-<service>:mysql-driver .
 ```
 
-4. 推送 docker 镜像 `apache/dolphinscheduler:mysql-driver` 到一个 docker registry 中
+4. 推送 docker 镜像 `apache/dolphinscheduler-<service>:mysql-driver` 到一个 docker registry 中
 
 5. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `mysql-driver`
 
@@ -215,7 +220,6 @@ docker build -t apache/dolphinscheduler:mysql-driver .
 ```yaml
 externalDatabase:
   type: "mysql"
-  driver: "com.mysql.jdbc.Driver"
   host: "localhost"
   port: "3306"
   username: "root"
@@ -226,70 +230,53 @@ externalDatabase:
 
 8. 部署 dolphinscheduler (详见**安装 dolphinscheduler**)
 
-### 如何在数据源中心支持 MySQL 数据源?
+### 如何在数据源中心支持 MySQL 或者 Oracle 数据源?
 
-> 由于商业许可证的原因,我们不能直接使用 MySQL 的驱动包.
+> 由于商业许可证的原因,我们不能直接使用 MySQL 或者 Oracle 的驱动包.
 >
-> 如果你要添加 MySQL 数据源, 你可以基于官方镜像 `apache/dolphinscheduler` 进行构建.
-
-1. 下载 MySQL 驱动包 [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
-
-2. 创建一个新的 `Dockerfile`,用于添加 MySQL 驱动包:
+> 如果你要添加 MySQL 或者 Oracle 数据源, 你可以基于官方镜像 `apache/dolphinscheduler-<service>` 进行构建.
+> 
+> 需要更改 dolphinscheduler-worker, dolphinscheduler-api 两个服务的镜像.
 
-```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
-COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
-```
+1. 下载 MySQL 驱动包 [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar) 
+或者 Oracle 驱动包 [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (例如 `ojdbc8-19.9.0.0.jar`)
 
-3. 构建一个包含 MySQL 驱动包的新镜像:
+2. 创建一个新的 `Dockerfile`,用于添加 MySQL 或者 Oracle 驱动包:
 
 ```
-docker build -t apache/dolphinscheduler:mysql-driver .
-```
-
-4. 推送 docker 镜像 `apache/dolphinscheduler:mysql-driver` 到一个 docker registry 中
-
-5. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `mysql-driver`
-
-6. 部署 dolphinscheduler (详见**安装 dolphinscheduler**)
-
-7. 在数据源中心添加一个 MySQL 数据源
-
-### 如何在数据源中心支持 Oracle 数据源?
-
-> 由于商业许可证的原因,我们不能直接使用 Oracle 的驱动包.
->
-> 如果你要添加 Oracle 数据源, 你可以基于官方镜像 `apache/dolphinscheduler` 进行构建.
-
-1. 下载 Oracle 驱动包 [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (例如 `ojdbc8-19.9.0.0.jar`)
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-<service>:3.0.0-beta-1
+# For example
+# FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 
-2. 创建一个新的 `Dockerfile`,用于添加 Oracle 驱动包:
+# If you want to support MySQL Datasource
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
 
-```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+# If you want to support Oracle Datasource
 COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
 ```
 
-3. 构建一个包含 Oracle 驱动包的新镜像:
+3. 构建一个包含 MySQL 或者 Oracle 驱动包的新镜像:
 
 ```
-docker build -t apache/dolphinscheduler:oracle-driver .
+docker build -t apache/dolphinscheduler-<service>:new-driver .
 ```
 
-4. 推送 docker 镜像 `apache/dolphinscheduler:oracle-driver` 到一个 docker registry 中
+4. 推送 docker 镜像 `apache/dolphinscheduler-<service>:new-driver` 到一个 docker registry 中
 
-5. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `oracle-driver`
+5. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `new-driver`
 
 6. 部署 dolphinscheduler (详见**安装 dolphinscheduler**)
 
-7. 在数据源中心添加一个 Oracle 数据源
+7. 在数据源中心添加一个 MySQL 或者 Oracle 数据源
 
 ### 如何支持 Python 2 pip 以及自定义 requirements.txt?
 
+> 只需要更改 dolphinscheduler-worker 服务的镜像.
+
 1. 创建一个新的 `Dockerfile`,用于安装 pip:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 COPY requirements.txt /tmp
 RUN apt-get update && \
     apt-get install -y --no-install-recommends python-pip && \
@@ -300,16 +287,16 @@ RUN apt-get update && \
 这个命令会安装默认的 **pip 18.1**. 如果你想升级 pip, 只需添加一行
 
 ```
-    pip install --no-cache-dir -U pip && \
+pip install --no-cache-dir -U pip && \
 ```
 
 2. 构建一个包含 pip 的新镜像:
 
 ```
-docker build -t apache/dolphinscheduler:pip .
+docker build -t apache/dolphinscheduler-worker:pip .
 ```
 
-3. 推送 docker 镜像 `apache/dolphinscheduler:pip` 到一个 docker registry 中
+3. 推送 docker 镜像 `apache/dolphinscheduler-worker:pip` 到一个 docker registry 中
 
 4. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `pip`
 
@@ -319,10 +306,12 @@ docker build -t apache/dolphinscheduler:pip .
 
 ### 如何支持 Python 3?
 
+> 只需要更改 dolphinscheduler-worker 服务的镜像.
+
 1. 创建一个新的 `Dockerfile`,用于安装 Python 3:
 
 ```
-FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:3.0.0-beta-1
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler-worker:3.0.0-beta-1
 RUN apt-get update && \
     apt-get install -y --no-install-recommends python3 && \
     rm -rf /var/lib/apt/lists/*
@@ -331,16 +320,16 @@ RUN apt-get update && \
 这个命令会安装默认的 **Python 3.7.3**. 如果你也想安装 **pip3**, 将 `python3` 替换为 `python3-pip` 即可
 
 ```
-    apt-get install -y --no-install-recommends python3-pip && \
+apt-get install -y --no-install-recommends python3-pip && \
 ```
 
 2. 构建一个包含 Python 3 的新镜像:
 
 ```
-docker build -t apache/dolphinscheduler:python3 .
+docker build -t apache/dolphinscheduler-worker:python3 .
 ```
 
-3. 推送 docker 镜像 `apache/dolphinscheduler:python3` 到一个 docker registry 中
+3. 推送 docker 镜像 `apache/dolphinscheduler-worker:python3` 到一个 docker registry 中
 
 4. 修改 `values.yaml` 文件中 image 的 `repository` 字段,并更新 `tag` 为 `python3`
 


[dolphinscheduler] 05/16: [Bug][Deploy]Fix worker-server path in script scp-hosts.sh (#10208) (#10209)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 03e5ac696421f0037645f56e7e1c2d1db0d93950
Author: Eric Gao <er...@gmail.com>
AuthorDate: Tue May 24 09:53:11 2022 +0800

    [Bug][Deploy]Fix worker-server path in script scp-hosts.sh (#10208) (#10209)
    
    (cherry picked from commit 4dbe27bb8f7e87fcf18582e970d08236f2dbf3d5)
---
 script/scp-hosts.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/script/scp-hosts.sh b/script/scp-hosts.sh
index b5cd611984..5ac51de8ed 100755
--- a/script/scp-hosts.sh
+++ b/script/scp-hosts.sh
@@ -49,7 +49,7 @@ do
     fi
   done
   # set worker groups in application.yaml
-  [[ -n ${workerIndex} ]] && sed -i "s/- default/- ${groupNames[$workerIndex]}/" worker-server/conf/application.yaml
+  [[ -n ${workerIndex} ]] && sed -i "s/- default/- ${groupNames[$workerIndex]}/" ../worker-server/conf/application.yaml
 
   for dsDir in bin master-server worker-server alert-server api-server ui tools
   do
@@ -58,7 +58,7 @@ do
     scp -q -P $sshPort -r $workDir/../$dsDir  $host:$installPath
   done
   # restore worker groups to default
-  [[ -n ${workerIndex} ]] && sed -i "s/- ${groupNames[$workerIndex]}/- default/" worker-server/conf/application.yaml
+  [[ -n ${workerIndex} ]] && sed -i "s/- ${groupNames[$workerIndex]}/- default/" ../worker-server/conf/application.yaml
 
   echo "scp dirs to $host/$installPath complete"
 done
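
The fix above matters because scp-hosts.sh runs relative to its own directory, one level below the unpacked release root, so `worker-server/conf` is only reachable through `..`. An illustrative replay of the set-copy-restore pattern the two patched lines implement; the paths, host, and group name here are made up:

```bash
#!/usr/bin/env bash
# Set a worker group, ship the directory, restore the default.
# /opt/dolphinscheduler-src/script mimics the script's working
# directory; worker1 and gpu are hypothetical.
set -euo pipefail
cd /opt/dolphinscheduler-src/script
group="gpu"

sed -i "s/- default/- ${group}/" ../worker-server/conf/application.yaml
scp -q -r ../worker-server worker1:/opt/dolphinscheduler
sed -i "s/- ${group}/- default/" ../worker-server/conf/application.yaml
```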


[dolphinscheduler] 04/16: [Fix-10199] [Workflow/Workflow Definition] After deleting messages in batches, the error message is repeated (#10201)

Posted by zh...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch 3.0.0-beta-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit 9a966e4cb263677103dfa3abaf7331c361721c85
Author: Mr.An <42...@users.noreply.github.com>
AuthorDate: Tue May 24 09:17:44 2022 +0800

    [Fix-10199] [Workflow/Workflow Definition] After deleting messages in batches, the error message is repeated (#10201)
    
    * remove duplicate prompts
    
    * update import package
    
    (cherry picked from commit 0e8ed6ebb43c804e0264afe51657e98a54be962f)
---
 .../api/controller/ProcessDefinitionController.java        | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
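
The patch below swaps the failure-message `List` for a `Set`, so a message produced identically for several workflow codes is reported once instead of once per code. A quick shell analogy of that dedupe-before-join behaviour, with invented messages:

```bash
msgs='process definition [demo] is online, cannot be deleted
process definition [demo] is online, cannot be deleted
process definition [etl] has running instances'

echo "$msgs"            # List behaviour: the duplicate survives the join
echo "$msgs" | sort -u  # Set behaviour: each distinct message appears once
```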

diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
index 942ac97839..b238044a16 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@@ -32,10 +32,10 @@ import org.apache.dolphinscheduler.dao.entity.User;
 import org.apache.commons.lang.StringUtils;
 
 import java.text.MessageFormat;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.HashSet;
 
 import javax.servlet.http.HttpServletResponse;
 
@@ -637,7 +637,7 @@ public class ProcessDefinitionController extends BaseController {
                                                       @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                                       @RequestParam("codes") String codes) {
         Map<String, Object> result = new HashMap<>();
-        List<String> deleteFailedCodeList = new ArrayList<>();
+        Set<String> deleteFailedCodeSet = new HashSet<>();
         if (!StringUtils.isEmpty(codes)) {
             String[] processDefinitionCodeArray = codes.split(",");
             for (String strProcessDefinitionCode : processDefinitionCodeArray) {
@@ -645,17 +645,17 @@ public class ProcessDefinitionController extends BaseController {
                 try {
                     Map<String, Object> deleteResult = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, code);
                     if (!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))) {
-                        deleteFailedCodeList.add((String) deleteResult.get(Constants.MSG));
+                        deleteFailedCodeSet.add((String) deleteResult.get(Constants.MSG));
                         logger.error((String) deleteResult.get(Constants.MSG));
                     }
                 } catch (Exception e) {
-                    deleteFailedCodeList.add(MessageFormat.format(Status.DELETE_PROCESS_DEFINE_BY_CODES_ERROR.getMsg(), strProcessDefinitionCode));
+                    deleteFailedCodeSet.add(MessageFormat.format(Status.DELETE_PROCESS_DEFINE_BY_CODES_ERROR.getMsg(), strProcessDefinitionCode));
                 }
             }
         }
 
-        if (!deleteFailedCodeList.isEmpty()) {
-            putMsg(result, BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR, String.join("\n", deleteFailedCodeList));
+        if (!deleteFailedCodeSet.isEmpty()) {
+            putMsg(result, BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR, String.join("\n", deleteFailedCodeSet));
         } else {
             putMsg(result, Status.SUCCESS);
         }