Posted to commits@dolphinscheduler.apache.org by ke...@apache.org on 2022/10/20 04:31:09 UTC

[dolphinscheduler] branch dev updated: [Improvement-12293] Update the common.properties in api-test-case and e2e-case (#12295)

This is an automated email from the ASF dual-hosted git repository.

kezhenxu94 pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git


The following commit(s) were added to refs/heads/dev by this push:
     new 666849abe0 [Improvement-12293] Update the common.properties in api-test-case and e2e-case (#12295)
666849abe0 is described below

commit 666849abe001e131ec4096a60c8c18136656233d
Author: rickchengx <38...@users.noreply.github.com>
AuthorDate: Thu Oct 20 12:31:03 2022 +0800

    [Improvement-12293] Update the common.properties in api-test-case and e2e-case (#12295)
---
 .../resources/docker/file-manage/common.properties | 85 ++++++++++++++++++----
 .../resources/docker/file-manage/common.properties | 75 +++++++++++++------
 2 files changed, 124 insertions(+), 36 deletions(-)
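
The net effect of the change is to migrate the api-test-case config from the old flat property keys to the newer, namespaced ones; the e2e-case config already used the new names and is mainly reorganized to match, with the same new options added to both. As a rough summary inferred from the hunks below (not part of the diff itself):

    resource.upload.path   ->  resource.storage.upload.base.path
    hdfs.root.user         ->  resource.hdfs.root.user
    fs.defaultFS           ->  resource.hdfs.fs.defaultFS
    aws.access.key.id      ->  resource.aws.access.key.id
    aws.secret.access.key  ->  resource.aws.secret.access.key
    aws.region             ->  resource.aws.region
    aws.endpoint           ->  resource.aws.s3.endpoint

New keys such as resource.aws.s3.bucket.name, the resource.alibaba.cloud.* (OSS) group, data-quality.jar.name, support.hive.oneSession, conda.path and the ml.mlflow.* options are added alongside the migrated ones.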

diff --git a/dolphinscheduler-api-test/dolphinscheduler-api-test-case/src/test/resources/docker/file-manage/common.properties b/dolphinscheduler-api-test/dolphinscheduler-api-test-case/src/test/resources/docker/file-manage/common.properties
index abac3ad391..847bcfa3a7 100644
--- a/dolphinscheduler-api-test/dolphinscheduler-api-test-case/src/test/resources/docker/file-manage/common.properties
+++ b/dolphinscheduler-api-test/dolphinscheduler-api-test-case/src/test/resources/docker/file-manage/common.properties
@@ -14,29 +14,61 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
 # user data local directory path, please make sure the directory exists and have read write permissions
 data.basedir.path=/tmp/dolphinscheduler
-# resource storage type: HDFS, S3, NONE
+
+# resource view suffixs
+#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
+
+# resource storage type: HDFS, S3, OSS, NONE
 resource.storage.type=S3
-# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration
-# please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
-resource.upload.path=/dolphinscheduler
+# resource storage path on HDFS/S3: resource files will be stored under this base path; please make sure the directory exists on HDFS/S3 and has read/write permissions. "/dolphinscheduler" is recommended
+resource.storage.upload.base.path=/dolphinscheduler
+
+# The AWS access key. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.access.key.id=accessKey123
+# The AWS secret access key. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.secret.access.key=secretKey123
+# The AWS Region to use. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.region=us-east-1
+# The name of the bucket. You need to create it yourself, otherwise the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
+resource.aws.s3.bucket.name=dolphinscheduler
+# You need to set this parameter when using a private cloud S3. If using a public cloud S3, you only need to set resource.aws.region, or set this to a public cloud endpoint such as s3.cn-north-1.amazonaws.com.cn
+resource.aws.s3.endpoint=http://s3:9000
+
+# alibaba cloud access key id, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.access.key.id=<your-access-key-id>
+# alibaba cloud access key secret, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.access.key.secret=<your-access-key-secret>
+# alibaba cloud region, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.region=cn-hangzhou
+# oss bucket name, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.oss.bucket.name=dolphinscheduler
+# oss bucket endpoint, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.oss.endpoint=https://oss-cn-hangzhou.aliyuncs.com
+
+# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
+resource.hdfs.root.user=hdfs
+# if resource.storage.type=S3, the value should look like s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
+resource.hdfs.fs.defaultFS=s3a://dolphinscheduler
+
 # whether to startup kerberos
 hadoop.security.authentication.startup.state=false
+
 # java.security.krb5.conf path
 java.security.krb5.conf.path=/opt/krb5.conf
+
 # login user from keytab username
 login.user.keytab.username=hdfs-mycluster@ESZ.COM
+
 # login user from keytab path
 login.user.keytab.path=/opt/hdfs.headless.keytab
+
 # kerberos expire time, the unit is hour
 kerberos.expire.time=2
-# resource view suffixs
-#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
-# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
-hdfs.root.user=hdfs
-# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
-fs.defaultFS=s3a://dolphinscheduler
+
+
 # resourcemanager port, the default value is 8088 if not specified
 resource.manager.httpaddress.port=8088
 # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
@@ -45,25 +77,48 @@ yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
 yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s
 # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
 yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
+
 # datasource encryption enable
 datasource.encryption.enable=false
+
 # datasource encryption salt
 datasource.encryption.salt=!@#$%^&*
+
+# data quality option
+data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
+
+#data-quality.error.output.path=/tmp/data-quality-error-data
+
+# Network IP gets priority, default inner outer
+
+# Whether hive SQL is executed in the same session
+support.hive.oneSession=false
+
 # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
 sudo.enable=true
+
 # network interface preferred like eth0, default: empty
 #dolphin.scheduler.network.interface.preferred=
+
 # network IP gets priority, default: inner outer
 #dolphin.scheduler.network.priority.strategy=default
+
 # system env path
 #dolphinscheduler.env.path=dolphinscheduler_env.sh
+
 # development state
 development.state=false
+
 # rpc port
 alert.rpc.port=50052
-aws.access.key.id=accessKey123
-aws.secret.access.key=secretKey123
-aws.region=us-east-1
-aws.endpoint=http://s3:9000
+
+# set path of conda.sh
+conda.path=/opt/anaconda3/etc/profile.d/conda.sh
+
 # Task resource limit state
-task.resource.limit.state=false
\ No newline at end of file
+task.resource.limit.state=false
+
+# mlflow task plugin preset repository
+ml.mlflow.preset_repository=https://github.com/apache/dolphinscheduler-mlflow
+# mlflow task plugin preset repository version
+ml.mlflow.preset_repository_version="main"
\ No newline at end of file
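
The comments added above also describe the HDFS alternative. As a minimal sketch of how the same test config could be pointed at HDFS instead of the S3 endpoint used here (illustrative only; the hdfs://localhost:8020 address is a placeholder and not part of this commit):

    # switch the resource store from S3 to HDFS
    resource.storage.type=HDFS
    resource.storage.upload.base.path=/dolphinscheduler
    resource.hdfs.root.user=hdfs
    # single-namenode address; with namenode HA, copy core-site.xml and hdfs-site.xml to the conf dir instead
    resource.hdfs.fs.defaultFS=hdfs://localhost:8020
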
diff --git a/dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/resources/docker/file-manage/common.properties b/dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/resources/docker/file-manage/common.properties
index c8a3c32433..847bcfa3a7 100644
--- a/dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/resources/docker/file-manage/common.properties
+++ b/dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/resources/docker/file-manage/common.properties
@@ -18,12 +18,41 @@
 # user data local directory path, please make sure the directory exists and have read write permissions
 data.basedir.path=/tmp/dolphinscheduler
 
-# resource storage type: HDFS, S3, NONE
-resource.storage.type=S3
+# resource view suffixs
+#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
 
-# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
+# resource storage type: HDFS, S3, OSS, NONE
+resource.storage.type=S3
+# resource storage path on HDFS/S3: resource files will be stored under this base path; please make sure the directory exists on HDFS/S3 and has read/write permissions. "/dolphinscheduler" is recommended
 resource.storage.upload.base.path=/dolphinscheduler
 
+# The AWS access key. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.access.key.id=accessKey123
+# The AWS secret access key. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.secret.access.key=secretKey123
+# The AWS Region to use. If resource.storage.type=S3 or EMR-Task is used, this configuration is required
+resource.aws.region=us-east-1
+# The name of the bucket. You need to create it yourself, otherwise the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
+resource.aws.s3.bucket.name=dolphinscheduler
+# You need to set this parameter when using a private cloud S3. If using a public cloud S3, you only need to set resource.aws.region, or set this to a public cloud endpoint such as s3.cn-north-1.amazonaws.com.cn
+resource.aws.s3.endpoint=http://s3:9000
+
+# alibaba cloud access key id, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.access.key.id=<your-access-key-id>
+# alibaba cloud access key secret, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.access.key.secret=<your-access-key-secret>
+# alibaba cloud region, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.region=cn-hangzhou
+# oss bucket name, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.oss.bucket.name=dolphinscheduler
+# oss bucket endpoint, required if you set resource.storage.type=OSS
+resource.alibaba.cloud.oss.endpoint=https://oss-cn-hangzhou.aliyuncs.com
+
+# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
+resource.hdfs.root.user=hdfs
+# if resource.storage.type=S3, the value should look like s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
+resource.hdfs.fs.defaultFS=s3a://dolphinscheduler
+
 # whether to startup kerberos
 hadoop.security.authentication.startup.state=false
 
@@ -39,25 +68,13 @@ login.user.keytab.path=/opt/hdfs.headless.keytab
 # kerberos expire time, the unit is hour
 kerberos.expire.time=2
 
-# resource view suffixs
-#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
-
-# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
-resource.hdfs.root.user=hdfs
-
-# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
-resource.hdfs.fs.defaultFS=s3a://dolphinscheduler
-
 
 # resourcemanager port, the default value is 8088 if not specified
 resource.manager.httpaddress.port=8088
-
 # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-
 # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
 yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s
-
 # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
 yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
 
@@ -67,6 +84,16 @@ datasource.encryption.enable=false
 # datasource encryption salt
 datasource.encryption.salt=!@#$%^&*
 
+# data quality option
+data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
+
+#data-quality.error.output.path=/tmp/data-quality-error-data
+
+# Network IP gets priority, default inner outer
+
+# Whether hive SQL is executed in the same session
+support.hive.oneSession=false
+
 # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
 sudo.enable=true
 
@@ -75,17 +102,23 @@ sudo.enable=true
 
 # network IP gets priority, default: inner outer
 #dolphin.scheduler.network.priority.strategy=default
+
 # system env path
 #dolphinscheduler.env.path=dolphinscheduler_env.sh
+
 # development state
 development.state=false
+
 # rpc port
 alert.rpc.port=50052
-resource.aws.access.key.id=accessKey123
-resource.aws.secret.access.key=secretKey123
-resource.aws.region=us-east-1
-resource.aws.s3.bucket.name=dolphinscheduler
-resource.aws.s3.endpoint=http://s3:9000
+
+# set path of conda.sh
+conda.path=/opt/anaconda3/etc/profile.d/conda.sh
 
 # Task resource limit state
-task.resource.limit.state=false
\ No newline at end of file
+task.resource.limit.state=false
+
+# mlflow task plugin preset repository
+ml.mlflow.preset_repository=https://github.com/apache/dolphinscheduler-mlflow
+# mlflow task plugin preset repository version
+ml.mlflow.preset_repository_version="main"
\ No newline at end of file
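
Likewise, the newly added Alibaba Cloud keys would be exercised by switching the storage type to OSS. A minimal sketch using the placeholder values from the hunks above (the access key id/secret remain placeholders to fill in):

    # switch the resource store from S3 to OSS
    resource.storage.type=OSS
    resource.alibaba.cloud.access.key.id=<your-access-key-id>
    resource.alibaba.cloud.access.key.secret=<your-access-key-secret>
    resource.alibaba.cloud.region=cn-hangzhou
    resource.alibaba.cloud.oss.bucket.name=dolphinscheduler
    resource.alibaba.cloud.oss.endpoint=https://oss-cn-hangzhou.aliyuncs.com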