Posted to commits@dolphinscheduler.apache.org by GitBox <gi...@apache.org> on 2022/08/05 02:13:00 UTC

[GitHub] [dolphinscheduler] xyzhh commented on issue #11089: [Bug] [data quality ] Minio resource center is not supported by data quality task.

xyzhh commented on issue #11089:
URL: https://github.com/apache/dolphinscheduler/issues/11089#issuecomment-1205962837

   #
   # Licensed to the Apache Software Foundation (ASF) under one or more
   # contributor license agreements.  See the NOTICE file distributed with
   # this work for additional information regarding copyright ownership.
   # The ASF licenses this file to You under the Apache License, Version 2.0
   # (the "License"); you may not use this file except in compliance with
   # the License.  You may obtain a copy of the License at
   #
   #     http://www.apache.org/licenses/LICENSE-2.0
   #
   # Unless required by applicable law or agreed to in writing, software
   # distributed under the License is distributed on an "AS IS" BASIS,
   # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   # See the License for the specific language governing permissions and
   # limitations under the License.
   #
   
   # user data local directory path, please make sure the directory exists and has read/write permissions
   data.basedir.path=/tmp/dolphinscheduler
   
   # resource storage type: HDFS, S3, NONE
   #resource.storage.type=NONE
   resource.storage.type=S3
   
   # resource storage path on HDFS/S3; resource files will be stored under this path. Please make sure the directory exists on HDFS/S3 and has read/write permissions. "/dolphinscheduler" is recommended
   resource.upload.path=/dolphinscheduler
   
   # whether to start up kerberos
   hadoop.security.authentication.startup.state=false
   
   # java.security.krb5.conf path
   java.security.krb5.conf.path=/opt/krb5.conf
   
   # keytab username of the login user
   login.user.keytab.username=hdfs-mycluster@ESZ.COM
   
   # keytab path of the login user
   login.user.keytab.path=/opt/hdfs.headless.keytab
   
   # kerberos expire time, the unit is hour
   kerberos.expire.time=2
   # resource view suffixes
   #resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
   # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
   hdfs.root.user=hdfs
   # if resource.storage.type=S3, use a value like s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
   #fs.defaultFS=hdfs://mycluster:8020
   #fs.defaultFS=hdfs://master:8020
   fs.defaultFS=s3a://dolphinscheduler
   
   aws.access.key.id=minio_akey
   aws.secret.access.key=minio_skey
   aws.region=us-east-1
   aws.endpoint=http://worker02:12346
   # resourcemanager port, the default value is 8088 if not specified
   resource.manager.httpaddress.port=8088
   # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
   #yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
   yarn.resourcemanager.ha.rm.ids=
   # if resourcemanager HA is enabled or resourcemanager is not used, please keep the default value; if resourcemanager is standalone, you only need to replace ds1 with the actual resourcemanager hostname
   #yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s
   yarn.application.status.address=http://master:%s/ws/v1/cluster/apps/%s
   # job history status url when the application number threshold is reached (default 10000, maybe it was set to 1000)
   #yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
   yarn.job.history.status.address=http://master:19888/ws/v1/history/mapreduce/jobs/%s
   
   # datasource encryption enable
   datasource.encryption.enable=false
   
   # datasource encryption salt
   datasource.encryption.salt=!@#$%^&*
   
   # data quality option
   #data-quality.jar.name=dolphinscheduler-data-quality-dev-SNAPSHOT.jar
   data-quality.jar.name=dolphinscheduler-data-quality-3.0.0-beta-2.jar
   
   #data-quality.error.output.path=/tmp/data-quality-error-data
   
   # Network IP gets priority, default inner outer
   
   # Whether hive SQL is executed in the same session
   support.hive.oneSession=false
   
   # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
   sudo.enable=true
   
   # network interface preferred like eth0, default: empty
   #dolphin.scheduler.network.interface.preferred=
   
   # network IP gets priority, default: inner outer
   #dolphin.scheduler.network.priority.strategy=default
   
   # system env path
   #dolphinscheduler.env.path=dolphinscheduler_env.sh
   
   # development state
   development.state=false
   
   # rpc port
   alert.rpc.port=50052
   
   # URL endpoint for the Zeppelin RESTful API
   zeppelin.rest.url=http://localhost:8080
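
For reference, the stock Hadoop S3A client settings that correspond to the MinIO values above are sketched below. This is only a sketch, assuming the Spark-based data quality job reads standard fs.s3a.* properties; the endpoint, keys, and flags simply mirror the aws.* values in this file and are not a confirmed fix for this issue.

   # Hadoop S3A client settings mirroring the MinIO values above (sketch, not a confirmed fix)
   fs.s3a.endpoint=http://worker02:12346
   fs.s3a.access.key=minio_akey
   fs.s3a.secret.key=minio_skey
   # MinIO serves buckets at path-style URLs, so path-style access must be enabled
   fs.s3a.path.style.access=true
   # the endpoint above is plain HTTP, so disable SSL on the S3A connector
   fs.s3a.connection.ssl.enabled=false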
   

