You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by GitBox <gi...@apache.org> on 2020/06/02 06:44:05 UTC

[GitHub] [incubator-dolphinscheduler] dailidong commented on a change in pull request #2857: add job history to judge application status #2625

dailidong commented on a change in pull request #2857:
URL: https://github.com/apache/incubator-dolphinscheduler/pull/2857#discussion_r433653457



##########
File path: dolphinscheduler-common/src/main/resources/common.properties
##########
@@ -18,47 +18,49 @@
 # resource storage type : HDFS,S3,NONE
 resource.storage.type=NONE
 
-# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
-resource.upload.path=/dolphinscheduler
+# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions."/dolphinscheduler" is recommended
+#resource.upload.path=/dolphinscheduler
 
 # user data local directory path, please make sure the directory exists and have read write permissions
 #data.basedir.path=/tmp/dolphinscheduler
 
 # whether kerberos starts
-hadoop.security.authentication.startup.state=false
+#hadoop.security.authentication.startup.state=false
 
 # java.security.krb5.conf path
-java.security.krb5.conf.path=/opt/krb5.conf
+#java.security.krb5.conf.path=/opt/krb5.conf
 
 # login user from keytab username
-login.user.keytab.username=hdfs-mycluster@ESZ.COM
+#login.user.keytab.username=hdfs-mycluster@ESZ.COM
 
 # loginUserFromKeytab path
-login.user.keytab.path=/opt/hdfs.headless.keytab
+#login.user.keytab.path=/opt/hdfs.headless.keytab
 
 #resource.view.suffixs
 #resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
 
 # if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path
 hdfs.root.user=hdfs
 
-# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
+# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
 fs.defaultFS=hdfs://mycluster:8020
 
-# if resource.storage.type=S3,s3 endpoint
-fs.s3a.endpoint=http://192.168.xx.xx:9010
+# if resource.storage.type=S3,s3 endpoint
+#fs.s3a.endpoint=http://192.168.199.91:9010
 
-# if resource.storage.type=S3,s3 access key
-fs.s3a.access.key=A3DXS30FO22544RE
+# if resource.storage.type=S3,s3 access key
+#fs.s3a.access.key=A3DXS30FO22544RE
 
-# if resource.storage.type=S3,s3 secret key
-fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
+# if resource.storage.type=S3,s3 secret key
+#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK

Review comment:
       Why were these lines commented out?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org