You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by jo...@apache.org on 2020/04/21 02:25:52 UTC

[incubator-dolphinscheduler] branch dev updated: simplify and optimize config (#2469)

This is an automated email from the ASF dual-hosted git repository.

journey pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-dolphinscheduler.git


The following commit(s) were added to refs/heads/dev by this push:
     new eec92ed  simplify and optimize config (#2469)
eec92ed is described below

commit eec92ed1fc1a35e22b8e38ac40681a1dd803399b
Author: dailidong <da...@gmail.com>
AuthorDate: Tue Apr 21 10:25:45 2020 +0800

    simplify and optimize config (#2469)
    
    * [Refactor worker] simplify and optimize config (#2386)
    
    * simplify config
    
    * simplify config
    
    * simplify and optimize config
    
    * Update HadoopUtils.java
    
    optimize HadoopUtils
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HttpUtils.java
    
    * Update pom.xml
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
    
    * Update HadoopUtilsTest.java
---
 .../alert/template/AlertTemplateFactory.java       |  17 +-
 .../dolphinscheduler/alert/utils/Constants.java    |   2 -
 .../dolphinscheduler/alert/utils/MailUtils.java    |   9 +-
 .../src/main/resources/alert.properties            |   3 -
 .../alert/template/AlertTemplateFactoryTest.java   |   1 -
 .../dolphinscheduler/common/utils/HadoopUtils.java |  60 ++--
 .../dolphinscheduler/common/utils/HttpUtils.java   |   2 +-
 .../src/main/resources/common.properties           |  24 +-
 .../common/utils/HadoopUtilsTest.java              | 162 +++++++--
 .../src/main/resources/datasource.properties       |   3 -
 .../src/main/resources/config/install_config.conf  | 131 ++++++-
 install.sh                                         | 398 ++-------------------
 pom.xml                                            |   3 +-
 script/dolphinscheduler-daemon.sh                  |   6 +-
 script/scp-hosts.sh                                |  22 +-
 script/start-all.sh                                |  13 +-
 script/stop-all.sh                                 |  14 +-
 17 files changed, 359 insertions(+), 511 deletions(-)

diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java
index 58e3800..965677e 100644
--- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java
+++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java
@@ -17,9 +17,6 @@
 package org.apache.dolphinscheduler.alert.template;
 
 import org.apache.dolphinscheduler.alert.template.impl.DefaultHTMLTemplate;
-import org.apache.dolphinscheduler.alert.utils.Constants;
-import org.apache.dolphinscheduler.alert.utils.PropertyUtils;
-import org.apache.dolphinscheduler.common.utils.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,8 +27,6 @@ public class AlertTemplateFactory {
 
     private static final Logger logger = LoggerFactory.getLogger(AlertTemplateFactory.class);
 
-    private static final String alertTemplate = PropertyUtils.getString(Constants.ALERT_TEMPLATE);
-
     private AlertTemplateFactory(){}
 
     /**
@@ -39,16 +34,6 @@ public class AlertTemplateFactory {
      * @return a template, default is DefaultHTMLTemplate
      */
     public static AlertTemplate getMessageTemplate() {
-
-        if(StringUtils.isEmpty(alertTemplate)){
-            return new DefaultHTMLTemplate();
-        }
-
-        switch (alertTemplate){
-            case "html":
-                return new DefaultHTMLTemplate();
-            default:
-                throw new IllegalArgumentException(String.format("not support alert template: %s",alertTemplate));
-        }
+        return new DefaultHTMLTemplate();
     }
 }
diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
index 94d95b3..28be8aa 100644
--- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
+++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java
@@ -77,8 +77,6 @@ public class Constants {
 
     public static final int NUMBER_1000 = 1000;
 
-    public static final String ALERT_TEMPLATE = "alert.template";
-
     public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
 
     public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java
index e20b75a..ef364cb 100644
--- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java
+++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java
@@ -260,9 +260,14 @@ public class MailUtils {
         part1.setContent(partContent, Constants.TEXT_HTML_CHARSET_UTF_8);
         // set attach file
         MimeBodyPart part2 = new MimeBodyPart();
-        // make excel file
-        ExcelUtils.genExcelFile(content,title, xlsFilePath);
         File file = new File(xlsFilePath + Constants.SINGLE_SLASH +  title + Constants.EXCEL_SUFFIX_XLS);
+        if (!file.getParentFile().exists()) {
+            file.getParentFile().mkdirs();
+        }
+        // make excel file
+
+        ExcelUtils.genExcelFile(content,title,xlsFilePath);
+
         part2.attachFile(file);
         part2.setFileName(MimeUtility.encodeText(title + Constants.EXCEL_SUFFIX_XLS,Constants.UTF_8,"B"));
         // add components to collection
diff --git a/dolphinscheduler-alert/src/main/resources/alert.properties b/dolphinscheduler-alert/src/main/resources/alert.properties
index c359b27..3e83c01 100644
--- a/dolphinscheduler-alert/src/main/resources/alert.properties
+++ b/dolphinscheduler-alert/src/main/resources/alert.properties
@@ -18,9 +18,6 @@
 #alert type is EMAIL/SMS
 alert.type=EMAIL
 
-# alter msg template, default is html template
-#alert.template=html
-
 # mail server configuration
 mail.protocol=SMTP
 mail.server.host=xxx.xxx.com
diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java
index 6865b89..32201e6 100644
--- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java
+++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java
@@ -47,7 +47,6 @@ public class AlertTemplateFactoryTest {
     public void testGetMessageTemplate(){
 
         PowerMockito.mockStatic(PropertyUtils.class);
-        when(PropertyUtils.getString(Constants.ALERT_TEMPLATE)).thenReturn("html");
 
         AlertTemplate defaultTemplate = AlertTemplateFactory.getMessageTemplate();
 
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
index 5d4f867..02f00ce 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@@ -57,7 +57,8 @@ public class HadoopUtils implements Closeable {
 
     private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
     public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
-
+    public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
+    public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
 
     private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
 
@@ -110,14 +111,15 @@ public class HadoopUtils implements Closeable {
         try {
             configuration = new Configuration();
 
-            String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
-            ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
+            String resourceStorageType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
+            ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
 
-            if (resUploadType == ResUploadType.HDFS) {
-                if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)) {
+            if (resUploadType == ResUploadType.HDFS){
+                if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)){
                     System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
                             PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
-                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
+                    hdfsUser = "";
                     UserGroupInformation.setConfiguration(configuration);
                     UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
                             PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
@@ -166,21 +168,6 @@ public class HadoopUtils implements Closeable {
             }
 
 
-            String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
-            String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
-            //not use resourcemanager
-            if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
-                yarnEnabled = false;
-            } else if (!StringUtils.isEmpty(rmHaIds)) {
-                //resourcemanager HA enabled
-                appAddress = getAppAddress(appAddress, rmHaIds);
-                yarnEnabled = true;
-                logger.info("appAddress : {}", appAddress);
-            } else {
-                //single resourcemanager enabled
-                yarnEnabled = true;
-            }
-            configuration.set(Constants.YARN_APPLICATION_STATUS_ADDRESS, appAddress);
         } catch (Exception e) {
             logger.error(e.getMessage(), e);
         }
@@ -200,7 +187,29 @@ public class HadoopUtils implements Closeable {
      * @return url of application
      */
     public String getApplicationUrl(String applicationId) {
-        return String.format(configuration.get(Constants.YARN_APPLICATION_STATUS_ADDRESS), applicationId);
+        /**
+         * if rmHaIds contains xx, it signs not use resourcemanager
+         * otherwise:
+         *  if rmHaIds is empty, single resourcemanager enabled
+         *  if rmHaIds not empty: resourcemanager HA enabled
+         */
+        String appUrl = "";
+        //not use resourcemanager
+        if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
+
+            yarnEnabled = false;
+            logger.warn("should not step here");
+        } else if (!StringUtils.isEmpty(rmHaIds)) {
+            //resourcemanager HA enabled
+            appUrl = getAppAddress(appAddress, rmHaIds);
+            yarnEnabled = true;
+            logger.info("application url : {}", appUrl);
+        } else {
+            //single resourcemanager enabled
+            yarnEnabled = true;
+        }
+
+        return String.format(appUrl, applicationId);
     }
 
     /**
@@ -484,13 +493,6 @@ public class HadoopUtils implements Closeable {
         return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
     }
 
-    /**
-     * get absolute path and name for file on hdfs
-     *
-     * @param tenantCode tenant code
-     * @param fileName   file name
-     * @return get absolute path and name for file on hdfs
-     */
 
     /**
      * get hdfs file name
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java
index 7de198f..98d9cf1 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java
@@ -40,7 +40,7 @@ public class HttpUtils {
 	/**
 	 * get http request content
 	 * @param url url
-	 * @return http response
+	 * @return http get request response content
 	 */
 	public static String get(String url){
 		CloseableHttpClient httpclient = HttpClients.createDefault();
diff --git a/dolphinscheduler-common/src/main/resources/common.properties b/dolphinscheduler-common/src/main/resources/common.properties
index c6484ff..db3b241 100644
--- a/dolphinscheduler-common/src/main/resources/common.properties
+++ b/dolphinscheduler-common/src/main/resources/common.properties
@@ -19,22 +19,22 @@
 resource.storage.type=NONE
 
 # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
-#resource.upload.path=/dolphinscheduler
+resource.upload.path=/dolphinscheduler
 
 # user data local directory path, please make sure the directory exists and have read write permissions
 #data.basedir.path=/tmp/dolphinscheduler
 
 # whether kerberos starts
-#hadoop.security.authentication.startup.state=false
+hadoop.security.authentication.startup.state=false
 
 # java.security.krb5.conf path
-#java.security.krb5.conf.path=/opt/krb5.conf
+java.security.krb5.conf.path=/opt/krb5.conf
 
-# loginUserFromKeytab user
-#login.user.keytab.username=hdfs-mycluster@ESZ.COM
+# login user from keytab username
+login.user.keytab.username=hdfs-mycluster@ESZ.COM
 
 # loginUserFromKeytab path
-#login.user.keytab.path=/opt/hdfs.headless.keytab
+login.user.keytab.path=/opt/hdfs.headless.keytab
 
 #resource.view.suffixs
 #resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
@@ -46,21 +46,21 @@ hdfs.root.user=hdfs
 fs.defaultFS=hdfs://mycluster:8020
 
 # if resource.storage.type=S3,s3 endpoint
-#fs.s3a.endpoint=http://192.168.199.91:9010
+fs.s3a.endpoint=http://192.168.199.91:9010
 
 # if resource.storage.type=S3,s3 access key
-#fs.s3a.access.key=A3DXS30FO22544RE
+fs.s3a.access.key=A3DXS30FO22544RE
 
 # if resource.storage.type=S3,s3 secret key
-#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
+fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
 
-# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty  TODO
+# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
 
-# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
+# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
 yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
 
-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions, TODO
+# system env path
 #dolphinscheduler.env.path=env/dolphinscheduler_env.sh
 
 kerberos.expire.time=7
\ No newline at end of file
diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java
index b7bf220..00b8f1c 100644
--- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java
+++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java
@@ -17,88 +17,186 @@
 package org.apache.dolphinscheduler.common.utils;
 
 import org.apache.dolphinscheduler.common.enums.ResourceType;
-import org.junit.Ignore;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
-@Ignore
+@RunWith(MockitoJUnitRunner.class)
+//todo there is no hadoop environment
 public class HadoopUtilsTest {
 
     private static final Logger logger = LoggerFactory.getLogger(HadoopUtilsTest.class);
+    private HadoopUtils hadoopUtils = HadoopUtils.getInstance();
 
     @Test
     public void getActiveRMTest() {
-        logger.info(HadoopUtils.getAppAddress("http://ark1:8088/ws/v1/cluster/apps/%s","192.168.xx.xx,192.168.xx.xx"));
+        try{
+            hadoopUtils.getAppAddress("http://ark1:8088/ws/v1/cluster/apps/%s","192.168.xx.xx,192.168.xx.xx");
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
     }
 
     @Test
-    public void getApplicationStatusAddressTest(){
-        logger.info(HadoopUtils.getInstance().getApplicationUrl("application_1548381297012_0030"));
+    public void rename()  {
+
+        boolean result = false;
+        try {
+            result = hadoopUtils.rename("/dolphinscheduler/hdfs1","/dolphinscheduler/hdfs2");
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
+        Assert.assertEquals(false, result);
     }
 
+
     @Test
-    public void test() throws IOException {
-        HadoopUtils.getInstance().copyLocalToHdfs("/root/teamviewer_13.1.8286.x86_64.rpm", "/journey", true, true);
+    public void getConfiguration(){
+        Configuration conf = hadoopUtils.getConfiguration();
+
     }
 
     @Test
-    public void readFileTest(){
+    public void mkdir()  {
+        boolean result = false;
         try {
-            byte[] bytes = HadoopUtils.getInstance().catFile("/dolphinscheduler/hdfs/resources/35435.sh");
-            logger.info(new String(bytes));
+            result = hadoopUtils.mkdir("/dolphinscheduler/hdfs");
         } catch (Exception e) {
-
+            logger.error(e.getMessage(), e);
         }
+        Assert.assertEquals(false, result);
     }
-    @Test
-    public void testCapacity(){
 
+    @Test
+    public void delete() {
+        boolean result = false;
+        try {
+            result = hadoopUtils.delete("/dolphinscheduler/hdfs",true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        Assert.assertEquals(false, result);
     }
+
     @Test
-    public void testMove(){
-        HadoopUtils instance = HadoopUtils.getInstance();
+    public void exists() {
+        boolean result = false;
         try {
-            instance.copy("/opt/apptest/test.dat","/opt/apptest/test.dat.back",true,true);
+            result = hadoopUtils.exists("/dolphinscheduler/hdfs");
         } catch (Exception e) {
             logger.error(e.getMessage(), e);
         }
+        Assert.assertEquals(false, result);
+    }
 
+    @Test
+    public void getHdfsDataBasePath() {
+        String result = hadoopUtils.getHdfsDataBasePath();
+        Assert.assertEquals("/dolphinscheduler", result);
+    }
 
+    @Test
+    public void getHdfsResDir() {
+        String result = hadoopUtils.getHdfsResDir("11000");
+        Assert.assertEquals("/dolphinscheduler/11000/resources", result);
     }
 
     @Test
-    public void getApplicationStatus() {
-         logger.info(HadoopUtils.getInstance().getApplicationStatus("application_1542010131334_0029").toString());
+    public void getHdfsUserDir() {
+        String result = hadoopUtils.getHdfsUserDir("11000",1000);
+        Assert.assertEquals("/dolphinscheduler/11000/home/1000", result);
     }
 
     @Test
-    public void getApplicationUrl(){
-        String application_1516778421218_0042 = HadoopUtils.getInstance().getApplicationUrl("application_1529051418016_0167");
-        logger.info(application_1516778421218_0042);
+    public void getHdfsUdfDir()  {
+        String result = hadoopUtils.getHdfsUdfDir("11000");
+        Assert.assertEquals("/dolphinscheduler/11000/udfs", result);
     }
 
     @Test
-    public void catFileTest()throws Exception{
-        List<String> stringList = HadoopUtils.getInstance().catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py", 0, 1000);
-        logger.info(String.join(",",stringList));
+    public void getHdfsFileName() {
+        String result = hadoopUtils.getHdfsFileName(ResourceType.FILE,"11000","aa.txt");
+        Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result);
     }
 
     @Test
-    public void getHdfsFileNameTest(){
-        logger.info(HadoopUtils.getHdfsFileName(ResourceType.FILE,"test","/test"));
+    public void isYarnEnabled() {
+        boolean result = hadoopUtils.isYarnEnabled();
+        Assert.assertEquals(false, result);
     }
 
     @Test
-    public void getHdfsResourceFileNameTest(){
-        logger.info(HadoopUtils.getHdfsResourceFileName("test","/test"));
+    public void test() {
+        try {
+            hadoopUtils.copyLocalToHdfs("/root/teamviewer_13.1.8286.x86_64.rpm", "/journey", true, true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
     }
 
     @Test
-    public void getHdfsUdfFileNameTest(){
-        logger.info(HadoopUtils.getHdfsUdfFileName("test","/test.jar"));
+    public void readFileTest(){
+        try {
+            byte[] bytes = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/35435.sh");
+            logger.info(new String(bytes));
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
+    }
+
+
+    @Test
+    public void testMove(){
+        try {
+            hadoopUtils.copy("/opt/apptest/test.dat","/opt/apptest/test.dat.back",true,true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+
+    }
+
+    @Test
+    public void getApplicationStatus() {
+        try {
+            logger.info(hadoopUtils.getApplicationStatus("application_1542010131334_0029").toString());
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }
+
+    @Test
+    public void getApplicationUrl(){
+        String application_1516778421218_0042 = hadoopUtils.getApplicationUrl("application_1529051418016_0167");
+        logger.info(application_1516778421218_0042);
+    }
+
+    @Test
+    public void catFileWithLimitTest() {
+        List<String> stringList = new ArrayList<>();
+        try {
+            stringList = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py", 0, 1000);
+            logger.info(String.join(",",stringList));
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }
+
+    @Test
+    public void catFileTest() {
+        byte[] content = new byte[0];
+        try {
+            content = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py");
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        logger.info(Arrays.toString(content));
     }
-}
\ No newline at end of file
+}
diff --git a/dolphinscheduler-dao/src/main/resources/datasource.properties b/dolphinscheduler-dao/src/main/resources/datasource.properties
index 63c694e..2f28ca2 100644
--- a/dolphinscheduler-dao/src/main/resources/datasource.properties
+++ b/dolphinscheduler-dao/src/main/resources/datasource.properties
@@ -25,9 +25,6 @@ spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
 spring.datasource.username=test
 spring.datasource.password=test
 
-## base spring data source configuration todo need to remove
-#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-
 # connection configuration
 #spring.datasource.initialSize=5
 # min connection number
diff --git a/dolphinscheduler-server/src/main/resources/config/install_config.conf b/dolphinscheduler-server/src/main/resources/config/install_config.conf
index 0378490..4671be7 100644
--- a/dolphinscheduler-server/src/main/resources/config/install_config.conf
+++ b/dolphinscheduler-server/src/main/resources/config/install_config.conf
@@ -15,11 +15,126 @@
 # limitations under the License.
 #
 
-installPath=/data1_1T/dolphinscheduler
-deployUser=dolphinscheduler
-ips=ark0,ark1,ark2,ark3,ark4
-sshPort=22
-masters=ark0,ark1
-workers=ark2,ark3,ark4
-alertServer=ark3
-apiServers=ark1
+
+# NOTICE :  If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`
+# postgresql or mysql
+dbtype="mysql"
+
+# db config
+# db address and port
+dbhost="192.168.xx.xx:3306"
+
+# db username
+username="xx"
+
+# db password
+# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
+password="xx"
+
+# zk cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
+installPath="/data1_1T/dolphinscheduler"
+
+# deployment user
+# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
+deployUser="dolphinscheduler"
+
+
+# alert config
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# user
+mailUser="xxxxxxxxxx"
+
+# sender password
+# note: The mail.passwd is email service authorization code, not the email login password.
+mailPassword="xxxxxxxxxx"
+
+# TLS mail protocol support
+starttlsEnable="false"
+
+sslTrust="xxxxxxxxxx"
+
+# SSL mail protocol support
+# note: The SSL protocol is enabled by default.
+# only one of TLS and SSL can be in the true state.
+sslEnable="true"
+
+
+# resource storage type:HDFS,S3,NONE
+resourceStorageType="NONE"
+
+# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
+# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
+# Note,s3 be sure to create the root directory /dolphinscheduler
+defaultFS="hdfs://mycluster:8020"
+
+# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
+s3Endpoint="http://192.168.xx.xx:9010"
+s3AccessKey="xxxxxxxxxx"
+s3SecretKey="xxxxxxxxxx"
+
+# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
+singleYarnIp="ark1"
+
+# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. /dolphinscheduler is recommended
+resourceUploadPath="/dolphinscheduler"
+
+# who have permissions to create directory under HDFS/S3 root path
+# Note: if kerberos is enabled, please config hdfsRootUser=
+hdfsRootUser="hdfs"
+
+# kerberos config
+# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
+kerberosStartUp="false"
+# kdc krb5 config file path
+krb5ConfPath="$installPath/conf/krb5.conf"
+# keytab username
+keytabUserName="hdfs-mycluster@ESZ.COM"
+# username keytab path
+keytabPath="$installPath/conf/hdfs.headless.keytab"
+
+
+# api server port
+apiServerPort="12345"
+
+
+# install hosts
+# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
+ips="ark0,ark1,ark2,ark3,ark4"
+
+# ssh port, default 22
+# Note: if ssh port is not default, modify here
+sshPort="22"
+
+# run master machine
+# Note: list of hosts hostname for deploying master
+masters="ark0,ark1"
+
+# run worker machine
+# note: list of machine hostnames for deploying workers
+workers="ark2,ark3,ark4"
+
+# run alert machine
+# note: list of machine hostnames for deploying alert server
+alertServer="ark3"
+
+# run api machine
+# note: list of machine hostnames for deploying api server
+apiServers="ark1"
+
+# whether to start monitoring self-starting scripts
+monitorServerState="false"
diff --git a/install.sh b/install.sh
index ed66ce5..20b293f 100644
--- a/install.sh
+++ b/install.sh
@@ -19,309 +19,25 @@
 workDir=`dirname $0`
 workDir=`cd ${workDir};pwd`
 
-#To be compatible with MacOS and Linux
+source ${workDir}/conf/config/install_config.conf
+
+# 1.replace file
+echo "1.replace file"
+
 txt=""
 if [[ "$OSTYPE" == "darwin"* ]]; then
     # Mac OSX
     txt="''"
-elif [[ "$OSTYPE" == "linux-gnu" ]]; then
-    # linux
-    txt=""
-elif [[ "$OSTYPE" == "cygwin" ]]; then
-    # POSIX compatibility layer and Linux environment emulation for Windows
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "msys" ]]; then
-    # Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "win32" ]]; then
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "freebsd"* ]]; then
-    # ...
-    txt=""
-else
-    # Unknown.
-    echo "Operating system unknown, please tell us(submit issue) for better service"
-    exit 1
-fi
-
-source ${workDir}/conf/config/install_config.conf
-
-# for example postgresql or mysql ...
-dbtype="postgresql"
-
-# db config
-# db address and port
-dbhost="192.168.xx.xx:5432"
-
-# db name
-dbname="dolphinscheduler"
-
-# db username
-username="xx"
-
-# db passwprd
-# Note: if there are special characters, please use the \ transfer character to transfer
-passowrd="xx"
-
-# conf/config/install_config.conf config
-# Note: the installation path is not the same as the current path (pwd)
-installPath="/data1_1T/dolphinscheduler"
-
-# deployment user
-# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
-deployUser="dolphinscheduler"
-
-# zk cluster
-zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
-
-# install hosts
-# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
-ips="ark0,ark1,ark2,ark3,ark4"
-
-# ssh port, default 22
-# Note: if ssh port is not default, modify here
-sshPort=22
-
-# run master machine
-# Note: list of hosts hostname for deploying master
-masters="ark0,ark1"
-
-# run worker machine
-# note: list of machine hostnames for deploying workers
-workers="ark2,ark3,ark4"
-
-# run alert machine
-# note: list of machine hostnames for deploying alert server
-alertServer="ark3"
-
-# run api machine
-# note: list of machine hostnames for deploying api server
-apiServers="ark1"
-
-# alert config
-# mail protocol
-mailProtocol="SMTP"
-
-# mail server host
-mailServerHost="smtp.exmail.qq.com"
-
-# mail server port
-# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
-mailServerPort="25"
-
-# sender
-mailSender="xxxxxxxxxx"
-
-# user
-mailUser="xxxxxxxxxx"
-
-# sender password
-# note: The mail.passwd is email service authorization code, not the email login password.
-mailPassword="xxxxxxxxxx"
-
-# TLS mail protocol support
-starttlsEnable="false"
-
-sslTrust="xxxxxxxxxx"
-
-# SSL mail protocol support
-# note: The SSL protocol is enabled by default. 
-# only one of TLS and SSL can be in the true state.
-sslEnable="true"
-
-# download excel path
-xlsFilePath="/tmp/xls"
-
-# Enterprise WeChat Enterprise ID Configuration
-enterpriseWechatCorpId="xxxxxxxxxx"
-
-# Enterprise WeChat application Secret configuration
-enterpriseWechatSecret="xxxxxxxxxx"
-
-# Enterprise WeChat Application AgentId Configuration
-enterpriseWechatAgentId="xxxxxxxxxx"
-
-# Enterprise WeChat user configuration, multiple users to , split
-enterpriseWechatUsers="xxxxx,xxxxx"
-
-
-# whether to start monitoring self-starting scripts
-monitorServerState="false"
-
-# resource Center upload and select storage method:HDFS,S3,NONE
-resUploadStartupType="NONE"
-
-# if resUploadStartupType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
-# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
-# Note,s3 be sure to create the root directory /dolphinscheduler
-defaultFS="hdfs://mycluster:8020"
-
-# if S3 is configured, the following configuration is required.
-s3Endpoint="http://192.168.xx.xx:9010"
-s3AccessKey="xxxxxxxxxx"
-s3SecretKey="xxxxxxxxxx"
-
-# resourcemanager HA configuration, if it is a single resourcemanager, here is yarnHaIps=""
-yarnHaIps="192.168.xx.xx,192.168.xx.xx"
-
-# if it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine.
-singleYarnIp="ark1"
-
-# hdfs root path, the owner of the root path must be the deployment user. 
-# versions prior to 1.1.0 do not automatically create the hdfs root directory, you need to create it yourself.
-hdfsPath="/dolphinscheduler"
-
-# have users who create directory permissions under hdfs root path /
-# Note: if kerberos is enabled, hdfsRootUser="" can be used directly.
-hdfsRootUser="hdfs"
-
-# common config
-# Program root path
-programPath="/tmp/dolphinscheduler"
-
-# download path
-downloadPath="/tmp/dolphinscheduler/download"
-
-# task execute path
-execPath="/tmp/dolphinscheduler/exec"
-
-# SHELL environmental variable path
-shellEnvPath="$installPath/conf/env/dolphinscheduler_env.sh"
-
-# suffix of the resource file
-resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
-
-# development status, if true, for the SHELL script, you can view the encapsulated SHELL script in the execPath directory. 
-# If it is false, execute the direct delete
-devState="true"
-
-# kerberos config
-# kerberos whether to start
-kerberosStartUp="false"
-
-# kdc krb5 config file path
-krb5ConfPath="$installPath/conf/krb5.conf"
-
-# keytab username
-keytabUserName="hdfs-mycluster@ESZ.COM"
-
-# username keytab path
-keytabPath="$installPath/conf/hdfs.headless.keytab"
-
-# zk config
-# zk root directory
-zkRoot="/dolphinscheduler"
-
-# zk session timeout
-zkSessionTimeout="300"
-
-# zk connection timeout
-zkConnectionTimeout="300"
-
-# zk retry interval
-zkRetryMaxSleep="100"
-
-# zk retry maximum number of times
-zkRetryMaxtime="5"
-
-
-# master config 
-# master execution thread maximum number, maximum parallelism of process instance
-masterExecThreads="100"
-
-# the maximum number of master task execution threads, the maximum degree of parallelism for each process instance
-masterExecTaskNum="20"
-
-# master heartbeat interval
-masterHeartbeatInterval="10"
-
-# master task submission retries
-masterTaskCommitRetryTimes="5"
-
-# master task submission retry interval
-masterTaskCommitInterval="1000"
-
-# master maximum cpu average load, used to determine whether the master has execution capability
-masterMaxCpuLoadAvg="100"
-
-# master reserve memory to determine if the master has execution capability
-masterReservedMemory="0.1"
-
-# worker config
-# worker execution thread
-workerExecThreads="100"
-
-# worker heartbeat interval
-workerHeartbeatInterval="10"
-
-# worker number of fetch tasks
-workerFetchTaskNum="3"
-
-# worker reserve memory to determine if the master has execution capability
-workerReservedMemory="0.1"
-
-# api config
-# api server port
-apiServerPort="12345"
-
-# api session timeout
-apiServerSessionTimeout="7200"
-
-# api server context path
-apiServerContextPath="/dolphinscheduler/"
-
-# spring max file size
-springMaxFileSize="1024MB"
-
-# spring max request size
-springMaxRequestSize="1024MB"
-
-# api max http post size
-apiMaxHttpPostSize="5000000"
-
-
-# 1,replace file
-echo "1,replace file"
-if [ $dbtype == "mysql" ];then
-    sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=com.mysql.jdbc.Driver#g" conf/application.properties
-
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=com.mysql.jdbc.Driver#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate#g" conf/quartz.properties
 fi
 
+datasourceDriverClassname="com.mysql.jdbc.Driver"
 if [ $dbtype == "postgresql" ];then
-    sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=org.postgresql.Driver#g" conf/application.properties
-
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=org.postgresql.Driver#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.PostgreSQLDelegate#g" conf/quartz.properties
+  datasourceDriverClassname="org.postgresql.Driver"
 fi
-
-sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/application.properties
-sed -i ${txt} "s#master.exec.task.num.*#master.exec.task.num=${masterExecTaskNum}#g" conf/application.properties
-sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/application.properties
-sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/application.properties
-sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/application.properties
-sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/application.properties
-
-sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/application.properties
-sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/application.properties
-sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/application.properties
-sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/application.properties
+sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=${datasourceDriverClassname}#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:${dbtype}://${dbhost}/dolphinscheduler?characterEncoding=UTF-8#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${password}#g" conf/datasource.properties
 
 sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
 sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
@@ -329,37 +45,18 @@ sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/co
 sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
 sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
 sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common.properties
-
-sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common.properties
-sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common.properties
-sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common.properties
 sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
-sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common.properties
-sed -i ${txt} "s#res.upload.startup.type.*#res.upload.startup.type=${resUploadStartupType}#g" conf/common.properties
-sed -i ${txt} "s#dolphinscheduler.env.path.*#dolphinscheduler.env.path=${shellEnvPath}#g" conf/common.properties
-sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common.properties
-sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common.properties
+sed -i ${txt} "s#resource.upload.path.*#resource.upload.path=${resourceUploadPath}#g" conf/common.properties
+sed -i ${txt} "s#resource.storage.type.*#resource.storage.type=${resourceStorageType}#g" conf/common.properties
 sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
 sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
 sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
 sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties
-
 sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.dolphinscheduler.root.*#zookeeper.dolphinscheduler.root=${zkRoot}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.retry.max.sleep.*#zookeeper.retry.max.sleep=${zkRetryMaxSleep}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/common.properties
 
 sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties
-sed -i ${txt} "s#server.servlet.session.timeout.*#server.servlet.session.timeout=${apiServerSessionTimeout}#g" conf/application-api.properties
-sed -i ${txt} "s#server.servlet.context-path.*#server.servlet.context-path=${apiServerContextPath}#g" conf/application-api.properties
-sed -i ${txt} "s#spring.servlet.multipart.max-file-size.*#spring.servlet.multipart.max-file-size=${springMaxFileSize}#g" conf/application-api.properties
-sed -i ${txt} "s#spring.servlet.multipart.max-request-size.*#spring.servlet.multipart.max-request-size=${springMaxRequestSize}#g" conf/application-api.properties
-sed -i ${txt} "s#server.jetty.max-http-post-size.*#server.jetty.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application-api.properties
 
 
-sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
 sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
 sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
 sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
@@ -368,85 +65,38 @@ sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.propert
 sed -i ${txt} "s#mail.smtp.starttls.enable.*#mail.smtp.starttls.enable=${starttlsEnable}#g" conf/alert.properties
 sed -i ${txt} "s#mail.smtp.ssl.trust.*#mail.smtp.ssl.trust=${sslTrust}#g" conf/alert.properties
 sed -i ${txt} "s#mail.smtp.ssl.enable.*#mail.smtp.ssl.enable=${sslEnable}#g" conf/alert.properties
-sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.corp.id.*#enterprise.wechat.corp.id=${enterpriseWechatCorpId}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.secret.*#enterprise.wechat.secret=${enterpriseWechatSecret}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.agent.id.*#enterprise.wechat.agent.id=${enterpriseWechatAgentId}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.users.*#enterprise.wechat.users=${enterpriseWechatUsers}#g" conf/alert.properties
-
-
-
-sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
-sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
-sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
-sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf
-
-
-sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/install_config.conf
-sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/install_config.conf
-sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/install_config.conf
-sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/install_config.conf
-sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf
 
-
-# 2,create directory
-echo "2,create directory"
+# 2.create directory
+echo "2.create directory"
 
 if [ ! -d $installPath ];then
   sudo mkdir -p $installPath
   sudo chown -R $deployUser:$deployUser $installPath
 fi
 
-hostsArr=(${ips//,/ })
-for host in ${hostsArr[@]}
-do
-
-# create if programPath does not exist
-if ! ssh -p $sshPort $host test -e $programPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath"
-fi
-
-# create if downloadPath does not exist
-if ! ssh -p $sshPort $host test -e $downloadPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath"
-fi
-
-# create if execPath does not exist
-if ! ssh -p $sshPort $host test -e $execPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath"
-fi
-
-# create if xlsFilePath does not exist
-if ! ssh -p $sshPort $host test -e $xlsFilePath; then
-  ssh -p $sshPort $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath"
-fi
-
-done
-
-
-# 3,scp resources
-echo "3,scp resources"
+# 3.scp resources
+echo "3.scp resources"
 sh ${workDir}/script/scp-hosts.sh
 if [ $? -eq 0 ]
 then
 	echo 'scp copy completed'
 else
 	echo 'scp copy failed to exit'
-	exit -1
+	exit 1
 fi
 
 
-# 4,stop server
-echo "4,stop server"
+# 4.stop server
+echo "4.stop server"
 sh ${workDir}/script/stop-all.sh
 
 
-# 5,delete zk node
-echo "5,delete zk node"
+# 5.delete zk node
+echo "5.delete zk node"
 
 sh ${workDir}/script/remove-zk-node.sh $zkRoot
 
 
-# 6,startup
-echo "6,startup"
+# 6.startup
+echo "6.startup"
 sh ${workDir}/script/start-all.sh
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index f8cbe7f..053652f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -760,8 +760,9 @@
                         <include>**/common/utils/StringTest.java</include>
                         <include>**/common/utils/StringUtilsTest.java</include>
                         <include>**/common/utils/TaskParametersUtilsTest.java</include>
+                        <include>**/common/utils/HadoopUtilsTest.java</include>
+                        <include>**/common/utils/HttpUtilsTest.java</include>
                         <include>**/common/ConstantsTest.java</include>
-                        <include>**/common/utils/HadoopUtils.java</include>
                         <include>**/dao/mapper/AccessTokenMapperTest.java</include>
                         <include>**/dao/mapper/AlertGroupMapperTest.java</include>
                         <include>**/dao/mapper/CommandMapperTest.java</include>
diff --git a/script/dolphinscheduler-daemon.sh b/script/dolphinscheduler-daemon.sh
index ab3b5c4..19669e5 100644
--- a/script/dolphinscheduler-daemon.sh
+++ b/script/dolphinscheduler-daemon.sh
@@ -35,6 +35,8 @@ BIN_DIR=`dirname $0`
 BIN_DIR=`cd "$BIN_DIR"; pwd`
 DOLPHINSCHEDULER_HOME=$BIN_DIR/..
 
+source /etc/profile
+
 export JAVA_HOME=$JAVA_HOME
 #export JAVA_HOME=/opt/soft/jdk
 export HOSTNAME=`hostname`
@@ -90,8 +92,8 @@ case $startStop in
 
     exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS"
 
-    echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 > /dev/null &"
-    nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 > /dev/null &
+    echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &"
+    nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &
     echo $! > $pid
     ;;
 
diff --git a/script/scp-hosts.sh b/script/scp-hosts.sh
index 05878c3..f4949f3 100644
--- a/script/scp-hosts.sh
+++ b/script/scp-hosts.sh
@@ -24,16 +24,18 @@ hostsArr=(${ips//,/ })
 for host in ${hostsArr[@]}
 do
 
-    if ! ssh -p $sshPort $host test -e $installPath; then
-      ssh -p $sshPort $host "sudo mkdir -p $installPath; sudo chown -R $deployUser:$deployUser $installPath"
-    fi
+  if ! ssh -p $sshPort $host test -e $installPath; then
+    ssh -p $sshPort $host "sudo mkdir -p $installPath; sudo chown -R $deployUser:$deployUser $installPath"
+  fi
 
+  echo "scp dirs to $host/$installPath starting"
 	ssh -p $sshPort $host  "cd $installPath/; rm -rf bin/ conf/ lib/ script/ sql/ ui/"
-	scp -P $sshPort -r $workDir/../bin  $host:$installPath
-	scp -P $sshPort -r $workDir/../conf  $host:$installPath
-	scp -P $sshPort -r $workDir/../lib   $host:$installPath
-	scp -P $sshPort -r $workDir/../script  $host:$installPath
-	scp -P $sshPort -r $workDir/../sql  $host:$installPath
-	scp -P $sshPort -r $workDir/../ui  $host:$installPath
-	scp -P $sshPort  $workDir/../install.sh  $host:$installPath
+
+  for dsDir in bin conf lib script sql ui install.sh
+  do
+    echo "start to scp $dsDir to $host/$installPath"
+    scp -P $sshPort -r $workDir/../$dsDir  $host:$installPath
+  done
+
+  echo "scp dirs to $host/$installPath complete"
 done
diff --git a/script/start-all.sh b/script/start-all.sh
index bb4b0a1..11e4572 100644
--- a/script/start-all.sh
+++ b/script/start-all.sh
@@ -23,7 +23,7 @@ source $workDir/../conf/config/install_config.conf
 mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
-        echo $master
+  echo "$master master server is starting"
 	ssh -p $sshPort $master  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start master-server;"
 
 done
@@ -31,10 +31,10 @@ done
 workersHost=(${workers//,/ })
 for worker in ${workersHost[@]}
 do
-        echo $worker
+  echo "$worker worker server is starting"
 
-        ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start worker-server;"
-        ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start logger-server;"
+  ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start worker-server;"
+  ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start logger-server;"
 done
 
 ssh -p $sshPort $alertServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start alert-server;"
@@ -42,8 +42,7 @@ ssh -p $sshPort $alertServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
-        echo $apiServer
-
-        ssh -p $sshPort $apiServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start api-server;"
+  echo "$apiServer worker server is starting"
+  ssh -p $sshPort $apiServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start api-server;"
 done
 
diff --git a/script/stop-all.sh b/script/stop-all.sh
index c0c6f4d..f761579 100644
--- a/script/stop-all.sh
+++ b/script/stop-all.sh
@@ -24,7 +24,7 @@ source $workDir/../conf/config/install_config.conf
 mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
-        echo $master
+  echo "$master master server is stopping"
 	ssh -p $sshPort $master  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop master-server;"
 
 done
@@ -32,10 +32,9 @@ done
 workersHost=(${workers//,/ })
 for worker in ${workersHost[@]}
 do
-        echo $worker
-
-        ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop worker-server;"
-        ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop logger-server;"
+  echo "$worker worker server is stopping"
+  ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop worker-server;"
+  ssh -p $sshPort $worker  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop logger-server;"
 done
 
 ssh -p $sshPort $alertServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop alert-server;"
@@ -43,8 +42,7 @@ ssh -p $sshPort $alertServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
-        echo $apiServer
-
-        ssh -p $sshPort $apiServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop api-server;"
+  echo "$apiServer worker server is stopping"
+  ssh -p $sshPort $apiServer  "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop api-server;"
 done