Posted to commits@streampark.apache.org by be...@apache.org on 2022/11/11 05:16:11 UTC

[incubator-streampark] branch dev updated: [bug] front-end build bug fixed (#2004)

This is an automated email from the ASF dual-hosted git repository.

benjobs pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-streampark.git


The following commit(s) were added to refs/heads/dev by this push:
     new 66b92fb26 [bug] front-end build bug fixed (#2004)
66b92fb26 is described below

commit 66b92fb2680f410105d22e1efabe2b3bbb39ffa1
Author: benjobs <be...@apache.org>
AuthorDate: Fri Nov 11 13:16:06 2022 +0800

    [bug] front-end build bug fixed (#2004)
    
    * [bug] front-end build bug fixed
---
 .../streampark-console-service/pom.xml             |   4 +-
 streampark-console/streampark-console-webapp/.env  |   4 +-
 streampark-spark/pom.xml                           |   1 -
 streampark-spark/streampark-spark-test/README.md   |   1 -
 .../streampark-spark-test/assembly.xml             |  79 ------------
 .../assembly/conf/prod/test.properties             | 130 -------------------
 .../assembly/conf/prod/test.yml                    |  96 --------------
 streampark-spark/streampark-spark-test/pom.xml     | 125 ------------------
 .../src/main/resources/logback.xml                 | 142 ---------------------
 .../streampark/spark/test/HelloStreamParkApp.scala |  68 ----------
 10 files changed, 4 insertions(+), 646 deletions(-)

diff --git a/streampark-console/streampark-console-service/pom.xml b/streampark-console/streampark-console-service/pom.xml
index 8e51fae2a..62f14fbe9 100644
--- a/streampark-console/streampark-console-service/pom.xml
+++ b/streampark-console/streampark-console-service/pom.xml
@@ -423,8 +423,8 @@
                                     <goal>install-node-and-npm</goal>
                                 </goals>
                                 <configuration>
-                                    <nodeVersion>v14.16.0</nodeVersion>
-                                    <npmVersion>6.14.11</npmVersion>
+                                    <nodeVersion>v16.16.0</nodeVersion>
+                                    <npmVersion>8.19.2</npmVersion>
                                 </configuration>
                             </execution>
                             <execution>
diff --git a/streampark-console/streampark-console-webapp/.env b/streampark-console/streampark-console-webapp/.env
index 6fea15fb7..5eddae9a1 100644
--- a/streampark-console/streampark-console-webapp/.env
+++ b/streampark-console/streampark-console-webapp/.env
@@ -14,10 +14,10 @@
 # limitations under the License.
 
 # port
-VITE_PORT=3100
+VITE_PORT=10001
 
 # spa-title
-VITE_GLOB_APP_TITLE=StreamPark
+VITE_GLOB_APP_TITLE=Apache StreamPark
 
 # spa shortname
 VITE_GLOB_APP_SHORT_NAME=StreamPark
diff --git a/streampark-spark/pom.xml b/streampark-spark/pom.xml
index 3fce081a5..effbfc5c6 100644
--- a/streampark-spark/pom.xml
+++ b/streampark-spark/pom.xml
@@ -33,7 +33,6 @@
     <modules>
         <module>streampark-spark-core</module>
         <module>streampark-spark-connector</module>
-        <module>streampark-spark-test</module>
     </modules>
 
     <dependencies>
diff --git a/streampark-spark/streampark-spark-test/README.md b/streampark-spark/streampark-spark-test/README.md
deleted file mode 100644
index 6f4602373..000000000
--- a/streampark-spark/streampark-spark-test/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# test module documentation
diff --git a/streampark-spark/streampark-spark-test/assembly.xml b/streampark-spark/streampark-spark-test/assembly.xml
deleted file mode 100644
index c0f0d1621..000000000
--- a/streampark-spark/streampark-spark-test/assembly.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-       https://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<assembly>
-    <id>bin</id>
-    <formats>
-        <format>tar.gz</format>
-    </formats>
-    <dependencySets>
-        <!-- The jar packages that must be bundled into the distribution are all listed here explicitly; a better way has not been found yet -->
-        <dependencySet>
-            <useProjectArtifact>true</useProjectArtifact>
-            <outputDirectory>lib</outputDirectory>
-            <useProjectAttachments>true</useProjectAttachments>
-            <includes>
-                <include>org.apache.commons:commons-pool2</include>
-                <include>org.apache.commons:commons-dbcp2</include>
-                <include>org.apache.streampark:streampark-common_2.12</include>
-                <include>org.apache.streampark:streampark-spark-core_2.12</include>
-                <include>org.apache.streampark:streampark-spark-connector-base_2.12</include>
-                <include>org.apache.streampark:streampark-spark-connector-kafka_2.12</include>
-                <include>org.apache.kafka:kafka-clients</include>
-                <include>org.apache.spark:spark-token-provider-kafka-0-10_2.12</include>
-                <include>org.apache.spark:spark-streaming-kafka-0-10_2.12</include>
-                <include>org.scalikejdbc:scalikejdbc-core_2.12</include>
-                <include>org.scalikejdbc:scalikejdbc-interpolation-macro_2.12</include>
-                <include>org.scalikejdbc:scalikejdbc-interpolation_2.12</include>
-                <include>org.scalikejdbc:scalikejdbc_2.12</include>
-                <include>redis.clients:jedis</include>
-                <include>mysql:mysql-connector-java</include>
-                <include>org.scala-lang.modules:scala-collection-compat_2.12</include>
-            </includes>
-        </dependencySet>
-    </dependencySets>
-    <fileSets>
-        <fileSet>
-            <directory>${project.build.directory}</directory>
-            <outputDirectory>lib</outputDirectory>
-            <includes>
-                <include>*.jar</include>
-            </includes>
-        </fileSet>
-        <fileSet>
-            <directory>assembly/bin</directory>
-            <outputDirectory>bin</outputDirectory>
-            <fileMode>0755</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>assembly/conf</directory>
-            <outputDirectory>conf</outputDirectory>
-            <fileMode>0755</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>assembly/logs</directory>
-            <outputDirectory>logs</outputDirectory>
-            <fileMode>0755</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>assembly/temp</directory>
-            <outputDirectory>temp</outputDirectory>
-            <fileMode>0755</fileMode>
-        </fileSet>
-    </fileSets>
-</assembly>
diff --git a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties b/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties
deleted file mode 100644
index c384728ff..000000000
--- a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.properties
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-######################################################
-#                                                    #
-#               spark process startup.sh             #
-#                   user config                      #
-#                                                    #
-######################################################
-
-#Required: the fully qualified name of the main class to execute
-spark.main.class=org.apache.streampark.spark.test.HelloStreamParkApp
-#Spark application name. It is recommended to keep the name globally unique so that failure handling can be keyed to it; if unset, the application's full class name is used
-spark.app.name=HelloStreamParkApp
-spark.app.conf.version=10
-
-######################################################
-#                                                    #
-#                spark config                        #
-#                                                    #
-######################################################
-#Cluster manager to use; if not set, YARN is generally used
-spark.master=yarn
-#YARN deployment mode;default=cluster
-spark.submit.deployMode=cluster
-#Spark Streaming batch interval;default=300
-spark.batch.duration=5
-#Task submission queue for spark on yarn;default=default
-spark.yarn.queue=default
-#Spark serialization for network transfer. The default JavaSerializer works for all types but is slower; Kryo is recommended here and is required for kafka-0.10
-spark.serializer=org.apache.spark.serializer.KryoSerializer
-
-#++++++++++++++++++++++Driver node related configuration+++++++++++++++++++++++++++
-#Driver memory size;default=512MB
-spark.driver.memory=512MB
-#Number of CPU cores used by the driver;default=1
-spark.driver.cores=1
-#When spark jars and user jars conflict on the driver, the user-supplied ones are used first; an experimental parameter, valid only in cluster mode;default=false
-spark.driver.userClassPathFirst=false
-
-#++++++++++++++++++++++Executor node related configuration+++++++++++++++++++++++++
-#Number of executors;default=1
-spark.executor.instances=1
-#Number of CPU cores per executor;default=1
-spark.executor.cores=1
-#Executor memory size;default=512MB
-spark.executor.memory=512MB
-#The same as the driver node configuration, but for the executor;default=false
-spark.executor.userClassPathFirst=true
-
-#++++++++++++++++++++++++Executor dynamic allocation-related configuration++++++++++++++++++++
-#External shuffle service that dynamic executor allocation depends on;default=false
-spark.shuffle.service.enabled=true
-#Port of the shuffle service; configured in yarn-site and loaded and started by the NodeManager;default=7337
-spark.shuffle.service.port=7337
-#Whether to enable dynamic resource allocation for executors; the YARN cluster must be configured to support it;default=false
-spark.dynamicAllocation.enabled=true
-#Time to release an idle executor;default=60s
-spark.dynamicAllocation.executorIdleTimeout=60s
-#Idle timeout before releasing an executor that holds cached data;default=infinity (never released by default)
-spark.dynamicAllocation.cachedExecutorIdleTimeout=30
-#Initial number of executors; if spark.executor.instances is also set, the smaller value is used;default=minExecutors (used when this is not set)
-spark.dynamicAllocation.initialExecutors=1
-#Maximum number of executors that dynamic allocation may request;default=infinity
-spark.dynamicAllocation.maxExecutors=60
-#Minimum number of executors that dynamic allocation may shrink to;default=0
-spark.dynamicAllocation.minExecutors=1
-#How long batch scheduling may stay backlogged before executors are added;default=1s
-spark.dynamicAllocation.schedulerBacklogTimeout=1s
-#As above, but for subsequent requests;default=schedulerBacklogTimeout (used when this is not set)
-spark.dynamicAllocation.sustainedSchedulerBacklogTimeout=1s
-
-######################################################
-#                                                    #
-#             StreamPark-Spark Kafka Source             #
-#                   base config                      #
-#                                                    #
-######################################################
-spark.source.kafka.consume.topics=bigdata
-spark.source.kafka.consume.group.id=test
-spark.source.kafka.consume.bootstrap.servers=kafka1:9092
-spark.source.kafka.consume.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-spark.source.kafka.consume.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-spark.source.kafka.consume.fetch.max.wait.ms=3000
-spark.source.kafka.consume.repartition=60
-#Where to start consuming the kafka topic on first run. Two options: latest (newest offset) or earliest (oldest offset).
-spark.source.kafka.consume.auto.offset.reset=earliest
-spark.source.kafka.offset.store.type=mysql
-spark.source.kafka.offset.store.mysql.table=consumer_offsets
-spark.source.kafka.offset.store.mysql.jdbc.url=jdbc:mysql://localhost:3306/spark
-spark.source.kafka.offset.store.mysql.user=root
-spark.source.kafka.offset.store.mysql.password=123456
-
-######################################################
-#                                                    #
-#              StreamPark-Spark MySQL Sink              #
-#                   base config                      #
-#                                                    #
-######################################################
-spark.sink.mysql.jdbc.url=jdbc:mysql://localhost:3306/spark
-spark.sink.mysql.user=root
-spark.sink.mysql.password=123456
-######################################################
-#                                                    #
-#                StreamPark-Spark Monitor               #
-#              Congestion base config                #
-#                                                    #
-######################################################
-#default=0
-spark.monitor.congestion.batch=0
-#How many batches may back up before the job is killed; the default of 0 means never kill. Combined with automatic restart, this lets a backlogged job be restarted and recover;default=0
-spark.monitor.suicide.batch=0
-#zk address
-spark.monitor.zookeeper=localhost:2181
-#spark.monitor.dingding.url=https://oapi.dingtalk.com/robot/send?access_token=d4d19790b4d4b83bfbeeb9f67e75ed5b1c2e3a40968e9d908df7c691c0f78afe
-spark.monitor.dingding.user=
diff --git a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.yml b/streampark-spark/streampark-spark-test/assembly/conf/prod/test.yml
deleted file mode 100644
index ae03c95c8..000000000
--- a/streampark-spark/streampark-spark-test/assembly/conf/prod/test.yml
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
----
-spark:
-  app:
-    name: HelloStreamParkApp
-    conf:
-      version: 10
-  submit:
-    deployMode: cluster
-  sink:
-    redis:
-      port: 6379
-      host:
-      timeout: 30
-      db: 0
-  batch:
-    duration: 5
-  serializer: org.apache.spark.serializer.KryoSerializer
-  monitor:
-    zookeeper: bigdata-001-112-130.sqkb-s.com:2181,bigdata-002-112-129.sqkb-s.com:2181,bigdata-003-112-128.sqkb-s.com:2181,bigdata-004-112-131.sqkb-s.com:2181,bigdata-005-112-127.sqkb-s.com:2181
-    suicide:
-      batch: 0
-    congestion:
-      batch: 0
-    dingding:
-      user:
-      url: https://oapi.dingtalk.com/robot/send?access_token=d4d19790b4d4b83bfbeeb9f67e75ed5b1c2e3a40968e9d908df7c691c0f78afe
-  main:
-    class: org.apache.streampark.spark.test.HelloStreamParkApp
-  source:
-    kafka:
-      offset:
-        store:
-          type: kafka
-      consume:
-        auto:
-          offset:
-            reset: earliest
-        max:
-          partition:
-            fetch:
-              bytes: 10485760
-        topics: flm_monitor_sqkb_basic_app_v2
-        fetch:
-          max:
-            wait:
-              ms: 3000
-        bootstrap:
-          servers: bigdata-kafka-001-113-8:9092,bigdata-kafka-002-113-9:9092,bigdata-kafka-003-113-10:9092
-        value:
-          deserializer: org.apache.kafka.common.serialization.StringDeserializer
-        key:
-          deserializer: org.apache.kafka.common.serialization.StringDeserializer
-        group:
-          id: test_01
-  master: yarn
-  driver:
-    memory: 512MB
-    cores: 1
-    userClassPathFirst: false
-  dynamicAllocation:
-    initialExecutors: 1
-    cachedExecutorIdleTimeout: -1
-    minExecutors: 1
-    sustainedSchedulerBacklogTimeout: 1s
-    executorIdleTimeout: 60s
-    schedulerBacklogTimeout: 1s
-    enabled: true
-    maxExecutors: 60
-  executor:
-    cores: 1
-    memory: 512MB
-    instances: 1
-    userClassPathFirst: true
-  shuffle:
-    service:
-      port: 7337
-      enabled: true
-  yarn:
-    queue: default
diff --git a/streampark-spark/streampark-spark-test/pom.xml b/streampark-spark/streampark-spark-test/pom.xml
deleted file mode 100644
index c61ced056..000000000
--- a/streampark-spark/streampark-spark-test/pom.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-       https://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.streampark</groupId>
-        <artifactId>streampark-spark</artifactId>
-        <version>1.2.4-SNAPSHOT</version>
-    </parent>
-
-    <artifactId>streampark-spark-test</artifactId>
-    <name>StreamPark : Spark Test</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.streampark</groupId>
-            <artifactId>streampark-spark-core_2.12</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.streampark</groupId>
-            <artifactId>streampark-spark-connector-kafka_2.12</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.json4s</groupId>
-            <artifactId>json4s-jackson_2.12</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.fasterxml.jackson.module</groupId>
-            <artifactId>jackson-module-scala_2.12</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.scalikejdbc</groupId>
-            <artifactId>scalikejdbc_2.12</artifactId>
-            <version>${scalikejdbc.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>mysql</groupId>
-            <artifactId>mysql-connector-java</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-    </dependencies>
-
-    <build>
-        <sourceDirectory>src/main/scala</sourceDirectory>
-
-        <resources>
-            <resource>
-                <directory>src/main/resources</directory>
-            </resource>
-        </resources>
-
-        <plugins>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-            </plugin>
-
-
-            <plugin>
-                <groupId>net.alchim31.maven</groupId>
-                <artifactId>scala-maven-plugin</artifactId>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-assembly-plugin</artifactId>
-                <version>3.1.1</version>
-                <executions>
-                    <execution>
-                        <id>distro-assembly</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>single</goal>
-                        </goals>
-                    </execution>
-                </executions>
-                <configuration>
-                    <appendAssemblyId>false</appendAssemblyId>
-                    <descriptors>
-                        <descriptor>assembly.xml</descriptor>
-                    </descriptors>
-                </configuration>
-            </plugin>
-        </plugins>
-
-
-    </build>
-
-</project>
diff --git a/streampark-spark/streampark-spark-test/src/main/resources/logback.xml b/streampark-spark/streampark-spark-test/src/main/resources/logback.xml
deleted file mode 100644
index 69ddb9c8f..000000000
--- a/streampark-spark/streampark-spark-test/src/main/resources/logback.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-       https://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<configuration>
-    <!-- log file storage path -->
-    <property name="LOG_HOME" value="${app.home}/logs/"/>
-    <property name="FILE_SIZE" value="50MB"/>
-    <property name="MAX_HISTORY" value="100"/>
-    <timestamp key="DATE_TIME" datePattern="yyyy-MM-dd HH:mm:ss"/>
-
-    <property name="log.colorPattern"
-              value="%d{yyyy-MM-dd HH:mm:ss} | %highlight(%-5level) | %boldYellow(%thread) | %boldGreen(%logger) | %msg%n"/>
-    <property name="log.pattern"
-              value="%d{yyyy-MM-dd HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} - %msg%n"/>
-
-    <!-- console output -->
-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.colorPattern}</pattern>
-        </encoder>
-    </appender>
-    <!-- ERROR output to file, rolled by date and file size -->
-    <appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.pattern}</pattern>
-        </encoder>
-        <filter class="ch.qos.logback.classic.filter.LevelFilter">
-            <level>ERROR</level>
-            <onMatch>ACCEPT</onMatch>
-            <onMismatch>DENY</onMismatch>
-        </filter>
-        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-            <fileNamePattern>${LOG_HOME}%d/error.%i.log</fileNamePattern>
-            <maxHistory>${MAX_HISTORY}</maxHistory>
-            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-                <maxFileSize>${FILE_SIZE}</maxFileSize>
-            </timeBasedFileNamingAndTriggeringPolicy>
-        </rollingPolicy>
-    </appender>
-
-    <!-- WARN output to file, rolled by date and file size -->
-    <appender name="WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.pattern}</pattern>
-        </encoder>
-        <filter class="ch.qos.logback.classic.filter.LevelFilter">
-            <level>WARN</level>
-            <onMatch>ACCEPT</onMatch>
-            <onMismatch>DENY</onMismatch>
-        </filter>
-        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-            <fileNamePattern>${LOG_HOME}%d/warn.%i.log</fileNamePattern>
-            <MAX_HISTORY>${MAX_HISTORY}</MAX_HISTORY>
-            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-                <maxFileSize>${FILE_SIZE}</maxFileSize>
-            </timeBasedFileNamingAndTriggeringPolicy>
-        </rollingPolicy>
-    </appender>
-
-    <!-- INFO output to file, rolled by date and file size -->
-    <appender name="INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.pattern}</pattern>
-        </encoder>
-        <filter class="ch.qos.logback.classic.filter.LevelFilter">
-            <level>INFO</level>
-            <onMatch>ACCEPT</onMatch>
-            <onMismatch>DENY</onMismatch>
-        </filter>
-        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-            <fileNamePattern>${LOG_HOME}%d/info.%i.log</fileNamePattern>
-            <MAX_HISTORY>${MAX_HISTORY}</MAX_HISTORY>
-            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-                <maxFileSize>${FILE_SIZE}</maxFileSize>
-            </timeBasedFileNamingAndTriggeringPolicy>
-        </rollingPolicy>
-    </appender>
-    <!-- DEBUG output to file, rolled by date and file size -->
-    <appender name="DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.pattern}</pattern>
-        </encoder>
-        <filter class="ch.qos.logback.classic.filter.LevelFilter">
-            <level>DEBUG</level>
-            <onMatch>ACCEPT</onMatch>
-            <onMismatch>DENY</onMismatch>
-        </filter>
-        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-            <fileNamePattern>${LOG_HOME}%d/debug.%i.log</fileNamePattern>
-            <MAX_HISTORY>${MAX_HISTORY}</MAX_HISTORY>
-            <timeBasedFileNamingAndTriggeringPolicy
-                    class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-                <maxFileSize>${FILE_SIZE}</maxFileSize>
-            </timeBasedFileNamingAndTriggeringPolicy>
-        </rollingPolicy>
-    </appender>
-    <!-- TRACE output to file, rolled by date and file size -->
-    <appender name="TRACE" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <encoder charset="utf-8">
-            <pattern>${log.pattern}</pattern>
-        </encoder>
-        <filter class="ch.qos.logback.classic.filter.LevelFilter">
-            <level>TRACE</level>
-            <onMatch>ACCEPT</onMatch>
-            <onMismatch>DENY</onMismatch>
-        </filter>
-        <rollingPolicy
-                class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
-            <fileNamePattern>${LOG_HOME}%d/trace.%i.log</fileNamePattern>
-            <MAX_HISTORY>${MAX_HISTORY}</MAX_HISTORY>
-            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
-                <maxFileSize>${FILE_SIZE}</maxFileSize>
-            </timeBasedFileNamingAndTriggeringPolicy>
-        </rollingPolicy>
-    </appender>
-
-    <!-- Root logger -->
-    <root level="INFO">
-        <appender-ref ref="STDOUT"/>
-        <appender-ref ref="DEBUG"/>
-        <appender-ref ref="ERROR"/>
-        <appender-ref ref="WARN"/>
-        <appender-ref ref="INFO"/>
-        <appender-ref ref="TRACE"/>
-    </root>
-</configuration>
diff --git a/streampark-spark/streampark-spark-test/src/main/scala/org/apache/streampark/spark/test/HelloStreamParkApp.scala b/streampark-spark/streampark-spark-test/src/main/scala/org/apache/streampark/spark/test/HelloStreamParkApp.scala
deleted file mode 100644
index e540f1a72..000000000
--- a/streampark-spark/streampark-spark-test/src/main/scala/org/apache/streampark/spark/test/HelloStreamParkApp.scala
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.streampark.spark.test
-
-import org.apache.streampark.spark.connector.kafka.source.KafkaSource
-import org.apache.streampark.spark.core.SparkStreaming
-import scalikejdbc.{ConnectionPool, DB, SQL}
-
-object HelloStreamParkApp extends SparkStreaming {
-
-  override def handle(): Unit = {
-
-    val jdbcURL = sparkConf.get("spark.sink.mysql.jdbc.url")
-    val user = sparkConf.get("spark.sink.mysql.user")
-    val password = sparkConf.get("spark.sink.mysql.password")
-
-    val source = new KafkaSource[String, String](context)
-
-    val line = source.getDStream[String](x => (x.value))
-
-    line.flatMap(_.split(" ")).map(_ -> 1).reduceByKey(_ + _)
-      .foreachRDD((rdd, time) => {
-
-        // handle the transformation
-        rdd.foreachPartition(iter => {
-
-          // sink data to MySQL
-          ConnectionPool.singleton(jdbcURL, user, password)
-
-          DB.autoCommit { implicit session =>
-            val sql =
-              s"""
-                 |create table if not exists word_count (
-                 |`word` varchar(255),
-                 |`count` int(255),
-                 |UNIQUE INDEX `INX`(`word`)
-                 |)
-                    """.stripMargin
-            SQL(sql).execute.apply()
-          }
-
-          DB.localTx(implicit session => {
-            iter.foreach(x => {
-              val sql = s"replace into word_count(`word`,`count`) values('${x._1}',${x._2})"
-              SQL(sql).update()
-            })
-          })
-        })
-        // commit offset
-        source.updateOffset(time)
-      })
-  }
-}