Posted to commits@inlong.apache.org by do...@apache.org on 2022/11/25 12:53:45 UTC

[inlong] 01/01: Revert "[INLONG-6629][GIT] Add .gitattributes to format the files (#6630)"

This is an automated email from the ASF dual-hosted git repository.

dockerzhang pushed a commit to branch revert-6630-fix-6629
in repository https://gitbox.apache.org/repos/asf/inlong.git

commit 5d120d3153787d71b42aa480e2a4f481c6f2f736
Author: Charles Zhang <do...@apache.org>
AuthorDate: Fri Nov 25 20:53:38 2022 +0800

    Revert "[INLONG-6629][GIT] Add .gitattributes to format the files (#6630)"
    
    This reverts commit dc4959bed3e025b7001041a6f720fcb70d1713e5.
---
 .gitattributes                                     |   38 -
 docker/docker-compose/.env                         |   38 +-
 .../org/apache/inlong/agent/utils/GsonUtil.java    |  114 +-
 .../inlong/agent/plugin/sources/OracleSource.java  |   96 +-
 .../agent/plugin/sources/reader/OracleReader.java  |  616 ++--
 .../agent/plugin/sources/TestOracleConnect.java    |  124 +-
 .../agent/plugin/sources/TestOracleSource.java     |  180 +-
 .../dataproxy/sink/common/MsgDedupHandler.java     |  212 +-
 .../dataproxy/sink/common/TubeProducerHolder.java  |  562 ++--
 .../inlong/dataproxy/sink/common/TubeUtils.java    |  156 +-
 .../inlong/dataproxy/utils/DateTimeUtils.java      |   82 +-
 .../inlong/dataproxy/utils/InLongMsgVer.java       |  108 +-
 .../src/main/assemblies/sort-connectors.xml        |  350 +-
 .../resources/plugins/manager-plugin-example.jar   |  Bin 96654 -> 96655 bytes
 .../release/lib/libdataproxy_sdk.a                 |  Bin 18423337 -> 18423406 bytes
 .../inlong/sort/protocol/constant/DLCConstant.java |  160 +-
 .../sort/protocol/constant/IcebergConstant.java    |  108 +-
 .../protocol/node/extract/PulsarExtractNode.java   |  250 +-
 .../sort/protocol/node/format/InLongMsgFormat.java |  182 +-
 .../protocol/node/load/ClickHouseLoadNode.java     |  246 +-
 .../protocol/node/load/DLCIcebergLoadNode.java     |  288 +-
 .../sort/protocol/node/load/DorisLoadNode.java     |  354 +-
 .../sort/protocol/node/load/IcebergLoadNode.java   |  260 +-
 .../node/extract/PulsarExtractNodeTest.java        |  110 +-
 .../protocol/node/format/InLongMsgFormatTest.java  |   60 +-
 .../protocol/node/load/ClickHouseLoadNodeTest.java |  102 +-
 .../protocol/node/load/DLCIcebergLoadNodeTest.java |  120 +-
 .../sort/protocol/node/load/DorisLoadNodeTest.java |  146 +-
 .../sort/base/format/JsonToRowDataConverters.java  |  832 ++---
 .../inlong/sort/base/metric/MetricOption.java      |  422 +--
 .../inlong/sort/base/sink/MultipleSinkOption.java  |  310 +-
 .../base/sink/SchemaUpdateExceptionPolicy.java     |   92 +-
 .../apache/inlong/sort/base/sink/TableChange.java  |  402 +--
 .../hive/filesystem/AbstractStreamingWriter.java   |  466 +--
 .../sort/hive/filesystem/CompactFileWriter.java    |  138 +-
 .../sort/hive/filesystem/StreamingFileWriter.java  |  206 +-
 .../inlong/sort/hive/filesystem/StreamingSink.java |  328 +-
 .../sort/iceberg/flink/CompactTableProperties.java |  196 +-
 .../inlong/sort/iceberg/flink/FlinkCatalog.java    | 1464 ++++----
 .../sort/iceberg/flink/FlinkCatalogFactory.java    |  334 +-
 .../iceberg/flink/FlinkDynamicTableFactory.java    |  464 +--
 .../sort/iceberg/flink/actions/RewriteResult.java  |   64 +-
 .../flink/actions/SyncRewriteDataFilesAction.java  |  266 +-
 .../actions/SyncRewriteDataFilesActionOption.java  |  342 +-
 .../inlong/sort/iceberg/flink/sink/FlinkSink.java  | 1064 +++---
 .../flink/sink/RowDataTaskWriterFactory.java       |  296 +-
 .../inlong/sort/iceberg/FlinkTypeToType.java       |  402 +--
 .../iceberg/sink/RowDataTaskWriterFactory.java     |  298 +-
 .../sink/multiple/DynamicSchemaHandleOperator.java |  588 ++--
 .../multiple/IcebergMultipleFilesCommiter.java     |  212 +-
 .../sink/multiple/IcebergMultipleStreamWriter.java |  542 +--
 .../sink/multiple/IcebergProcessFunction.java      |  212 +-
 .../sink/multiple/IcebergProcessOperator.java      |  288 +-
 .../sink/multiple/IcebergSingleFileCommiter.java   |  786 ++---
 .../sink/multiple/IcebergSingleStreamWriter.java   |  352 +-
 .../iceberg/sink/multiple/MultipleWriteResult.java |   82 +-
 .../iceberg/sink/multiple/RecordWithSchema.java    |  342 +-
 .../iceberg/sink/multiple/SchemaChangeUtils.java   |  272 +-
 .../sink/multiple/SchemaEvolutionFunction.java     |   46 +-
 .../jdbc/table/JdbcDynamicOutputFormatBuilder.java |  582 ++--
 .../table/DynamicTubeMQDeserializationSchema.java  |  268 +-
 .../sort/parser/ClickHouseSqlParserTest.java       |  252 +-
 .../inlong/sort/parser/DLCIcebergSqlParseTest.java |  292 +-
 .../DorisExtractNodeToDorisLoadNodeTest.java       |  290 +-
 .../sort/parser/IcebergNodeSqlParserTest.java      |  346 +-
 .../MySqlExtractNodeToDorisLoadNodeTest.java       |  276 +-
 .../inlong/sort/parser/PulsarSqlParserTest.java    |  216 +-
 .../org/apache/inlong/sort/tests/KafkaE2ECase.java |  482 +--
 .../sort/tests/utils/FlinkContainerTestEnv.java    |  630 ++--
 .../apache/inlong/sort/tests/utils/JdbcProxy.java  |  206 +-
 .../inlong/sort/tests/utils/MySqlContainer.java    |  408 +--
 .../sort/tests/utils/PlaceholderResolver.java      |  300 +-
 .../apache/inlong/sort/tests/utils/TestUtils.java  |  250 +-
 .../test/resources/env/kafka_test_mysql_init.txt   |   36 +-
 .../src/test/resources/flinkSql/kafka_test.sql     |  150 +-
 .../src/test/resources/groupFile/kafka_test.json   | 1122 +++----
 .../formats/inlongmsg/InLongMsgDecodingFormat.java |  356 +-
 .../inlongmsg/InLongMsgDeserializationSchema.java  |  346 +-
 .../formats/inlongmsg/InLongMsgFormatFactory.java  |  190 +-
 .../sort/formats/inlongmsg/InLongMsgOptions.java   |  102 +-
 .../org.apache.flink.table.factories.Factory       |   32 +-
 .../inlongmsg/InLongMsgFormatFactoryTest.java      |  248 +-
 .../inlongmsg/InLongMsgRowDataSerDeTest.java       |  358 +-
 .../org.apache.flink.table.factories.Factory       |   32 +-
 .../canal/CanalJsonEnhancedEncodingFormat.java     |  656 ++--
 .../json/canal/CanalJsonEnhancedFormatFactory.java |  224 +-
 .../CanalJsonEnhancedSerializationSchema.java      |  388 +--
 .../org.apache.flink.table.factories.Factory       |   32 +-
 .../canal/CanalJsonEnhancedFormatFactoryTest.java  |  276 +-
 .../canal/CanalJsonEnhancedSerDeSchemaTest.java    |  428 +--
 .../src/test/resources/canal-json-inlong-data.txt  |    4 +-
 .../tubemq-client-cpp/conf/client.conf             |   54 +-
 .../tubemq-client-cpp/example/README.md            |   44 +-
 .../include/tubemq/tubemq_client.h                 |  138 +-
 .../include/tubemq/tubemq_errcode.h                |  234 +-
 .../include/tubemq/tubemq_message.h                |  156 +-
 .../include/tubemq/tubemq_return.h                 |  226 +-
 .../include/tubemq/tubemq_tdmsg.h                  |  194 +-
 .../tubemq-client-cpp/proto/readme.md              |   38 +-
 .../tubemq-client-cpp/src/any.h                    |  282 +-
 .../tubemq-client-cpp/src/baseconsumer.h           |  284 +-
 .../tubemq-client-cpp/src/buffer.h                 |  678 ++--
 .../tubemq-client-cpp/src/client_service.cc        |  620 ++--
 .../tubemq-client-cpp/src/client_service.h         |  250 +-
 .../tubemq-client-cpp/src/client_subinfo.cc        |  266 +-
 .../tubemq-client-cpp/src/client_subinfo.h         |  158 +-
 .../tubemq-client-cpp/src/codec_protocol.h         |  116 +-
 .../tubemq-client-cpp/src/const_config.h           |  272 +-
 .../tubemq-client-cpp/src/const_rpc.h              |  182 +-
 .../tubemq-client-cpp/src/file_ini.cc              |  308 +-
 .../tubemq-client-cpp/src/file_ini.h               |  102 +-
 .../tubemq-client-cpp/src/flowctrl_def.cc          | 1570 ++++-----
 .../tubemq-client-cpp/src/meta_info.cc             | 1160 +++----
 .../tubemq-client-cpp/src/meta_info.h              |  376 +--
 .../tubemq-client-cpp/src/rmt_data_cache.cc        | 1324 ++++----
 .../tubemq-client-cpp/src/rmt_data_cache.h         |  332 +-
 .../tubemq-client-cpp/src/tubemq_config.cc         | 1418 ++++----
 .../tubemq-client-cpp/src/tubemq_message.cc        |  458 +--
 .../tubemq-client-cpp/src/tubemq_return.cc         |  360 +-
 .../tubemq-client-cpp/src/tubemq_tdmsg.cc          | 1864 +++++------
 .../tubemq-client-cpp/src/utils.cc                 | 1046 +++---
 .../tubemq-client-cpp/src/utils.h                  |  142 +-
 .../tubemq-client-cpp/src/version.h                |   66 +-
 .../tubemq-client-cpp/tests/README.md              |   44 +-
 .../tubemq-client-cpp/third_party/README.md        |   46 +-
 .../tubemq/client/common/QueryMetaResult.java      |  124 +-
 .../inlong/tubemq/client/common/StatsConfig.java   |  322 +-
 .../inlong/tubemq/client/common/StatsLevel.java    |  112 +-
 .../consumer/SimpleClientBalanceConsumer.java      | 3506 ++++++++++----------
 .../tubemq/client/producer/MaxMsgSizeHolder.java   |  138 +-
 .../tubemq/client/consumer/StatsConfigTest.java    |  102 +-
 .../inlong/tubemq/corebase/metric/Counter.java     |   80 +-
 .../inlong/tubemq/corebase/metric/Counting.java    |   88 +-
 .../inlong/tubemq/corebase/metric/Gauge.java       |   62 +-
 .../inlong/tubemq/corebase/metric/Histogram.java   |  140 +-
 .../inlong/tubemq/corebase/metric/Metric.java      |   78 +-
 .../tubemq/corebase/metric/TrafficStatsUnit.java   |  170 +-
 .../tubemq/corebase/metric/impl/BaseMetric.java    |  108 +-
 .../tubemq/corebase/metric/impl/ESTHistogram.java  |  410 +--
 .../tubemq/corebase/metric/impl/LongMaxGauge.java  |  126 +-
 .../tubemq/corebase/metric/impl/LongMinGauge.java  |  126 +-
 .../corebase/metric/impl/LongOnlineCounter.java    |  134 +-
 .../corebase/metric/impl/LongStatsCounter.java     |  130 +-
 .../corebase/metric/impl/SimpleHistogram.java      |  174 +-
 .../tubemq/corebase/metric/impl/SinceTime.java     |  112 +-
 .../inlong/tubemq/corebase/rv/ProcessResult.java   |  140 +-
 .../apache/inlong/tubemq/corebase/rv/RetValue.java |  180 +-
 .../corebase/utils/DateTimeConvertUtils.java       |  414 +--
 .../tubemq/corebase/utils/KeyBuilderUtils.java     |  116 +-
 .../inlong/tubemq/corebase/utils/MixedUtils.java   |  306 +-
 .../inlong/tubemq/corebase/utils/RegexDef.java     |  112 +-
 .../tubemq/corebase/utils/SettingValidUtils.java   |   98 +-
 .../inlong/tubemq/corebase/utils/Tuple2.java       |  142 +-
 .../inlong/tubemq/corebase/utils/Tuple3.java       |  146 +-
 .../inlong/tubemq/corebase/utils/Tuple4.java       |  146 +-
 .../inlong/tubemq/corebase/MessagesTest.java       |  206 +-
 .../tubemq/corebase/metric/HistogramTest.java      |  272 +-
 .../tubemq/corebase/metric/SimpleMetricTest.java   |  248 +-
 .../corebase/utils/DateTimeConvertUtilsTest.java   |  186 +-
 .../controller/node/request/ModifyBrokerReq.java   |   90 +-
 .../broker/metadata/ClusterConfigHolder.java       |  160 +-
 .../tubemq/server/broker/offset/OffsetCsmItem.java |   92 +-
 .../server/broker/offset/OffsetCsmRecord.java      |  156 +-
 .../server/broker/stats/BrokerSrvStatsHolder.java  |  588 ++--
 .../server/broker/stats/BrokerStatsType.java       |  112 +-
 .../server/broker/stats/audit/AuditUtils.java      |  234 +-
 .../stats/prometheus/BrokerPromMetricService.java  |  274 +-
 .../server/broker/utils/GroupOffsetInfo.java       |  168 +-
 .../server/broker/utils/TopicPubStoreInfo.java     |  106 +-
 .../server/broker/web/AbstractWebHandler.java      |  188 +-
 .../server/common/exception/LoadMetaException.java |   70 +-
 .../tubemq/server/common/fielddef/CliArgDef.java   |  276 +-
 .../tubemq/server/common/fielddef/WebFieldDef.java |  720 ++--
 .../server/common/fielddef/WebFieldType.java       |  122 +-
 .../server/common/fileconfig/BdbMetaConfig.java    |  300 +-
 .../server/common/fileconfig/ZKMetaConfig.java     |  100 +-
 .../server/common/statusdef/CleanPolType.java      |   96 +-
 .../server/common/statusdef/EnableStatus.java      |  114 +-
 .../server/common/statusdef/ManageStatus.java      |  270 +-
 .../tubemq/server/common/statusdef/StepStatus.java |  142 +-
 .../server/common/statusdef/TopicStatus.java       |  110 +-
 .../server/common/statusdef/TopicStsChgType.java   |  106 +-
 .../tubemq/server/common/utils/AppendResult.java   |  128 +-
 .../tubemq/server/common/utils/HttpUtils.java      |  334 +-
 .../tubemq/server/common/utils/SerialIdUtils.java  |   74 +-
 .../server/common/webbase/WebCallStatsHolder.java  |  484 +--
 .../server/common/webbase/WebMethodMapper.java     |  184 +-
 .../server/master/metamanage/DataOpErrCode.java    |  126 +-
 .../master/metamanage/DefaultMetaDataService.java  | 2366 ++++++-------
 .../server/master/metamanage/MetaDataService.java  | 1516 ++++-----
 .../metamanage/metastore/ConfigObserver.java       |   52 +-
 .../metamanage/metastore/KeepAliveService.java     |  150 +-
 .../metastore/dao/entity/BaseEntity.java           |  730 ++--
 .../metastore/dao/entity/TopicPropGroup.java       | 1044 +++---
 .../metastore/dao/mapper/AbstractMapper.java       |   54 +-
 .../metastore/dao/mapper/BrokerConfigMapper.java   |  236 +-
 .../metastore/dao/mapper/ClusterConfigMapper.java  |  102 +-
 .../metastore/dao/mapper/ConsumeCtrlMapper.java    |  124 +-
 .../metastore/dao/mapper/GroupResCtrlMapper.java   |  104 +-
 .../metastore/dao/mapper/MetaConfigMapper.java     | 1212 +++----
 .../metastore/dao/mapper/TopicCtrlMapper.java      |  190 +-
 .../metastore/dao/mapper/TopicDeployMapper.java    |  250 +-
 .../metastore/impl/AbsBrokerConfigMapperImpl.java  |  838 ++---
 .../metastore/impl/AbsClusterConfigMapperImpl.java |  262 +-
 .../metastore/impl/AbsConsumeCtrlMapperImpl.java   |  930 +++---
 .../metastore/impl/AbsGroupResCtrlMapperImpl.java  |  338 +-
 .../metastore/impl/AbsMetaConfigMapperImpl.java    | 2674 +++++++--------
 .../metastore/impl/AbsTopicCtrlMapperImpl.java     |  424 +--
 .../metastore/impl/AbsTopicDeployMapperImpl.java   | 1584 ++++-----
 .../impl/bdbimpl/BdbBrokerConfigMapperImpl.java    |  236 +-
 .../impl/bdbimpl/BdbClusterConfigMapperImpl.java   |  238 +-
 .../impl/bdbimpl/BdbConsumeCtrlMapperImpl.java     |  230 +-
 .../impl/bdbimpl/BdbGroupResCtrlMapperImpl.java    |  230 +-
 .../impl/bdbimpl/BdbMetaConfigMapperImpl.java      | 1134 +++----
 .../impl/bdbimpl/BdbTopicCtrlMapperImpl.java       |  234 +-
 .../impl/bdbimpl/BdbTopicDeployMapperImpl.java     |  232 +-
 .../metastore/impl/zkimpl/TZKNodeKeys.java         |   62 +-
 .../impl/zkimpl/ZKBrokerConfigMapperImpl.java      |  246 +-
 .../impl/zkimpl/ZKClusterConfigMapperImpl.java     |  256 +-
 .../impl/zkimpl/ZKConsumeCtrlMapperImpl.java       |  238 +-
 .../impl/zkimpl/ZKGroupResCtrlMapperImpl.java      |  244 +-
 .../impl/zkimpl/ZKMetaConfigMapperImpl.java        |  706 ++--
 .../impl/zkimpl/ZKTopicCtrlMapperImpl.java         |  244 +-
 .../impl/zkimpl/ZKTopicDeployMapperImpl.java       |  246 +-
 .../nodemanage/nodebroker/BrokerPSInfoHolder.java  |  440 +--
 .../nodemanage/nodebroker/BrokerRunManager.java    |  220 +-
 .../nodemanage/nodebroker/BrokerSyncData.java      |  682 ++--
 .../nodemanage/nodebroker/BrokerTopicInfoView.java |  890 ++---
 .../nodemanage/nodebroker/DefBrokerRunManager.java | 1096 +++---
 .../server/master/stats/MasterSrvStatsHolder.java  |  916 ++---
 .../server/master/stats/MasterStatsType.java       |  110 +-
 .../stats/prometheus/MasterPromMetricService.java  |  190 +-
 .../server/master/utils/MetaConfigSamplePrint.java |  214 +-
 .../master/web/handler/AbstractWebHandler.java     |  116 +-
 .../master/web/handler/BrokerProcessResult.java    |  108 +-
 .../master/web/handler/GroupProcessResult.java     |   92 +-
 .../master/web/handler/TopicProcessResult.java     |   94 +-
 .../master/web/handler/WebMasterInfoHandler.java   | 1092 +++---
 .../master/web/handler/WebOtherInfoHandler.java    | 1128 +++----
 .../tubemq/server/tools/cli/CliAbstractBase.java   |  146 +-
 .../server/common/WebParameterUtilsTest.java       |  572 ++--
 .../metastore/dao/entity/BaseEntityTest.java       |  400 +--
 .../metastore/dao/entity/BrokerConfEntityTest.java |  388 +--
 .../dao/entity/ClusterSettingEntityTest.java       |  522 +--
 .../dao/entity/GroupConsumeCtrlEntityTest.java     |  180 +-
 .../dao/entity/GroupResCtrlEntityTest.java         |  262 +-
 .../metastore/dao/entity/TopicCtrlEntityTest.java  |  244 +-
 .../dao/entity/TopicDeployEntityTest.java          |  316 +-
 .../metastore/dao/entity/TopicPropGroupTest.java   |  402 +--
 .../src/test/resources/master-bdbstore.ini         |  182 +-
 .../src/test/resources/master-meta-bdb.ini         |  170 +-
 .../src/test/resources/master-meta-zk.ini          |  114 +-
 .../licenses/LICENSE-akka-actor_2.11.txt           |  422 +--
 .../licenses/LICENSE-akka-protobuf_2.11.txt        |  422 +--
 .../licenses/LICENSE-akka-stream_2.11.txt          |  422 +--
 .../licenses/LICENSE-aopalliance-repackaged.txt    | 1274 +++----
 licenses/inlong-agent/licenses/LICENSE-hk2-api.txt | 1274 +++----
 .../inlong-agent/licenses/LICENSE-hk2-locator.txt  | 1274 +++----
 .../inlong-agent/licenses/LICENSE-hk2-utils.txt    | 1274 +++----
 .../licenses/LICENSE-jakarta.inject.txt            | 1274 +++----
 .../inlong-agent/licenses/LICENSE-javassist.txt    |  280 +-
 .../licenses/LICENSE-javax.ws.rs-api.txt           | 1516 ++++-----
 .../licenses/LICENSE-jcip-annotations.txt          |   16 +-
 .../inlong-agent/licenses/LICENSE-jetty-jmx.txt    |  332 +-
 .../inlong-agent/licenses/LICENSE-jopt-simple.txt  |   46 +-
 licenses/inlong-agent/licenses/LICENSE-jsr305.txt  |   14 +-
 .../licenses/LICENSE-osgi-resource-locator.txt     | 1272 +++----
 .../licenses/LICENSE-protobuf-java.txt             |   62 +-
 .../licenses/LICENSE-pulsar-client.txt             |  608 ++--
 .../licenses/LICENSE-scala-library.txt             |   28 +-
 .../LICENSE-scala-parser-combinators_2.11.txt      |   28 +-
 .../licenses/LICENSE-scala-reflect.txt             |   28 +-
 licenses/inlong-agent/licenses/LICENSE-xz.txt      |   14 +-
 .../notices/NOTICE-jakarta.validation-api.txt      |   84 +-
 .../inlong-agent/notices/NOTICE-jersey-client.txt  |  226 +-
 .../inlong-agent/notices/NOTICE-jersey-common.txt  |  226 +-
 .../NOTICE-jersey-container-servlet-core.txt       |  226 +-
 .../notices/NOTICE-jersey-container-servlet.txt    |  226 +-
 .../inlong-agent/notices/NOTICE-jersey-hk2.txt     |  226 +-
 .../inlong-agent/notices/NOTICE-jersey-server.txt  |  226 +-
 licenses/inlong-agent/notices/NOTICE-jetty-jmx.txt |  234 +-
 .../inlong-agent/notices/NOTICE-netty-buffer.txt   |  526 +--
 .../inlong-agent/notices/NOTICE-netty-codec.txt    |  526 +--
 .../inlong-agent/notices/NOTICE-netty-common.txt   |  526 +--
 .../inlong-agent/notices/NOTICE-netty-handler.txt  |  526 +--
 .../inlong-agent/notices/NOTICE-netty-resolver.txt |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 .../notices/NOTICE-osgi-resource-locator.txt       |  104 +-
 .../inlong-audit/licenses/LICENSE-HdrHistogram.txt |   80 +-
 .../inlong-audit/licenses/LICENSE-compiler.txt     |   26 +-
 licenses/inlong-audit/licenses/LICENSE-hppc.txt    |  404 +--
 .../licenses/LICENSE-javax.ws.rs-api.txt           | 1516 ++++-----
 .../licenses/LICENSE-jcip-annotations.txt          |   16 +-
 .../inlong-audit/licenses/LICENSE-jetty-jmx.txt    |  332 +-
 .../inlong-audit/licenses/LICENSE-jopt-simple.txt  |   46 +-
 licenses/inlong-audit/licenses/LICENSE-jsr305.txt  |   14 +-
 .../LICENSE-mybatis-spring-boot-autoconfigure.txt  |   24 +-
 .../LICENSE-mybatis-spring-boot-starter.txt        |   24 +-
 .../licenses/LICENSE-protobuf-java.txt             |   62 +-
 .../licenses/LICENSE-pulsar-client.txt             |  608 ++--
 licenses/inlong-audit/licenses/LICENSE-xz.txt      |   14 +-
 .../inlong-audit/licenses/LICENSE-zstd-jni.txt     |   50 +-
 .../notices/NOTICE-jakarta.annotation-api.txt      |   76 +-
 licenses/inlong-audit/notices/NOTICE-jetty-jmx.txt |  234 +-
 .../inlong-audit/notices/NOTICE-netty-buffer.txt   |  526 +--
 .../inlong-audit/notices/NOTICE-netty-codec.txt    |  526 +--
 .../inlong-audit/notices/NOTICE-netty-common.txt   |  526 +--
 .../inlong-audit/notices/NOTICE-netty-handler.txt  |  526 +--
 .../inlong-audit/notices/NOTICE-netty-resolver.txt |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 licenses/inlong-dashboard/NOTICE                   |   80 +-
 .../inlong-dashboard/licenses/LICENSE-echarts.txt  |  442 +--
 licenses/inlong-dataproxy/LICENSE                  |  976 +++---
 licenses/inlong-dataproxy/NOTICE                   | 1472 ++++----
 .../licenses/LICENSE-javax.ws.rs-api.txt           | 1516 ++++-----
 .../licenses/LICENSE-jcip-annotations.txt          |   16 +-
 .../licenses/LICENSE-jetty-jmx.txt                 |  332 +-
 .../inlong-dataproxy/licenses/LICENSE-jsr305.txt   |   14 +-
 licenses/inlong-dataproxy/licenses/LICENSE-xz.txt  |   14 +-
 .../inlong-dataproxy/notices/NOTICE-jetty-jmx.txt  |  234 +-
 .../notices/NOTICE-netty-buffer.txt                |  526 +--
 .../notices/NOTICE-netty-codec.txt                 |  526 +--
 .../notices/NOTICE-netty-common.txt                |  526 +--
 .../notices/NOTICE-netty-handler.txt               |  526 +--
 .../notices/NOTICE-netty-resolver.txt              |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 .../licenses/LICENSE-HdrHistogram.txt              |   80 +-
 .../licenses/LICENSE-RoaringBitmap.txt             |  380 +--
 .../licenses/LICENSE-akka-actor_2.11.txt           |  422 +--
 .../licenses/LICENSE-akka-protobuf_2.11.txt        |  422 +--
 .../licenses/LICENSE-akka-slf4j_2.11.txt           |  422 +--
 .../licenses/LICENSE-akka-stream_2.11.txt          |  422 +--
 .../licenses/LICENSE-antlr-runtime.txt             |   50 +-
 .../licenses/LICENSE-aopalliance.txt               |    6 +-
 .../licenses/LICENSE-aspectjweaver.txt             |  556 ++--
 .../licenses/LICENSE-commons-compiler.txt          |   60 +-
 .../inlong-manager/licenses/LICENSE-compiler.txt   |   26 +-
 .../licenses/LICENSE-druid-spring-boot-starter.txt |   24 +-
 licenses/inlong-manager/licenses/LICENSE-druid.txt |   24 +-
 .../licenses/LICENSE-flatbuffers.txt               |  402 +--
 .../licenses/LICENSE-grizzled-slf4j_2.11.txt       |   70 +-
 licenses/inlong-manager/licenses/LICENSE-hppc.txt  |  404 +--
 .../inlong-manager/licenses/LICENSE-janino.txt     |   60 +-
 .../inlong-manager/licenses/LICENSE-javassist.txt  |  280 +-
 .../licenses/LICENSE-jcip-annotations.txt          |   16 +-
 .../inlong-manager/licenses/LICENSE-jcommander.txt |  404 +--
 .../licenses/LICENSE-jersey-core.txt               |  406 +--
 .../licenses/LICENSE-jersey-json.txt               |  406 +--
 .../licenses/LICENSE-jetty-sslengine.txt           |  332 +-
 .../licenses/LICENSE-jetty-util-6.1.26.txt         |  332 +-
 licenses/inlong-manager/licenses/LICENSE-jetty.txt |  332 +-
 licenses/inlong-manager/licenses/LICENSE-jline.txt |   68 +-
 licenses/inlong-manager/licenses/LICENSE-joni.txt  |   40 +-
 .../licenses/LICENSE-jopt-simple.txt               |   46 +-
 licenses/inlong-manager/licenses/LICENSE-jpam.txt  |   28 +-
 .../inlong-manager/licenses/LICENSE-jsr305.txt     |   14 +-
 licenses/inlong-manager/licenses/LICENSE-kryo.txt  |   18 +-
 .../licenses/LICENSE-leveldbjni-all.txt            |   52 +-
 .../inlong-manager/licenses/LICENSE-libfb303.txt   |  504 +--
 .../inlong-manager/licenses/LICENSE-minlog.txt     |   18 +-
 .../licenses/LICENSE-mockito-core.txt              |   40 +-
 .../LICENSE-mybatis-spring-boot-autoconfigure.txt  |   24 +-
 .../LICENSE-mybatis-spring-boot-starter.txt        |   24 +-
 ...ICENSE-pagehelper-spring-boot-autoconfigure.txt |   40 +-
 .../LICENSE-pagehelper-spring-boot-starter.txt     |   40 +-
 .../licenses/LICENSE-protobuf-java.txt             |   62 +-
 .../licenses/LICENSE-pulsar-client.txt             |  608 ++--
 .../licenses/LICENSE-scala-compiler.txt            |  132 +-
 .../licenses/LICENSE-scala-java8-compat_2.11.txt   |   54 +-
 .../licenses/LICENSE-scala-library.txt             |   28 +-
 .../LICENSE-scala-parser-combinators_2.11.txt      |   28 +-
 .../licenses/LICENSE-scala-reflect.txt             |  132 +-
 .../licenses/LICENSE-scala-xml_2.11.txt            |   28 +-
 .../inlong-manager/licenses/LICENSE-scopt_2.11.txt |   40 +-
 licenses/inlong-manager/licenses/LICENSE-shims.txt |  380 +--
 .../inlong-manager/licenses/LICENSE-stax2-api.txt  |   24 +-
 .../notices/NOTICE-jakarta.annotation-api.txt      |   76 +-
 .../notices/NOTICE-jakarta.validation-api.txt      |   84 +-
 .../inlong-manager/notices/NOTICE-netty-all.txt    |  494 +--
 .../inlong-manager/notices/NOTICE-netty-buffer.txt |  526 +--
 .../inlong-manager/notices/NOTICE-netty-codec.txt  |  526 +--
 .../inlong-manager/notices/NOTICE-netty-common.txt |  526 +--
 .../notices/NOTICE-netty-handler.txt               |  526 +--
 .../notices/NOTICE-netty-resolver.txt              |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 .../notices/NOTICE-osgi-resource-locator.txt       |  104 +-
 .../licenses/LICENSE-HdrHistogram.txt              |   80 +-
 .../licenses/LICENSE-RoaringBitmap.txt             |  380 +--
 .../licenses/LICENSE-apache-jsp.txt                |  332 +-
 .../licenses/LICENSE-apache-jstl.txt               |  332 +-
 .../licenses/LICENSE-cos_api-bundle.txt            |  404 +--
 .../licenses/LICENSE-dnsjava.txt                   |   46 +-
 .../licenses/LICENSE-ecj.txt                       |   12 +-
 .../licenses/LICENSE-hppc.txt                      |  404 +--
 .../licenses/LICENSE-iceberg-flink-runtime.txt     |  968 +++---
 .../licenses/LICENSE-jaxb-api.txt                  | 1378 ++++----
 .../licenses/LICENSE-jersey-client-1.9.txt         | 1516 ++++-----
 .../licenses/LICENSE-jersey-core.txt               | 1516 ++++-----
 .../licenses/LICENSE-jersey-guice.txt              |  546 +--
 .../licenses/LICENSE-jersey-json.txt               | 1516 ++++-----
 .../licenses/LICENSE-jersey-server-1.9.txt         | 1516 ++++-----
 .../licenses/LICENSE-jetty-annotations.txt         |  332 +-
 .../licenses/LICENSE-jetty-jaas.txt                |  332 +-
 .../licenses/LICENSE-jetty-jmx.txt                 |  332 +-
 .../licenses/LICENSE-jetty-jndi.txt                |  332 +-
 .../licenses/LICENSE-jetty-plus.txt                |  332 +-
 .../licenses/LICENSE-jetty-rewrite.txt             |  332 +-
 .../licenses/LICENSE-jetty-runner.txt              |  332 +-
 .../licenses/LICENSE-jetty-schemas.txt             |  524 +--
 .../licenses/LICENSE-jetty-sslengine.txt           |  332 +-
 .../licenses/LICENSE-jetty-util-6.1.26.txt         |  332 +-
 .../LICENSE-jetty-util-9.4.6.v20170531.txt         |  332 +-
 .../licenses/LICENSE-jetty-webapp.txt              |  332 +-
 .../licenses/LICENSE-jetty-xml.txt                 |  332 +-
 .../licenses/LICENSE-jetty.txt                     |  332 +-
 .../licenses/LICENSE-jline.txt                     |   68 +-
 .../licenses/LICENSE-joni.txt                      |   40 +-
 .../licenses/LICENSE-jsp-api.txt                   | 1516 ++++-----
 .../licenses/LICENSE-jsr305.txt                    |   14 +-
 .../licenses/LICENSE-jul-to-slf4j.txt              |   46 +-
 .../licenses/LICENSE-libfb303.txt                  |  504 +--
 .../licenses/LICENSE-libthrift.txt                 |  504 +--
 .../licenses/LICENSE-mongo-kafka-connect.txt       |  400 +--
 .../licenses/LICENSE-mssql-jdbc.txt                |   26 +-
 .../licenses/LICENSE-parquet-common.txt            |  434 +--
 .../licenses/LICENSE-parquet-format-structures.txt |  434 +--
 .../licenses/LICENSE-protobuf-java.txt             |   62 +-
 .../licenses/LICENSE-servlet-api.txt               |   46 +-
 .../licenses/LICENSE-shims.txt                     |  380 +--
 .../licenses/LICENSE-slf4j-api.txt                 |   46 +-
 .../licenses/LICENSE-stax2-api.txt                 |   24 +-
 .../licenses/LICENSE-threeten-extra.txt            |   58 +-
 .../licenses/LICENSE-websocket-api.txt             |  332 +-
 .../licenses/LICENSE-websocket-client.txt          |  332 +-
 .../licenses/LICENSE-websocket-common.txt          |  332 +-
 .../licenses/LICENSE-websocket-server.txt          |  332 +-
 .../licenses/LICENSE-websocket-servlet.txt         |  332 +-
 .../notices/NOTICE-cos_api-bundle.txt              |   16 +-
 .../notices/NOTICE-derby.txt                       |  364 +-
 .../notices/NOTICE-iceberg-flink-runtime.txt       |  182 +-
 .../notices/NOTICE-jakarta.validation-api.txt      |   84 +-
 .../notices/NOTICE-jakarta.ws.rs-api.txt           |  120 +-
 .../notices/NOTICE-jersey-client-2.31.txt          |  226 +-
 .../notices/NOTICE-jersey-common.txt               |  226 +-
 .../NOTICE-jersey-container-servlet-core.txt       |  226 +-
 .../notices/NOTICE-jersey-container-servlet.txt    |  226 +-
 .../notices/NOTICE-jersey-hk2.txt                  |  226 +-
 .../notices/NOTICE-jersey-media-jaxb.txt           |  226 +-
 .../notices/NOTICE-jersey-server-2.3.1.txt         |  226 +-
 .../notices/NOTICE-jetty-jmx.txt                   |  234 +-
 .../notices/NOTICE-netty-all.txt                   |  494 +--
 .../notices/NOTICE-netty-buffer.txt                |  526 +--
 .../notices/NOTICE-netty-codec.txt                 |  526 +--
 .../notices/NOTICE-netty-common.txt                |  526 +--
 .../notices/NOTICE-netty-handler.txt               |  526 +--
 .../notices/NOTICE-netty-resolver.txt              |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  526 +--
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 .../notices/NOTICE-osgi-resource-locator.txt       |  104 +-
 .../notices/NOTICE-parquet-column.txt              |  186 +-
 .../notices/NOTICE-parquet-common.txt              |  186 +-
 .../notices/NOTICE-parquet-encoding.txt            |  186 +-
 .../notices/NOTICE-parquet-format-structures.txt   |  186 +-
 .../notices/NOTICE-parquet-hadoop.txt              |  186 +-
 .../licenses/LICENSE-HdrHistogram.txt              |   80 +-
 .../licenses/LICENSE-aopalliance.txt               |    6 +-
 .../licenses/LICENSE-compiler.txt                  |   26 +-
 .../licenses/LICENSE-dnsjava.txt                   |   46 +-
 .../licenses/LICENSE-hppc.txt                      |  404 +--
 .../licenses/LICENSE-javax.annotation-api.txt      | 1516 ++++-----
 .../licenses/LICENSE-javax.ws.rs-api.txt           | 1272 +++----
 .../licenses/LICENSE-jaxb-impl.txt                 |  546 +--
 .../licenses/LICENSE-jcip-annotations.txt          |   16 +-
 .../licenses/LICENSE-jersey-client.txt             |  406 +--
 .../licenses/LICENSE-jersey-core.txt               |  406 +--
 .../licenses/LICENSE-jersey-guice.txt              |  406 +--
 .../licenses/LICENSE-jersey-json.txt               |  406 +--
 .../licenses/LICENSE-jersey-server.txt             |  406 +--
 .../licenses/LICENSE-jersey-servlet.txt            |  406 +--
 .../licenses/LICENSE-jetty-client.txt              |  332 +-
 .../licenses/LICENSE-jetty-jmx.txt                 |  332 +-
 .../licenses/LICENSE-jetty-rewrite.txt             |  332 +-
 .../licenses/LICENSE-jetty-sslengine.txt           |  332 +-
 .../licenses/LICENSE-jetty-util-6.1.26.txt         |  332 +-
 .../licenses/LICENSE-jetty-webapp.txt              |  332 +-
 .../licenses/LICENSE-jetty-xml.txt                 |  332 +-
 .../licenses/LICENSE-jetty.txt                     |  332 +-
 .../licenses/LICENSE-jline.txt                     |   68 +-
 .../licenses/LICENSE-jopt-simple.txt               |   46 +-
 .../licenses/LICENSE-jpam.txt                      |   28 +-
 .../licenses/LICENSE-jsch.txt                      |   58 +-
 .../licenses/LICENSE-jsp-api.txt                   | 1516 ++++-----
 .../licenses/LICENSE-jsr305.txt                    |   14 +-
 .../licenses/LICENSE-leveldbjni-all.txt            |   52 +-
 .../licenses/LICENSE-libfb303.txt                  |  504 +--
 .../licenses/LICENSE-libthrift.txt                 |  504 +--
 .../licenses/LICENSE-logback-classic.txt           |   28 +-
 .../licenses/LICENSE-logback-core.txt              |   28 +-
 .../licenses/LICENSE-protobuf-java-util.txt        |   62 +-
 .../licenses/LICENSE-protobuf-java.txt             |   62 +-
 .../licenses/LICENSE-pulsar-client.txt             |  608 ++--
 .../licenses/LICENSE-servlet-api.txt               |   46 +-
 .../licenses/LICENSE-slf4j-api.txt                 |   46 +-
 .../licenses/LICENSE-stax2-api.txt                 |   24 +-
 .../notices/NOTICE-aircompressor.txt               |   72 +-
 .../notices/NOTICE-javax.ws.rs-api.txt             |  120 +-
 .../notices/NOTICE-jetty-client.txt                |  234 +-
 .../notices/NOTICE-jetty-jmx.txt                   |  234 +-
 .../notices/NOTICE-netty-all.txt                   |  494 +--
 .../notices/NOTICE-netty-buffer.txt                |  526 +--
 .../notices/NOTICE-netty-codec.txt                 |  526 +--
 .../notices/NOTICE-netty-common.txt                |  526 +--
 .../notices/NOTICE-netty-handler.txt               |  526 +--
 .../notices/NOTICE-netty-resolver.txt              |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 licenses/inlong-sort/LICENSE                       |  848 ++---
 licenses/inlong-sort/NOTICE                        |  462 +--
 licenses/inlong-sort/licenses/LICENSE-jsr305.txt   |   14 +-
 licenses/inlong-tubemq-manager/LICENSE             |  986 +++---
 licenses/inlong-tubemq-manager/NOTICE              | 1022 +++---
 .../licenses/LICENSE-aspectjweaver.txt             |  556 ++--
 .../licenses/LICENSE-jakarta.persistence-api.txt   | 1272 +++----
 .../licenses/LICENSE-jakarta.transaction-api.txt   | 1274 +++----
 .../licenses/LICENSE-jul-to-slf4j.txt              |   46 +-
 .../licenses/LICENSE-slf4j-api.txt                 |   46 +-
 .../notices/NOTICE-jakarta.persistence-api.txt     |  106 +-
 licenses/inlong-tubemq-server/LICENSE              |  940 +++---
 licenses/inlong-tubemq-server/NOTICE               |  814 ++---
 .../licenses/LICENSE-dom4j.txt                     |   78 +-
 .../licenses/LICENSE-jsr305.txt                    |   14 +-
 .../licenses/LICENSE-protobuf-java.txt             |   64 +-
 .../licenses/LICENSE-slf4j-api.txt                 |   48 +-
 .../notices/NOTICE-netty-buffer.txt                |  526 +--
 .../notices/NOTICE-netty-codec.txt                 |  526 +--
 .../notices/NOTICE-netty-common.txt                |  526 +--
 .../notices/NOTICE-netty-handler.txt               |  526 +--
 .../notices/NOTICE-netty-resolver.txt              |  526 +--
 .../notices/NOTICE-netty-tcnative-classes.txt      |  100 +-
 .../NOTICE-netty-transport-classes-epoll.txt       |  526 +--
 .../NOTICE-netty-transport-native-epoll.txt        |  526 +--
 .../NOTICE-netty-transport-native-unix-common.txt  |  526 +--
 .../notices/NOTICE-netty-transport.txt             |  526 +--
 565 files changed, 101275 insertions(+), 101313 deletions(-)

diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 759111fb0..000000000
--- a/.gitattributes
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set the default behavior, in case people don't have core.autocrlf set.
-* text=auto
-
-# Declare files that will always have LF line endings on checkout.
-* text eol=lf
-
-# Explicitly declare text files you want to always be normalized and converted
-# to native line endings on checkout.
-*.java text
-*.cpp text
-*.py text
-*.c text
-*.h text
-*.md text
-*.xml text
-*.txt text
-
-# Denote all files that are truly binary and should not be modified.
-*.png binary
-*.jpg binary
-*.jpeg binary
\ No newline at end of file
diff --git a/docker/docker-compose/.env b/docker/docker-compose/.env
index dce60412c..17ff3afa1 100644
--- a/docker/docker-compose/.env
+++ b/docker/docker-compose/.env
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# you can choose the released version number, like 1.3.0
-# the latest tag corresponds to the master branch
-VERSION_TAG=latest
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# you can choose the released version number, like 1.3.0
+# the latest tag corresponds to the master branch
+VERSION_TAG=latest
diff --git a/inlong-agent/agent-common/src/main/java/org/apache/inlong/agent/utils/GsonUtil.java b/inlong-agent/agent-common/src/main/java/org/apache/inlong/agent/utils/GsonUtil.java
index adc19a7a9..5e810199f 100644
--- a/inlong-agent/agent-common/src/main/java/org/apache/inlong/agent/utils/GsonUtil.java
+++ b/inlong-agent/agent-common/src/main/java/org/apache/inlong/agent/utils/GsonUtil.java
@@ -1,57 +1,57 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.agent.utils;
-
-import com.google.gson.Gson;
-
-/**
- * GsonUtil : Gson instances are Thread-safe, so you can reuse them freely across multiple threads.
- */
-public class GsonUtil {
-
-    private static final Gson gson = new Gson();
-
-    /**
-     * instantiation is not allowed
-     */
-    private GsonUtil() {
-        throw new UnsupportedOperationException("This is a utility class, so instantiation is not allowed");
-    }
-
-    /**
-     * This method deserializes the specified Json into an object of the specified class.
-     *
-     * @param json     json
-     * @param classOfT class of T
-     * @param <T>      T
-     * @return T
-     */
-    public static <T> T fromJson(String json, Class<T> classOfT) {
-        return gson.fromJson(json, classOfT);
-    }
-
-    /**
-     * This method serializes the specified object into its equivalent Json representation.
-     *
-     * @param obj obj
-     * @return json content
-     */
-    public static String toJson(Object obj) {
-        return gson.toJson(obj);
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.agent.utils;
+
+import com.google.gson.Gson;
+
+/**
+ * GsonUtil : Gson instances are Thread-safe, so you can reuse them freely across multiple threads.
+ */
+public class GsonUtil {
+
+    private static final Gson gson = new Gson();
+
+    /**
+     * instantiation is not allowed
+     */
+    private GsonUtil() {
+        throw new UnsupportedOperationException("This is a utility class, so instantiation is not allowed");
+    }
+
+    /**
+     * This method deserializes the specified Json into an object of the specified class.
+     *
+     * @param json     json
+     * @param classOfT class of T
+     * @param <T>      T
+     * @return T
+     */
+    public static <T> T fromJson(String json, Class<T> classOfT) {
+        return gson.fromJson(json, classOfT);
+    }
+
+    /**
+     * This method serializes the specified object into its equivalent Json representation.
+     *
+     * @param obj obj
+     * @return json content
+     */
+    public static String toJson(Object obj) {
+        return gson.toJson(obj);
+    }
+}
diff --git a/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/OracleSource.java b/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/OracleSource.java
index 4be871833..b78f18dac 100644
--- a/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/OracleSource.java
+++ b/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/OracleSource.java
@@ -1,48 +1,48 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.agent.plugin.sources;
-
-import org.apache.inlong.agent.conf.JobProfile;
-import org.apache.inlong.agent.plugin.Reader;
-import org.apache.inlong.agent.plugin.sources.reader.OracleReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Oracle SQL source
- */
-public class OracleSource extends AbstractSource {
-
-    private static final Logger logger = LoggerFactory.getLogger(OracleSource.class);
-
-    public OracleSource() {
-    }
-
-    @Override
-    public List<Reader> split(JobProfile conf) {
-        super.init(conf);
-        Reader oracleReader = new OracleReader();
-        List<Reader> readerList = new ArrayList<>();
-        readerList.add(oracleReader);
-        sourceMetric.sourceSuccessCount.incrementAndGet();
-        return readerList;
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.agent.plugin.sources;
+
+import org.apache.inlong.agent.conf.JobProfile;
+import org.apache.inlong.agent.plugin.Reader;
+import org.apache.inlong.agent.plugin.sources.reader.OracleReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Oracle SQL source
+ */
+public class OracleSource extends AbstractSource {
+
+    private static final Logger logger = LoggerFactory.getLogger(OracleSource.class);
+
+    public OracleSource() {
+    }
+
+    @Override
+    public List<Reader> split(JobProfile conf) {
+        super.init(conf);
+        Reader oracleReader = new OracleReader();
+        List<Reader> readerList = new ArrayList<>();
+        readerList.add(oracleReader);
+        sourceMetric.sourceSuccessCount.incrementAndGet();
+        return readerList;
+    }
+}
diff --git a/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/reader/OracleReader.java b/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/reader/OracleReader.java
index a8e1d4904..bfc80542a 100644
--- a/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/reader/OracleReader.java
+++ b/inlong-agent/agent-plugins/src/main/java/org/apache/inlong/agent/plugin/sources/reader/OracleReader.java
@@ -1,308 +1,308 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.agent.plugin.sources.reader;
-
-import com.google.common.base.Preconditions;
-import com.google.gson.Gson;
-import io.debezium.connector.oracle.OracleConnector;
-import io.debezium.engine.ChangeEvent;
-import io.debezium.engine.DebeziumEngine;
-import io.debezium.relational.history.FileDatabaseHistory;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.inlong.agent.conf.AgentConfiguration;
-import org.apache.inlong.agent.conf.JobProfile;
-import org.apache.inlong.agent.constant.AgentConstants;
-import org.apache.inlong.agent.constant.OracleConstants;
-import org.apache.inlong.agent.constant.SnapshotModeConstants;
-import org.apache.inlong.agent.message.DefaultMessage;
-import org.apache.inlong.agent.metrics.audit.AuditUtils;
-import org.apache.inlong.agent.plugin.Message;
-import org.apache.inlong.agent.plugin.sources.snapshot.OracleSnapshotBase;
-import org.apache.inlong.agent.plugin.utils.InLongDatabaseHistory;
-import org.apache.inlong.agent.plugin.utils.InLongFileOffsetBackingStore;
-import org.apache.inlong.agent.pojo.DebeziumFormat;
-import org.apache.inlong.agent.utils.AgentUtils;
-import org.apache.inlong.agent.utils.GsonUtil;
-import org.apache.kafka.connect.storage.FileOffsetBackingStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import static org.apache.inlong.agent.constant.CommonConstants.DEFAULT_MAP_CAPACITY;
-import static org.apache.inlong.agent.constant.CommonConstants.PROXY_KEY_DATA;
-
-/**
- * Read data from Oracle database by Debezium
- */
-public class OracleReader extends AbstractReader {
-
-    public static final String ORACLE_READER_TAG_NAME = "AgentOracleMetric";
-    public static final String JOB_DATABASE_USER = "job.oracleJob.user";
-    public static final String JOB_DATABASE_PASSWORD = "job.oracleJob.password";
-    public static final String JOB_DATABASE_HOSTNAME = "job.oracleJob.hostname";
-    public static final String JOB_DATABASE_PORT = "job.oracleJob.port";
-    public static final String JOB_DATABASE_SNAPSHOT_MODE = "job.oracleJob.snapshot.mode";
-    public static final String JOB_DATABASE_SERVER_NAME = "job.oracleJob.serverName";
-    public static final String JOB_DATABASE_QUEUE_SIZE = "job.oracleJob.queueSize";
-    public static final String JOB_DATABASE_OFFSETS = "job.oracleJob.offsets";
-    public static final String JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE = "job.oracleJob.offset.specificOffsetFile";
-    public static final String JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS = "job.oracleJob.offset.specificOffsetPos";
-    public static final String JOB_DATABASE_DBNAME = "job.oracleJob.dbname";
-    public static final String JOB_DATABASE_STORE_OFFSET_INTERVAL_MS = "job.oracleJob.offset.intervalMs";
-    public static final String JOB_DATABASE_STORE_HISTORY_FILENAME = "job.oracleJob.history.filename";
-
-    private static final Gson GSON = new Gson();
-    public static final String ORACLE = "oracle";
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(SqlReader.class);
-
-    private final AgentConfiguration agentConf = AgentConfiguration.getAgentConf();
-
-    private String databaseStoreHistoryName;
-    private String instanceId;
-    private String dbName;
-    private String serverName;
-    private String userName;
-    private String password;
-    private String hostName;
-    private String port;
-    private String offsetFlushIntervalMs;
-    private String offsetStoreFileName;
-    private String snapshotMode;
-    private String offset;
-    private String specificOffsetFile;
-    private String specificOffsetPos;
-    private OracleSnapshotBase oracleSnapshot;
-    private boolean finished = false;
-    private ExecutorService executor;
-
-    /**
-     * pair.left : table name
-     * pair.right : actual data
-     */
-    private LinkedBlockingQueue<Pair<String, String>> oracleMessageQueue;
-    private JobProfile jobProfile;
-    private boolean destroyed = false;
-
-    public OracleReader() {
-    }
-
-    @Override
-    public Message read() {
-        if (!oracleMessageQueue.isEmpty()) {
-            return getOracleMessage();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * poll message from buffer pool
-     *
-     * @return org.apache.inlong.agent.plugin.Message
-     */
-    private DefaultMessage getOracleMessage() {
-        // Retrieves and removes the head of this queue,
-        // or returns null if this queue is empty.
-        Pair<String, String> message = oracleMessageQueue.poll();
-        if (Objects.isNull(message)) {
-            return null;
-        }
-        Map<String, String> header = new HashMap<>(DEFAULT_MAP_CAPACITY);
-        header.put(PROXY_KEY_DATA, message.getKey());
-        return new DefaultMessage(GsonUtil.toJson(message.getValue()).getBytes(StandardCharsets.UTF_8), header);
-    }
-
-    public boolean isDestroyed() {
-        return destroyed;
-    }
-
-    @Override
-    public boolean isFinished() {
-        return finished;
-    }
-
-    @Override
-    public String getReadSource() {
-        return instanceId;
-    }
-
-    @Override
-    public void setReadTimeout(long mill) {
-
-    }
-
-    @Override
-    public void setWaitMillisecond(long millis) {
-
-    }
-
-    @Override
-    public String getSnapshot() {
-        if (oracleSnapshot != null) {
-            return oracleSnapshot.getSnapshot();
-        } else {
-            return StringUtils.EMPTY;
-        }
-    }
-
-    @Override
-    public void finishRead() {
-        this.finished = true;
-    }
-
-    @Override
-    public boolean isSourceExist() {
-        return true;
-    }
-
-    private String tryToInitAndGetHistoryPath() {
-        String historyPath = agentConf.get(
-                AgentConstants.AGENT_HISTORY_PATH, AgentConstants.DEFAULT_AGENT_HISTORY_PATH);
-        String parentPath = agentConf.get(
-                AgentConstants.AGENT_HOME, AgentConstants.DEFAULT_AGENT_HOME);
-        return AgentUtils.makeDirsIfNotExist(historyPath, parentPath).getAbsolutePath();
-    }
-
-    @Override
-    public void init(JobProfile jobConf) {
-        super.init(jobConf);
-        jobProfile = jobConf;
-        LOGGER.info("init oracle reader with jobConf {}", jobConf.toJsonStr());
-        userName = jobConf.get(JOB_DATABASE_USER);
-        password = jobConf.get(JOB_DATABASE_PASSWORD);
-        hostName = jobConf.get(JOB_DATABASE_HOSTNAME);
-        port = jobConf.get(JOB_DATABASE_PORT);
-        dbName = jobConf.get(JOB_DATABASE_DBNAME);
-        serverName = jobConf.get(JOB_DATABASE_SERVER_NAME);
-        instanceId = jobConf.getInstanceId();
-        offsetFlushIntervalMs = jobConf.get(JOB_DATABASE_STORE_OFFSET_INTERVAL_MS, "100000");
-        offsetStoreFileName = jobConf.get(JOB_DATABASE_STORE_HISTORY_FILENAME,
-                tryToInitAndGetHistoryPath()) + "/offset.dat" + instanceId;
-        snapshotMode = jobConf.get(JOB_DATABASE_SNAPSHOT_MODE, OracleConstants.INITIAL);
-        oracleMessageQueue = new LinkedBlockingQueue<>(jobConf.getInt(JOB_DATABASE_QUEUE_SIZE, 1000));
-        finished = false;
-
-        databaseStoreHistoryName = jobConf.get(JOB_DATABASE_STORE_HISTORY_FILENAME,
-                tryToInitAndGetHistoryPath()) + "/history.dat" + jobConf.getInstanceId();
-        offset = jobConf.get(JOB_DATABASE_OFFSETS, "");
-        specificOffsetFile = jobConf.get(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE, "");
-        specificOffsetPos = jobConf.get(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS, "-1");
-
-        oracleSnapshot = new OracleSnapshotBase(offsetStoreFileName);
-        oracleSnapshot.save(offset, oracleSnapshot.getFile());
-
-        Properties props = getEngineProps();
-
-        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(
-                        io.debezium.engine.format.Json.class)
-                .using(props)
-                .notifying((records, committer) -> {
-                    try {
-                        for (ChangeEvent<String, String> record : records) {
-                            DebeziumFormat debeziumFormat = GSON
-                                    .fromJson(record.value(), DebeziumFormat.class);
-                            oracleMessageQueue.put(Pair.of(debeziumFormat.getSource().getTable(), record.value()));
-                            committer.markProcessed(record);
-                        }
-                        committer.markBatchFinished();
-                        long dataSize = records.stream().mapToLong(c -> c.value().length()).sum();
-                        AuditUtils.add(AuditUtils.AUDIT_ID_AGENT_READ_SUCCESS, inlongGroupId, inlongStreamId,
-                                System.currentTimeMillis(), records.size(), dataSize);
-                        readerMetric.pluginReadSuccessCount.addAndGet(records.size());
-                        readerMetric.pluginReadCount.addAndGet(records.size());
-                    } catch (Exception e) {
-                        readerMetric.pluginReadFailCount.addAndGet(records.size());
-                        readerMetric.pluginReadCount.addAndGet(records.size());
-                        LOGGER.error("parse oracle change event error", e);
-                    }
-                })
-                .using((success, message, error) -> {
-                    if (!success) {
-                        LOGGER.error("oracle job {} has error: {}", instanceId, message, error);
-                    }
-                }).build();
-
-        executor = Executors.newSingleThreadExecutor();
-        executor.execute(engine);
-
-        LOGGER.info("get initial snapshot of job {}, snapshot {}", instanceId, getSnapshot());
-    }
-
-    private Properties getEngineProps() {
-        Properties props = new Properties();
-        props.setProperty("name", "engine" + instanceId);
-        props.setProperty("connector.class", OracleConnector.class.getCanonicalName());
-        props.setProperty("database.hostname", hostName);
-        props.setProperty("database.port", port);
-        props.setProperty("database.user", userName);
-        props.setProperty("database.password", password);
-        props.setProperty("database.dbname", dbName);
-        props.setProperty("database.server.name", serverName);
-        props.setProperty("offset.flush.interval.ms", offsetFlushIntervalMs);
-        props.setProperty("database.snapshot.mode", snapshotMode);
-        props.setProperty("key.converter.schemas.enable", "false");
-        props.setProperty("value.converter.schemas.enable", "false");
-        props.setProperty("snapshot.mode", snapshotMode);
-        props.setProperty("offset.storage.file.filename", offsetStoreFileName);
-        props.setProperty("database.history.file.filename", databaseStoreHistoryName);
-        if (SnapshotModeConstants.SPECIFIC_OFFSETS.equals(snapshotMode)) {
-            Preconditions.checkNotNull(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE,
-                    JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE + " cannot be null");
-            Preconditions.checkNotNull(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS,
-                    JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS + " cannot be null");
-            props.setProperty("offset.storage", InLongFileOffsetBackingStore.class.getCanonicalName());
-            props.setProperty(InLongFileOffsetBackingStore.OFFSET_STATE_VALUE,
-                    serializeOffset(instanceId, specificOffsetFile, specificOffsetPos));
-            props.setProperty("database.history", InLongDatabaseHistory.class.getCanonicalName());
-        } else {
-            props.setProperty("offset.storage", FileOffsetBackingStore.class.getCanonicalName());
-            props.setProperty("database.history", FileDatabaseHistory.class.getCanonicalName());
-        }
-        props.setProperty("tombstones.on.delete", "false");
-        props.setProperty("converters", "datetime");
-        props.setProperty("datetime.type", "org.apache.inlong.agent.plugin.utils.BinlogTimeConverter");
-        props.setProperty("datetime.format.date", "yyyy-MM-dd");
-        props.setProperty("datetime.format.time", "HH:mm:ss");
-        props.setProperty("datetime.format.datetime", "yyyy-MM-dd HH:mm:ss");
-        props.setProperty("datetime.format.timestamp", "yyyy-MM-dd HH:mm:ss");
-
-        LOGGER.info("oracle job {} start with props {}", jobProfile.getInstanceId(), props);
-        return props;
-    }
-
-    @Override
-    public void destroy() {
-        synchronized (this) {
-            if (!destroyed) {
-                this.executor.shutdownNow();
-                this.oracleSnapshot.close();
-                this.destroyed = true;
-            }
-        }
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.agent.plugin.sources.reader;
+
+import com.google.common.base.Preconditions;
+import com.google.gson.Gson;
+import io.debezium.connector.oracle.OracleConnector;
+import io.debezium.engine.ChangeEvent;
+import io.debezium.engine.DebeziumEngine;
+import io.debezium.relational.history.FileDatabaseHistory;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.inlong.agent.conf.AgentConfiguration;
+import org.apache.inlong.agent.conf.JobProfile;
+import org.apache.inlong.agent.constant.AgentConstants;
+import org.apache.inlong.agent.constant.OracleConstants;
+import org.apache.inlong.agent.constant.SnapshotModeConstants;
+import org.apache.inlong.agent.message.DefaultMessage;
+import org.apache.inlong.agent.metrics.audit.AuditUtils;
+import org.apache.inlong.agent.plugin.Message;
+import org.apache.inlong.agent.plugin.sources.snapshot.OracleSnapshotBase;
+import org.apache.inlong.agent.plugin.utils.InLongDatabaseHistory;
+import org.apache.inlong.agent.plugin.utils.InLongFileOffsetBackingStore;
+import org.apache.inlong.agent.pojo.DebeziumFormat;
+import org.apache.inlong.agent.utils.AgentUtils;
+import org.apache.inlong.agent.utils.GsonUtil;
+import org.apache.kafka.connect.storage.FileOffsetBackingStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import static org.apache.inlong.agent.constant.CommonConstants.DEFAULT_MAP_CAPACITY;
+import static org.apache.inlong.agent.constant.CommonConstants.PROXY_KEY_DATA;
+
+/**
+ * Read data from Oracle database by Debezium
+ */
+public class OracleReader extends AbstractReader {
+
+    public static final String ORACLE_READER_TAG_NAME = "AgentOracleMetric";
+    public static final String JOB_DATABASE_USER = "job.oracleJob.user";
+    public static final String JOB_DATABASE_PASSWORD = "job.oracleJob.password";
+    public static final String JOB_DATABASE_HOSTNAME = "job.oracleJob.hostname";
+    public static final String JOB_DATABASE_PORT = "job.oracleJob.port";
+    public static final String JOB_DATABASE_SNAPSHOT_MODE = "job.oracleJob.snapshot.mode";
+    public static final String JOB_DATABASE_SERVER_NAME = "job.oracleJob.serverName";
+    public static final String JOB_DATABASE_QUEUE_SIZE = "job.oracleJob.queueSize";
+    public static final String JOB_DATABASE_OFFSETS = "job.oracleJob.offsets";
+    public static final String JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE = "job.oracleJob.offset.specificOffsetFile";
+    public static final String JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS = "job.oracleJob.offset.specificOffsetPos";
+    public static final String JOB_DATABASE_DBNAME = "job.oracleJob.dbname";
+    public static final String JOB_DATABASE_STORE_OFFSET_INTERVAL_MS = "job.oracleJob.offset.intervalMs";
+    public static final String JOB_DATABASE_STORE_HISTORY_FILENAME = "job.oracleJob.history.filename";
+
+    private static final Gson GSON = new Gson();
+    public static final String ORACLE = "oracle";
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(OracleReader.class);
+
+    private final AgentConfiguration agentConf = AgentConfiguration.getAgentConf();
+
+    private String databaseStoreHistoryName;
+    private String instanceId;
+    private String dbName;
+    private String serverName;
+    private String userName;
+    private String password;
+    private String hostName;
+    private String port;
+    private String offsetFlushIntervalMs;
+    private String offsetStoreFileName;
+    private String snapshotMode;
+    private String offset;
+    private String specificOffsetFile;
+    private String specificOffsetPos;
+    private OracleSnapshotBase oracleSnapshot;
+    private boolean finished = false;
+    private ExecutorService executor;
+
+    /**
+     * pair.left : table name
+     * pair.right : actual data
+     */
+    private LinkedBlockingQueue<Pair<String, String>> oracleMessageQueue;
+    private JobProfile jobProfile;
+    private boolean destroyed = false;
+
+    public OracleReader() {
+    }
+
+    @Override
+    public Message read() {
+        if (!oracleMessageQueue.isEmpty()) {
+            return getOracleMessage();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * poll message from buffer pool
+     *
+     * @return org.apache.inlong.agent.plugin.Message
+     */
+    private DefaultMessage getOracleMessage() {
+        // Retrieves and removes the head of this queue,
+        // or returns null if this queue is empty.
+        Pair<String, String> message = oracleMessageQueue.poll();
+        if (Objects.isNull(message)) {
+            return null;
+        }
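+        // the pair key is the source table name, which is carried in the message header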
+        Map<String, String> header = new HashMap<>(DEFAULT_MAP_CAPACITY);
+        header.put(PROXY_KEY_DATA, message.getKey());
+        return new DefaultMessage(GsonUtil.toJson(message.getValue()).getBytes(StandardCharsets.UTF_8), header);
+    }
+
+    public boolean isDestroyed() {
+        return destroyed;
+    }
+
+    @Override
+    public boolean isFinished() {
+        return finished;
+    }
+
+    @Override
+    public String getReadSource() {
+        return instanceId;
+    }
+
+    @Override
+    public void setReadTimeout(long mill) {
+
+    }
+
+    @Override
+    public void setWaitMillisecond(long millis) {
+
+    }
+
+    @Override
+    public String getSnapshot() {
+        if (oracleSnapshot != null) {
+            return oracleSnapshot.getSnapshot();
+        } else {
+            return StringUtils.EMPTY;
+        }
+    }
+
+    @Override
+    public void finishRead() {
+        this.finished = true;
+    }
+
+    @Override
+    public boolean isSourceExist() {
+        return true;
+    }
+
+    private String tryToInitAndGetHistoryPath() {
+        String historyPath = agentConf.get(
+                AgentConstants.AGENT_HISTORY_PATH, AgentConstants.DEFAULT_AGENT_HISTORY_PATH);
+        String parentPath = agentConf.get(
+                AgentConstants.AGENT_HOME, AgentConstants.DEFAULT_AGENT_HOME);
+        return AgentUtils.makeDirsIfNotExist(historyPath, parentPath).getAbsolutePath();
+    }
+
+    @Override
+    public void init(JobProfile jobConf) {
+        super.init(jobConf);
+        jobProfile = jobConf;
+        LOGGER.info("init oracle reader with jobConf {}", jobConf.toJsonStr());
+        userName = jobConf.get(JOB_DATABASE_USER);
+        password = jobConf.get(JOB_DATABASE_PASSWORD);
+        hostName = jobConf.get(JOB_DATABASE_HOSTNAME);
+        port = jobConf.get(JOB_DATABASE_PORT);
+        dbName = jobConf.get(JOB_DATABASE_DBNAME);
+        serverName = jobConf.get(JOB_DATABASE_SERVER_NAME);
+        instanceId = jobConf.getInstanceId();
+        offsetFlushIntervalMs = jobConf.get(JOB_DATABASE_STORE_OFFSET_INTERVAL_MS, "100000");
+        offsetStoreFileName = jobConf.get(JOB_DATABASE_STORE_HISTORY_FILENAME,
+                tryToInitAndGetHistoryPath()) + "/offset.dat" + instanceId;
+        snapshotMode = jobConf.get(JOB_DATABASE_SNAPSHOT_MODE, OracleConstants.INITIAL);
+        oracleMessageQueue = new LinkedBlockingQueue<>(jobConf.getInt(JOB_DATABASE_QUEUE_SIZE, 1000));
+        finished = false;
+
+        databaseStoreHistoryName = jobConf.get(JOB_DATABASE_STORE_HISTORY_FILENAME,
+                tryToInitAndGetHistoryPath()) + "/history.dat" + jobConf.getInstanceId();
+        offset = jobConf.get(JOB_DATABASE_OFFSETS, "");
+        specificOffsetFile = jobConf.get(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE, "");
+        specificOffsetPos = jobConf.get(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS, "-1");
+
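+        // persist the configured offset (may be empty) to the offset store file before the engine starts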
+        oracleSnapshot = new OracleSnapshotBase(offsetStoreFileName);
+        oracleSnapshot.save(offset, oracleSnapshot.getFile());
+
+        Properties props = getEngineProps();
+
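+        // build an embedded Debezium engine that emits JSON change events; each event is queued by
+        // its source table and acknowledged once the whole batch is processed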
+        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(
+                        io.debezium.engine.format.Json.class)
+                .using(props)
+                .notifying((records, committer) -> {
+                    try {
+                        for (ChangeEvent<String, String> record : records) {
+                            DebeziumFormat debeziumFormat = GSON
+                                    .fromJson(record.value(), DebeziumFormat.class);
+                            oracleMessageQueue.put(Pair.of(debeziumFormat.getSource().getTable(), record.value()));
+                            committer.markProcessed(record);
+                        }
+                        committer.markBatchFinished();
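+                        // the batch is fully acknowledged; report it to audit and reader metrics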
+                        long dataSize = records.stream().mapToLong(c -> c.value().length()).sum();
+                        AuditUtils.add(AuditUtils.AUDIT_ID_AGENT_READ_SUCCESS, inlongGroupId, inlongStreamId,
+                                System.currentTimeMillis(), records.size(), dataSize);
+                        readerMetric.pluginReadSuccessCount.addAndGet(records.size());
+                        readerMetric.pluginReadCount.addAndGet(records.size());
+                    } catch (Exception e) {
+                        readerMetric.pluginReadFailCount.addAndGet(records.size());
+                        readerMetric.pluginReadCount.addAndGet(records.size());
+                        LOGGER.error("parse oracle change event error", e);
+                    }
+                })
+                .using((success, message, error) -> {
+                    if (!success) {
+                        LOGGER.error("oracle job {} has error: {}", instanceId, message, error);
+                    }
+                }).build();
+
+        executor = Executors.newSingleThreadExecutor();
+        executor.execute(engine);
+
+        LOGGER.info("get initial snapshot of job {}, snapshot {}", instanceId, getSnapshot());
+    }
+
+    private Properties getEngineProps() {
+        Properties props = new Properties();
+        props.setProperty("name", "engine" + instanceId);
+        props.setProperty("connector.class", OracleConnector.class.getCanonicalName());
+        props.setProperty("database.hostname", hostName);
+        props.setProperty("database.port", port);
+        props.setProperty("database.user", userName);
+        props.setProperty("database.password", password);
+        props.setProperty("database.dbname", dbName);
+        props.setProperty("database.server.name", serverName);
+        props.setProperty("offset.flush.interval.ms", offsetFlushIntervalMs);
+        props.setProperty("database.snapshot.mode", snapshotMode);
+        props.setProperty("key.converter.schemas.enable", "false");
+        props.setProperty("value.converter.schemas.enable", "false");
+        props.setProperty("snapshot.mode", snapshotMode);
+        props.setProperty("offset.storage.file.filename", offsetStoreFileName);
+        props.setProperty("database.history.file.filename", databaseStoreHistoryName);
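+        // for the specific-offsets mode, switch to the InLong offset/history stores so the engine resumes from the user-provided offset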
+        if (SnapshotModeConstants.SPECIFIC_OFFSETS.equals(snapshotMode)) {
+            Preconditions.checkNotNull(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE,
+                    JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_FILE + " cannot be null");
+            Preconditions.checkNotNull(JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS,
+                    JOB_DATABASE_OFFSET_SPECIFIC_OFFSET_POS + " cannot be null");
+            props.setProperty("offset.storage", InLongFileOffsetBackingStore.class.getCanonicalName());
+            props.setProperty(InLongFileOffsetBackingStore.OFFSET_STATE_VALUE,
+                    serializeOffset(instanceId, specificOffsetFile, specificOffsetPos));
+            props.setProperty("database.history", InLongDatabaseHistory.class.getCanonicalName());
+        } else {
+            props.setProperty("offset.storage", FileOffsetBackingStore.class.getCanonicalName());
+            props.setProperty("database.history", FileDatabaseHistory.class.getCanonicalName());
+        }
+        props.setProperty("tombstones.on.delete", "false");
+        props.setProperty("converters", "datetime");
+        props.setProperty("datetime.type", "org.apache.inlong.agent.plugin.utils.BinlogTimeConverter");
+        props.setProperty("datetime.format.date", "yyyy-MM-dd");
+        props.setProperty("datetime.format.time", "HH:mm:ss");
+        props.setProperty("datetime.format.datetime", "yyyy-MM-dd HH:mm:ss");
+        props.setProperty("datetime.format.timestamp", "yyyy-MM-dd HH:mm:ss");
+
+        LOGGER.info("oracle job {} start with props {}", jobProfile.getInstanceId(), props);
+        return props;
+    }
+
+    @Override
+    public void destroy() {
+        synchronized (this) {
+            if (!destroyed) {
+                this.executor.shutdownNow();
+                this.oracleSnapshot.close();
+                this.destroyed = true;
+            }
+        }
+    }
+}
diff --git a/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleConnect.java b/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleConnect.java
index e3e093af0..f577ed5db 100644
--- a/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleConnect.java
+++ b/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleConnect.java
@@ -1,62 +1,62 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.agent.plugin.sources;
-
-import org.apache.inlong.agent.conf.JobProfile;
-import org.apache.inlong.agent.constant.JobConstants;
-import org.apache.inlong.agent.plugin.Message;
-import org.apache.inlong.agent.plugin.sources.reader.OracleReader;
-import org.junit.Ignore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.UUID;
-
-import static org.apache.inlong.agent.constant.CommonConstants.PROXY_INLONG_GROUP_ID;
-import static org.apache.inlong.agent.constant.CommonConstants.PROXY_INLONG_STREAM_ID;
-
-/**
- * Test cases for {@link OracleReader}.
- */
-public class TestOracleConnect {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(TestOracleConnect.class);
-
-    @Ignore
-    public void testOracle() {
-        JobProfile jobProfile = new JobProfile();
-        jobProfile.set("job.oracleJob.hostname", "localhost");
-        jobProfile.set("job.oracleJob.port", "1521");
-        jobProfile.set("job.oracleJob.user", "c##dbzuser");
-        jobProfile.set("job.oracleJob.password", "dbz");
-        jobProfile.set("job.oracleJob.sid", "ORCLCDB");
-        jobProfile.set("job.oracleJob.dbname", "ORCLCDB");
-        jobProfile.set("job.oracleJob.serverName", "server1");
-        jobProfile.set(JobConstants.JOB_INSTANCE_ID, UUID.randomUUID().toString());
-        jobProfile.set(PROXY_INLONG_GROUP_ID, UUID.randomUUID().toString());
-        jobProfile.set(PROXY_INLONG_STREAM_ID, UUID.randomUUID().toString());
-        OracleReader oracleReader = new OracleReader();
-        oracleReader.init(jobProfile);
-        while (true) {
-            Message message = oracleReader.read();
-            if (message != null) {
-                LOGGER.info("event content: {}", message);
-            }
-        }
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.agent.plugin.sources;
+
+import org.apache.inlong.agent.conf.JobProfile;
+import org.apache.inlong.agent.constant.JobConstants;
+import org.apache.inlong.agent.plugin.Message;
+import org.apache.inlong.agent.plugin.sources.reader.OracleReader;
+import org.junit.Ignore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.UUID;
+
+import static org.apache.inlong.agent.constant.CommonConstants.PROXY_INLONG_GROUP_ID;
+import static org.apache.inlong.agent.constant.CommonConstants.PROXY_INLONG_STREAM_ID;
+
+/**
+ * Test cases for {@link OracleReader}.
+ */
+public class TestOracleConnect {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(TestOracleConnect.class);
+
+    @Ignore
+    public void testOracle() {
+        JobProfile jobProfile = new JobProfile();
+        jobProfile.set("job.oracleJob.hostname", "localhost");
+        jobProfile.set("job.oracleJob.port", "1521");
+        jobProfile.set("job.oracleJob.user", "c##dbzuser");
+        jobProfile.set("job.oracleJob.password", "dbz");
+        jobProfile.set("job.oracleJob.sid", "ORCLCDB");
+        jobProfile.set("job.oracleJob.dbname", "ORCLCDB");
+        jobProfile.set("job.oracleJob.serverName", "server1");
+        jobProfile.set(JobConstants.JOB_INSTANCE_ID, UUID.randomUUID().toString());
+        jobProfile.set(PROXY_INLONG_GROUP_ID, UUID.randomUUID().toString());
+        jobProfile.set(PROXY_INLONG_STREAM_ID, UUID.randomUUID().toString());
+        OracleReader oracleReader = new OracleReader();
+        oracleReader.init(jobProfile);
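+        // manual test: polls change events in an endless loop, so it only stops with the process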
+        while (true) {
+            Message message = oracleReader.read();
+            if (message != null) {
+                LOGGER.info("event content: {}", message);
+            }
+        }
+    }
+}
diff --git a/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleSource.java b/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleSource.java
index 12f0207a5..160357e77 100644
--- a/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleSource.java
+++ b/inlong-agent/agent-plugins/src/test/java/org/apache/inlong/agent/plugin/sources/TestOracleSource.java
@@ -1,90 +1,90 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.agent.plugin.sources;
-
-import org.apache.inlong.agent.conf.JobProfile;
-import org.apache.inlong.agent.metrics.AgentMetricItem;
-import org.apache.inlong.agent.metrics.AgentMetricItemSet;
-import org.apache.inlong.common.metric.MetricItem;
-import org.apache.inlong.common.metric.MetricRegister;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.powermock.api.mockito.PowerMockito;
-import org.powermock.core.classloader.annotations.PowerMockIgnore;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-import org.powermock.modules.junit4.PowerMockRunner;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.powermock.api.mockito.PowerMockito.when;
-import static org.powermock.api.mockito.PowerMockito.whenNew;
-import static org.powermock.api.support.membermodification.MemberMatcher.field;
-
-/**
- * Test cases for {@link OracleSource}.
- */
-@RunWith(PowerMockRunner.class)
-@PrepareForTest({OracleSource.class, MetricRegister.class})
-@PowerMockIgnore({"javax.management.*"})
-public class TestOracleSource {
-
-    @Mock
-    JobProfile jobProfile;
-
-    @Mock
-    private AgentMetricItemSet agentMetricItemSet;
-
-    @Mock
-    private AgentMetricItem agentMetricItem;
-
-    private AtomicLong sourceSuccessCount;
-
-    private AtomicLong sourceFailCount;
-
-    @Before
-    public void setup() throws Exception {
-        sourceSuccessCount = new AtomicLong(0);
-        sourceFailCount = new AtomicLong(0);
-
-        // mock metrics
-        whenNew(AgentMetricItemSet.class).withArguments(anyString()).thenReturn(agentMetricItemSet);
-        when(agentMetricItemSet.findMetricItem(any())).thenReturn(agentMetricItem);
-        field(AgentMetricItem.class, "sourceSuccessCount").set(agentMetricItem, sourceSuccessCount);
-        field(AgentMetricItem.class, "sourceFailCount").set(agentMetricItem, sourceFailCount);
-        PowerMockito.mockStatic(MetricRegister.class);
-        PowerMockito.doNothing().when(
-                MetricRegister.class, "register", any(MetricItem.class));
-    }
-
-    /**
-     * Test cases for {@link OracleSource#split(JobProfile)}.
-     */
-    @Test
-    public void testSplit() {
-
-        // build mock
-        final OracleSource source = new OracleSource();
-        // assert
-        assertEquals(1, source.split(jobProfile).size());
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.agent.plugin.sources;
+
+import org.apache.inlong.agent.conf.JobProfile;
+import org.apache.inlong.agent.metrics.AgentMetricItem;
+import org.apache.inlong.agent.metrics.AgentMetricItemSet;
+import org.apache.inlong.common.metric.MetricItem;
+import org.apache.inlong.common.metric.MetricRegister;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.powermock.api.mockito.PowerMockito.when;
+import static org.powermock.api.mockito.PowerMockito.whenNew;
+import static org.powermock.api.support.membermodification.MemberMatcher.field;
+
+/**
+ * Test cases for {@link OracleSource}.
+ */
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({OracleSource.class, MetricRegister.class})
+@PowerMockIgnore({"javax.management.*"})
+public class TestOracleSource {
+
+    @Mock
+    JobProfile jobProfile;
+
+    @Mock
+    private AgentMetricItemSet agentMetricItemSet;
+
+    @Mock
+    private AgentMetricItem agentMetricItem;
+
+    private AtomicLong sourceSuccessCount;
+
+    private AtomicLong sourceFailCount;
+
+    @Before
+    public void setup() throws Exception {
+        sourceSuccessCount = new AtomicLong(0);
+        sourceFailCount = new AtomicLong(0);
+
+        // mock metrics
+        whenNew(AgentMetricItemSet.class).withArguments(anyString()).thenReturn(agentMetricItemSet);
+        when(agentMetricItemSet.findMetricItem(any())).thenReturn(agentMetricItem);
+        field(AgentMetricItem.class, "sourceSuccessCount").set(agentMetricItem, sourceSuccessCount);
+        field(AgentMetricItem.class, "sourceFailCount").set(agentMetricItem, sourceFailCount);
+        PowerMockito.mockStatic(MetricRegister.class);
+        PowerMockito.doNothing().when(
+                MetricRegister.class, "register", any(MetricItem.class));
+    }
+
+    /**
+     * Test cases for {@link OracleSource#split(JobProfile)}.
+     */
+    @Test
+    public void testSplit() {
+
+        // build mock
+        final OracleSource source = new OracleSource();
+        // assert
+        assertEquals(1, source.split(jobProfile).size());
+    }
+}
diff --git a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/MsgDedupHandler.java b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/MsgDedupHandler.java
index e3ff37b7a..ba3dec596 100644
--- a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/MsgDedupHandler.java
+++ b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/MsgDedupHandler.java
@@ -1,106 +1,106 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.dataproxy.sink.common;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.CacheStats;
-import com.google.common.cache.LoadingCache;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// message deduplication handler
-public class MsgDedupHandler {
-
-    private static final Logger logger =
-            LoggerFactory.getLogger(MsgDedupHandler.class);
-    private static final int DEF_MAX_SURVIVE_CNT_SIZE = 5000000;
-    private static final int DEF_MAX_SURVIVE_TIME_MS = 30000;
-    private final AtomicBoolean started = new AtomicBoolean(false);
-    private volatile boolean enableDataDedup;
-    private LoadingCache<String, Long> msgSeqIdCache = null;
-
-    public MsgDedupHandler() {
-        enableDataDedup = false;
-    }
-
-    public void start(boolean cfgEnableFun, int cfgMaxSurviveTime, int cfgMaxSurviveSize) {
-        if (this.started.compareAndSet(false, true)) {
-            this.enableDataDedup = cfgEnableFun;
-            int maxSurviveTime = cfgMaxSurviveTime;
-            int maxSurviveSize = cfgMaxSurviveSize;
-            if (this.enableDataDedup) {
-                if (maxSurviveTime < 1000) {
-                    maxSurviveTime = DEF_MAX_SURVIVE_TIME_MS;
-                }
-                if (maxSurviveSize < 0) {
-                    maxSurviveSize = DEF_MAX_SURVIVE_CNT_SIZE;
-                }
-                msgSeqIdCache = CacheBuilder
-                        .newBuilder().concurrencyLevel(4 * 8).initialCapacity(5000000)
-                        .expireAfterAccess(maxSurviveTime, TimeUnit.MILLISECONDS)
-                        .maximumSize(maxSurviveSize)
-                        .build(new CacheLoader<String, Long>() {
-                            @Override
-                            public Long load(String key) {
-                                return System.currentTimeMillis();
-                            }
-                        });
-            }
-            logger.info("Initialized message deduplication handler, enable = "
-                    + this.enableDataDedup + ", configured survived-time = "
-                    + cfgMaxSurviveTime + ", valid survived-time = "
-                    + maxSurviveTime + ", configured survived-size = "
-                    + cfgMaxSurviveSize + ", valid survived-size = "
-                    + maxSurviveSize);
-        }
-    }
-
-    public void invalidMsgSeqId(String msgSeqId) {
-        if (enableDataDedup && msgSeqId != null) {
-            if (msgSeqIdCache.asMap().containsKey(msgSeqId)) {
-                msgSeqIdCache.invalidate(msgSeqId);
-            }
-        }
-    }
-
-    public boolean judgeDupAndPutMsgSeqId(String msgSeqId) {
-        boolean isInclude = false;
-        if (enableDataDedup && msgSeqId != null) {
-            isInclude = msgSeqIdCache.asMap().containsKey(msgSeqId);
-            msgSeqIdCache.put(msgSeqId, System.currentTimeMillis());
-        }
-        return isInclude;
-    }
-
-    public String getCacheStatsInfo() {
-        if (enableDataDedup) {
-            return msgSeqIdCache.stats().toString();
-        }
-        return "Disable for message data deduplication function";
-    }
-
-    public CacheStats getCacheData() {
-        if (enableDataDedup) {
-            return msgSeqIdCache.stats();
-        }
-        return null;
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.dataproxy.sink.common;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.CacheStats;
+import com.google.common.cache.LoadingCache;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// message deduplication handler
+public class MsgDedupHandler {
+
+    private static final Logger logger =
+            LoggerFactory.getLogger(MsgDedupHandler.class);
+    private static final int DEF_MAX_SURVIVE_CNT_SIZE = 5000000;
+    private static final int DEF_MAX_SURVIVE_TIME_MS = 30000;
+    private final AtomicBoolean started = new AtomicBoolean(false);
+    private volatile boolean enableDataDedup;
+    private LoadingCache<String, Long> msgSeqIdCache = null;
+
+    public MsgDedupHandler() {
+        enableDataDedup = false;
+    }
+
+    public void start(boolean cfgEnableFun, int cfgMaxSurviveTime, int cfgMaxSurviveSize) {
+        if (this.started.compareAndSet(false, true)) {
+            this.enableDataDedup = cfgEnableFun;
+            int maxSurviveTime = cfgMaxSurviveTime;
+            int maxSurviveSize = cfgMaxSurviveSize;
+            if (this.enableDataDedup) {
+                if (maxSurviveTime < 1000) {
+                    maxSurviveTime = DEF_MAX_SURVIVE_TIME_MS;
+                }
+                if (maxSurviveSize < 0) {
+                    maxSurviveSize = DEF_MAX_SURVIVE_CNT_SIZE;
+                }
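+                // cache recently seen sequence ids; entries expire after the survive time or when the size limit is hit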
+                msgSeqIdCache = CacheBuilder
+                        .newBuilder().concurrencyLevel(4 * 8).initialCapacity(5000000)
+                        .expireAfterAccess(maxSurviveTime, TimeUnit.MILLISECONDS)
+                        .maximumSize(maxSurviveSize)
+                        .build(new CacheLoader<String, Long>() {
+                            @Override
+                            public Long load(String key) {
+                                return System.currentTimeMillis();
+                            }
+                        });
+            }
+            logger.info("Initialized message deduplication handler, enable = "
+                    + this.enableDataDedup + ", configured survived-time = "
+                    + cfgMaxSurviveTime + ", valid survived-time = "
+                    + maxSurviveTime + ", configured survived-size = "
+                    + cfgMaxSurviveSize + ", valid survived-size = "
+                    + maxSurviveSize);
+        }
+    }
+
+    public void invalidMsgSeqId(String msgSeqId) {
+        if (enableDataDedup && msgSeqId != null) {
+            if (msgSeqIdCache.asMap().containsKey(msgSeqId)) {
+                msgSeqIdCache.invalidate(msgSeqId);
+            }
+        }
+    }
+
+    public boolean judgeDupAndPutMsgSeqId(String msgSeqId) {
+        boolean isInclude = false;
+        if (enableDataDedup && msgSeqId != null) {
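+            // a cache hit means the sequence id was seen recently; the timestamp is refreshed in both cases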
+            isInclude = msgSeqIdCache.asMap().containsKey(msgSeqId);
+            msgSeqIdCache.put(msgSeqId, System.currentTimeMillis());
+        }
+        return isInclude;
+    }
+
+    public String getCacheStatsInfo() {
+        if (enableDataDedup) {
+            return msgSeqIdCache.stats().toString();
+        }
+        return "Disable for message data deduplication function";
+    }
+
+    public CacheStats getCacheData() {
+        if (enableDataDedup) {
+            return msgSeqIdCache.stats();
+        }
+        return null;
+    }
+}
diff --git a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeProducerHolder.java b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeProducerHolder.java
index 905db9e10..37f828d24 100644
--- a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeProducerHolder.java
+++ b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeProducerHolder.java
@@ -1,281 +1,281 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.dataproxy.sink.common;
-
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.flume.FlumeException;
-import org.apache.inlong.dataproxy.config.pojo.MQClusterConfig;
-import org.apache.inlong.tubemq.client.config.TubeClientConfig;
-import org.apache.inlong.tubemq.client.exception.TubeClientException;
-import org.apache.inlong.tubemq.client.factory.TubeMultiSessionFactory;
-import org.apache.inlong.tubemq.client.producer.MessageProducer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TubeProducerHolder {
-    private static final Logger logger =
-            LoggerFactory.getLogger(TubeProducerHolder.class);
-    private static final long SEND_FAILURE_WAIT = 30000L;
-    private static final long PUBLISH_FAILURE_WAIT = 60000L;
-    private final AtomicBoolean started = new AtomicBoolean(false);
-    private final String sinkName;
-    private final String clusterAddr;
-    private final MQClusterConfig clusterConfig;
-    private TubeMultiSessionFactory sessionFactory = null;
-    private final Map<String, MessageProducer> producerMap = new ConcurrentHashMap<>();
-    private MessageProducer lastProducer = null;
-    private final AtomicInteger lastPubTopicCnt = new AtomicInteger(0);
-    private static final ConcurrentHashMap<String, AtomicLong> FROZEN_TOPIC_MAP
-            = new ConcurrentHashMap<>();
-
-    public TubeProducerHolder(String sinkName, String clusterAddr, MQClusterConfig tubeConfig) {
-        Preconditions.checkState(StringUtils.isNotBlank(clusterAddr),
-                "No TubeMQ's cluster address list specified");
-        this.sinkName = sinkName;
-        this.clusterAddr = clusterAddr;
-        this.clusterConfig = tubeConfig;
-    }
-
-    public void start(Set<String> configTopicSet) {
-        if (!this.started.compareAndSet(false, true)) {
-            logger.info("ProducerHolder for " + sinkName + " has started!");
-            return;
-        }
-        logger.info("ProducerHolder for " + sinkName + " begin to start!");
-        // create session factory
-        try {
-            TubeClientConfig clientConfig = TubeUtils.buildClientConfig(clusterAddr, this.clusterConfig);
-            this.sessionFactory = new TubeMultiSessionFactory(clientConfig);
-            createProducersByTopicSet(configTopicSet);
-        } catch (Throwable e) {
-            stop();
-            String errInfo = "Build session factory to " + clusterAddr
-                    + " for " + sinkName + " failed, please re-check";
-            logger.error(errInfo, e);
-            throw new FlumeException(errInfo);
-        }
-        logger.info("ProducerHolder for " + sinkName + " started!");
-    }
-
-    public void stop() {
-        if (!this.started.get()) {
-            return;
-        }
-        // change start flag
-        if (!this.started.compareAndSet(true, false)) {
-            logger.info("ProducerHolder for " + sinkName + " has stopped!");
-            return;
-        }
-        logger.info("ProducerHolder for " + sinkName + " begin to stop!");
-        for (Map.Entry<String, MessageProducer> entry : producerMap.entrySet()) {
-            if (entry == null || entry.getValue() == null) {
-                continue;
-            }
-            try {
-                entry.getValue().shutdown();
-            } catch (Throwable e) {
-                // ignore log
-            }
-        }
-        producerMap.clear();
-        lastProducer = null;
-        lastPubTopicCnt.set(0);
-        FROZEN_TOPIC_MAP.clear();
-        if (sessionFactory != null) {
-            try {
-                sessionFactory.shutdown();
-            } catch (Throwable e) {
-                // ignore log
-            }
-            sessionFactory = null;
-        }
-        logger.info("ProducerHolder for " + sinkName + " finished stop!");
-    }
-
-    /**
-     * Get producer by topic name:
-     *   i. if the topic is judged to be an illegal topic, return null;
-     *   ii. if it is not an illegal topic or the status has expired, check:
-     *    a. if the topic has been published before, return the corresponding producer directly;
-     *    b. if the topic is not in the published list, perform the topic's publish action.
-     *  If an exception is thrown while publishing the topic,
-     *     the topic is marked as illegal
-     *
-     * @param topicName  the topic name
-     *
-     * @return  the producer
-     *          if topic is illegal, return null
-     * @throws  TubeClientException
-     */
-    public MessageProducer getProducer(String topicName) throws TubeClientException {
-        AtomicLong fbdTime = FROZEN_TOPIC_MAP.get(topicName);
-        if (fbdTime != null && fbdTime.get() > System.currentTimeMillis()) {
-            return null;
-        }
-        MessageProducer tmpProducer = producerMap.get(topicName);
-        if (tmpProducer != null) {
-            if (fbdTime != null) {
-                FROZEN_TOPIC_MAP.remove(topicName);
-            }
-            return tmpProducer;
-        }
-        synchronized (lastPubTopicCnt) {
-            fbdTime = FROZEN_TOPIC_MAP.get(topicName);
-            if (fbdTime != null && fbdTime.get() > System.currentTimeMillis()) {
-                return null;
-            }
-            if (lastProducer == null
-                    || lastPubTopicCnt.get() >= clusterConfig.getMaxTopicsEachProducerHold()) {
-                lastProducer = sessionFactory.createProducer();
-                lastPubTopicCnt.set(0);
-            }
-            try {
-                lastProducer.publish(topicName);
-            } catch (Throwable e) {
-                fbdTime = FROZEN_TOPIC_MAP.get(topicName);
-                if (fbdTime == null) {
-                    AtomicLong tmpFbdTime = new AtomicLong();
-                    fbdTime = FROZEN_TOPIC_MAP.putIfAbsent(topicName, tmpFbdTime);
-                    if (fbdTime == null) {
-                        fbdTime = tmpFbdTime;
-                    }
-                }
-                fbdTime.set(System.currentTimeMillis() + PUBLISH_FAILURE_WAIT);
-                logger.warn("Throw exception while publish topic="
-                        + topicName + ", exception is " + e.getMessage());
-                return null;
-            }
-            producerMap.put(topicName, lastProducer);
-            lastPubTopicCnt.incrementAndGet();
-            return lastProducer;
-        }
-    }
-
-    /**
-     * Whether to freeze sending for the topic, according to the exception returned by message sending
-     *
-     * @param topicName  the topic name sent message
-     * @param throwable  the exception information thrown when sending a message
-     *
-     * @return  whether sending for the topic was frozen
-     */
-    public boolean needFrozenSent(String topicName, Throwable throwable) {
-        if (throwable instanceof TubeClientException) {
-            String message = throwable.getMessage();
-            if (message != null && (message.contains("No available partition for topic")
-                    || message.contains("The brokers of topic are all forbidden"))) {
-                AtomicLong fbdTime = FROZEN_TOPIC_MAP.get(topicName);
-                if (fbdTime == null) {
-                    AtomicLong tmpFbdTime = new AtomicLong(0);
-                    fbdTime = FROZEN_TOPIC_MAP.putIfAbsent(topicName, tmpFbdTime);
-                    if (fbdTime == null) {
-                        fbdTime = tmpFbdTime;
-                    }
-                }
-                fbdTime.set(System.currentTimeMillis() + SEND_FAILURE_WAIT);
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Create sink producers by configured topic set
-     * group the topic set into subsets, each subset associated with one producer
-     *
-     * @param cfgTopicSet  the configured topic set
-     */
-    public synchronized void createProducersByTopicSet(Set<String> cfgTopicSet) throws Exception {
-        if (cfgTopicSet == null || cfgTopicSet.isEmpty()) {
-            return;
-        }
-        // filter published topics
-        List<String> filteredTopics = new ArrayList<>(cfgTopicSet.size());
-        for (String topicName : cfgTopicSet) {
-            if (StringUtils.isBlank(topicName)
-                    || producerMap.get(topicName) != null) {
-                continue;
-            }
-            filteredTopics.add(topicName);
-        }
-        if (filteredTopics.isEmpty()) {
-            return;
-        }
-        // alloc topic count
-        Collections.sort(filteredTopics);
-        long startTime = System.currentTimeMillis();
-        int maxPublishTopicCnt = clusterConfig.getMaxTopicsEachProducerHold();
-        int allocTotalCnt = filteredTopics.size();
-        List<Integer> topicGroupCnt = new ArrayList<>();
-        int paddingCnt = (lastPubTopicCnt.get() <= 0)
-                ? 0 : (maxPublishTopicCnt - lastPubTopicCnt.get());
-        while (allocTotalCnt > 0) {
-            if (paddingCnt > 0) {
-                topicGroupCnt.add(Math.min(allocTotalCnt, paddingCnt));
-                allocTotalCnt -= paddingCnt;
-                paddingCnt = 0;
-            } else {
-                topicGroupCnt.add(Math.min(allocTotalCnt, maxPublishTopicCnt));
-                allocTotalCnt -= maxPublishTopicCnt;
-            }
-        }
-        // create producer
-        int startPos = 0;
-        int endPos = 0;
-        Set<String> subTopicSet = new HashSet<>();
-        for (Integer dltCnt : topicGroupCnt) {
-            // allocate topic items
-            subTopicSet.clear();
-            endPos = startPos + dltCnt;
-            for (int index = startPos; index < endPos; index++) {
-                subTopicSet.add(filteredTopics.get(index));
-            }
-            startPos = endPos;
-            // create producer
-            if (lastProducer == null
-                    || lastPubTopicCnt.get() == maxPublishTopicCnt) {
-                lastProducer = sessionFactory.createProducer();
-                lastPubTopicCnt.set(0);
-            }
-            try {
-                lastProducer.publish(subTopicSet);
-            } catch (Throwable e) {
-                logger.info(sinkName + " meta sink publish fail.", e);
-            }
-            lastPubTopicCnt.addAndGet(subTopicSet.size());
-            for (String topicItem : subTopicSet) {
-                producerMap.put(topicItem, lastProducer);
-            }
-        }
-        logger.info(sinkName + " initializes producers for topics:"
-                + producerMap.keySet() + ", cost: " + (System.currentTimeMillis() - startTime)
-                + "ms");
-    }
-
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.dataproxy.sink.common;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.flume.FlumeException;
+import org.apache.inlong.dataproxy.config.pojo.MQClusterConfig;
+import org.apache.inlong.tubemq.client.config.TubeClientConfig;
+import org.apache.inlong.tubemq.client.exception.TubeClientException;
+import org.apache.inlong.tubemq.client.factory.TubeMultiSessionFactory;
+import org.apache.inlong.tubemq.client.producer.MessageProducer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TubeProducerHolder {
+    private static final Logger logger =
+            LoggerFactory.getLogger(TubeProducerHolder.class);
+    private static final long SEND_FAILURE_WAIT = 30000L;
+    private static final long PUBLISH_FAILURE_WAIT = 60000L;
+    private final AtomicBoolean started = new AtomicBoolean(false);
+    private final String sinkName;
+    private final String clusterAddr;
+    private final MQClusterConfig clusterConfig;
+    private TubeMultiSessionFactory sessionFactory = null;
+    private final Map<String, MessageProducer> producerMap = new ConcurrentHashMap<>();
+    private MessageProducer lastProducer = null;
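+    // number of topics published on lastProducer; a new producer is created once the configured limit is reached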
+    private final AtomicInteger lastPubTopicCnt = new AtomicInteger(0);
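+    // topics frozen after publish or send failures, mapped to the time until which they are skipped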
+    private static final ConcurrentHashMap<String, AtomicLong> FROZEN_TOPIC_MAP
+            = new ConcurrentHashMap<>();
+
+    public TubeProducerHolder(String sinkName, String clusterAddr, MQClusterConfig tubeConfig) {
+        Preconditions.checkState(StringUtils.isNotBlank(clusterAddr),
+                "No TubeMQ's cluster address list specified");
+        this.sinkName = sinkName;
+        this.clusterAddr = clusterAddr;
+        this.clusterConfig = tubeConfig;
+    }
+
+    public void start(Set<String> configTopicSet) {
+        if (!this.started.compareAndSet(false, true)) {
+            logger.info("ProducerHolder for " + sinkName + " has started!");
+            return;
+        }
+        logger.info("ProducerHolder for " + sinkName + " begin to start!");
+        // create session factory
+        try {
+            TubeClientConfig clientConfig = TubeUtils.buildClientConfig(clusterAddr, this.clusterConfig);
+            this.sessionFactory = new TubeMultiSessionFactory(clientConfig);
+            createProducersByTopicSet(configTopicSet);
+        } catch (Throwable e) {
+            stop();
+            String errInfo = "Failed to build session factory to " + clusterAddr
+                    + " for " + sinkName + ", please re-check";
+            logger.error(errInfo, e);
+            throw new FlumeException(errInfo);
+        }
+        logger.info("ProducerHolder for " + sinkName + " started!");
+    }
+
+    public void stop() {
+        if (!this.started.get()) {
+            return;
+        }
+        // change start flag
+        if (!this.started.compareAndSet(true, false)) {
+            logger.info("ProducerHolder for " + sinkName + " has stopped!");
+            return;
+        }
+        logger.info("ProducerHolder for " + sinkName + " begin to stop!");
+        for (Map.Entry<String, MessageProducer> entry : producerMap.entrySet()) {
+            if (entry == null || entry.getValue() == null) {
+                continue;
+            }
+            try {
+                entry.getValue().shutdown();
+            } catch (Throwable e) {
+                // ignore log
+            }
+        }
+        producerMap.clear();
+        lastProducer = null;
+        lastPubTopicCnt.set(0);
+        FROZEN_TOPIC_MAP.clear();
+        if (sessionFactory != null) {
+            try {
+                sessionFactory.shutdown();
+            } catch (Throwable e) {
+                // ignore log
+            }
+            sessionFactory = null;
+        }
+        logger.info("ProducerHolder for " + sinkName + " finished stop!");
+    }
+
+    /**
+     * Get producer by topic name:
+     *   i. if the topic is judged to be an illegal topic, return null;
+     *   ii. if it is not an illegal topic or the status has expired, check:
+     *    a. if the topic has been published before, return the corresponding producer directly;
+     *    b. if the topic is not in the published list, perform the topic's publish action.
+     *  If an exception is thrown while publishing the topic,
+     *     the topic is marked as an illegal topic.
+     *
+     * @param topicName  the topic name
+     *
+     * @return  the producer, or null if the topic is illegal
+     * @throws  TubeClientException  if a new producer cannot be created
+     */
+    public MessageProducer getProducer(String topicName) throws TubeClientException {
+        AtomicLong fbdTime = FROZEN_TOPIC_MAP.get(topicName);
+        if (fbdTime != null && fbdTime.get() > System.currentTimeMillis()) {
+            return null;
+        }
+        MessageProducer tmpProducer = producerMap.get(topicName);
+        if (tmpProducer != null) {
+            if (fbdTime != null) {
+                FROZEN_TOPIC_MAP.remove(topicName);
+            }
+            return tmpProducer;
+        }
+        synchronized (lastPubTopicCnt) {
+            fbdTime = FROZEN_TOPIC_MAP.get(topicName);
+            if (fbdTime != null && fbdTime.get() > System.currentTimeMillis()) {
+                return null;
+            }
+            if (lastProducer == null
+                    || lastPubTopicCnt.get() >= clusterConfig.getMaxTopicsEachProducerHold()) {
+                lastProducer = sessionFactory.createProducer();
+                lastPubTopicCnt.set(0);
+            }
+            try {
+                lastProducer.publish(topicName);
+            } catch (Throwable e) {
+                fbdTime = FROZEN_TOPIC_MAP.get(topicName);
+                if (fbdTime == null) {
+                    AtomicLong tmpFbdTime = new AtomicLong();
+                    fbdTime = FROZEN_TOPIC_MAP.putIfAbsent(topicName, tmpFbdTime);
+                    if (fbdTime == null) {
+                        fbdTime = tmpFbdTime;
+                    }
+                }
+                fbdTime.set(System.currentTimeMillis() + PUBLISH_FAILURE_WAIT);
+                logger.warn("Exception thrown while publishing topic="
+                        + topicName + ", exception is " + e.getMessage());
+                return null;
+            }
+            producerMap.put(topicName, lastProducer);
+            lastPubTopicCnt.incrementAndGet();
+            return lastProducer;
+        }
+    }
+
+    /**
+     * Decide whether to freeze production for a topic according to the exception
+     * returned when sending a message
+     *
+     * @param topicName  the topic name of the sent message
+     * @param throwable  the exception thrown when sending a message
+     *
+     * @return  whether the topic should be frozen (treated as illegal)
+     */
+    public boolean needFrozenSent(String topicName, Throwable throwable) {
+        if (throwable instanceof TubeClientException) {
+            String message = throwable.getMessage();
+            if (message != null && (message.contains("No available partition for topic")
+                    || message.contains("The brokers of topic are all forbidden"))) {
+                AtomicLong fbdTime = FROZEN_TOPIC_MAP.get(topicName);
+                if (fbdTime == null) {
+                    AtomicLong tmpFbdTime = new AtomicLong(0);
+                    fbdTime = FROZEN_TOPIC_MAP.putIfAbsent(topicName, tmpFbdTime);
+                    if (fbdTime == null) {
+                        fbdTime = tmpFbdTime;
+                    }
+                }
+                fbdTime.set(System.currentTimeMillis() + SEND_FAILURE_WAIT);
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Create sink producers from the configured topic set:
+     * the topics are split into groups, and each group is associated with one producer
+     *
+     * @param cfgTopicSet  the configured topic set
+     */
+    public synchronized void createProducersByTopicSet(Set<String> cfgTopicSet) throws Exception {
+        if (cfgTopicSet == null || cfgTopicSet.isEmpty()) {
+            return;
+        }
+        // filter published topics
+        List<String> filteredTopics = new ArrayList<>(cfgTopicSet.size());
+        for (String topicName : cfgTopicSet) {
+            if (StringUtils.isBlank(topicName)
+                    || producerMap.get(topicName) != null) {
+                continue;
+            }
+            filteredTopics.add(topicName);
+        }
+        if (filteredTopics.isEmpty()) {
+            return;
+        }
+        // alloc topic count
+        Collections.sort(filteredTopics);
+        long startTime = System.currentTimeMillis();
+        int maxPublishTopicCnt = clusterConfig.getMaxTopicsEachProducerHold();
+        int allocTotalCnt = filteredTopics.size();
+        List<Integer> topicGroupCnt = new ArrayList<>();
+        int paddingCnt = (lastPubTopicCnt.get() <= 0)
+                ? 0 : (maxPublishTopicCnt - lastPubTopicCnt.get());
+        while (allocTotalCnt > 0) {
+            if (paddingCnt > 0) {
+                topicGroupCnt.add(Math.min(allocTotalCnt, paddingCnt));
+                allocTotalCnt -= paddingCnt;
+                paddingCnt = 0;
+            } else {
+                topicGroupCnt.add(Math.min(allocTotalCnt, maxPublishTopicCnt));
+                allocTotalCnt -= maxPublishTopicCnt;
+            }
+        }
+        // create producer
+        int startPos = 0;
+        int endPos = 0;
+        Set<String> subTopicSet = new HashSet<>();
+        for (Integer dltCnt : topicGroupCnt) {
+            // allocate topic items
+            subTopicSet.clear();
+            endPos = startPos + dltCnt;
+            for (int index = startPos; index < endPos; index++) {
+                subTopicSet.add(filteredTopics.get(index));
+            }
+            startPos = endPos;
+            // create producer
+            if (lastProducer == null
+                    || lastPubTopicCnt.get() == maxPublishTopicCnt) {
+                lastProducer = sessionFactory.createProducer();
+                lastPubTopicCnt.set(0);
+            }
+            try {
+                lastProducer.publish(subTopicSet);
+            } catch (Throwable e) {
+                logger.warn(sinkName + " failed to publish topics.", e);
+            }
+            lastPubTopicCnt.addAndGet(subTopicSet.size());
+            for (String topicItem : subTopicSet) {
+                producerMap.put(topicItem, lastProducer);
+            }
+        }
+        logger.info(sinkName + " initialized producers for topics: "
+                + producerMap.keySet() + ", cost: " + (System.currentTimeMillis() - startTime)
+                + "ms");
+    }
+
+}
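
A note on usage: the snippet below is a minimal, illustrative sketch of how a sink-side
caller could drive the restored TubeProducerHolder. The class name DemoTubeSender, the
sink name, master address and topic names are placeholders, and sendMessage(Message) is
assumed to be the synchronous send call of TubeMQ's MessageProducer; TubeUtils.buildMessage
is the helper restored in the next file of this commit.

    package org.apache.inlong.dataproxy.sink.common; // same package as the holder, for illustration

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.flume.Event;
    import org.apache.inlong.dataproxy.config.pojo.MQClusterConfig;
    import org.apache.inlong.tubemq.client.producer.MessageProducer;

    public class DemoTubeSender {

        private final TubeProducerHolder holder;

        public DemoTubeSender(MQClusterConfig clusterConfig) {
            // "demoSink" and the master address are placeholder values
            this.holder = new TubeProducerHolder("demoSink", "tube-master:8715", clusterConfig);
            // pre-publish the topics known from the sink configuration
            this.holder.start(new HashSet<>(Arrays.asList("topic_a", "topic_b")));
        }

        public boolean send(String topic, Event event) throws Exception {
            MessageProducer producer = holder.getProducer(topic);
            if (producer == null) {
                // the topic is currently frozen because a recent publish failed; retry later
                return false;
            }
            try {
                // sendMessage(Message) is assumed to be TubeMQ's synchronous producer call
                producer.sendMessage(TubeUtils.buildMessage(topic, event));
                return true;
            } catch (Throwable t) {
                // freeze the topic when TubeMQ reports it has no writable partitions
                holder.needFrozenSent(topic, t);
                return false;
            }
        }
    }
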
diff --git a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeUtils.java b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeUtils.java
index ce27432a2..4b0236333 100644
--- a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeUtils.java
+++ b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/sink/common/TubeUtils.java
@@ -1,78 +1,78 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.dataproxy.sink.common;
-
-import java.util.Map;
-import org.apache.flume.Event;
-import org.apache.inlong.common.msg.AttributeConstants;
-import org.apache.inlong.dataproxy.config.pojo.MQClusterConfig;
-import org.apache.inlong.dataproxy.consts.ConfigConstants;
-import org.apache.inlong.dataproxy.utils.Constants;
-import org.apache.inlong.dataproxy.utils.DateTimeUtils;
-import org.apache.inlong.dataproxy.utils.InLongMsgVer;
-import org.apache.inlong.dataproxy.utils.MessageUtils;
-import org.apache.inlong.tubemq.client.config.TubeClientConfig;
-import org.apache.inlong.tubemq.corebase.Message;
-
-public class TubeUtils {
-
-    /**
-     * Build TubeMQ's client configure
-     *
-     * @param clusterAddr    the TubeMQ cluster address
-     * @param tubeConfig     the TubeMQ cluster configure
-     * @return   the TubeClientConfig object
-     */
-    public static TubeClientConfig buildClientConfig(String clusterAddr, MQClusterConfig tubeConfig) {
-        final TubeClientConfig tubeClientConfig = new TubeClientConfig(clusterAddr);
-        tubeClientConfig.setLinkMaxAllowedDelayedMsgCount(tubeConfig.getLinkMaxAllowedDelayedMsgCount());
-        tubeClientConfig.setSessionWarnDelayedMsgCount(tubeConfig.getSessionWarnDelayedMsgCount());
-        tubeClientConfig.setSessionMaxAllowedDelayedMsgCount(tubeConfig.getSessionMaxAllowedDelayedMsgCount());
-        tubeClientConfig.setNettyWriteBufferHighWaterMark(tubeConfig.getNettyWriteBufferHighWaterMark());
-        tubeClientConfig.setHeartbeatPeriodMs(tubeConfig.getTubeHeartbeatPeriodMs());
-        tubeClientConfig.setRpcTimeoutMs(tubeConfig.getTubeRpcTimeoutMs());
-        return tubeClientConfig;
-    }
-
-    /**
-     * Build TubeMQ's message
-     *
-     * @param topicName      the topic name of message
-     * @param event          the DataProxy event
-     * @return   the message object
-     */
-    public static Message buildMessage(String topicName, Event event) {
-        Map<String, String> headers = event.getHeaders();
-        Message message = new Message(topicName, event.getBody());
-        String pkgVersion = headers.get(ConfigConstants.MSG_ENCODE_VER);
-        if (InLongMsgVer.INLONG_V1.getName().equalsIgnoreCase(pkgVersion)) {
-            long dataTimeL = Long.parseLong(headers.get(ConfigConstants.PKG_TIME_KEY));
-            message.putSystemHeader(headers.get(Constants.INLONG_STREAM_ID),
-                    DateTimeUtils.ms2yyyyMMddHHmm(dataTimeL));
-        } else {
-            long dataTimeL = Long.parseLong(headers.get(AttributeConstants.DATA_TIME));
-            message.putSystemHeader(headers.get(AttributeConstants.STREAM_ID),
-                    DateTimeUtils.ms2yyyyMMddHHmm(dataTimeL));
-        }
-        Map<String, String> extraAttrMap = MessageUtils.getXfsAttrs(headers, pkgVersion);
-        for (Map.Entry<String, String> entry : extraAttrMap.entrySet()) {
-            message.setAttrKeyVal(entry.getKey(), entry.getValue());
-        }
-        return message;
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.dataproxy.sink.common;
+
+import java.util.Map;
+import org.apache.flume.Event;
+import org.apache.inlong.common.msg.AttributeConstants;
+import org.apache.inlong.dataproxy.config.pojo.MQClusterConfig;
+import org.apache.inlong.dataproxy.consts.ConfigConstants;
+import org.apache.inlong.dataproxy.utils.Constants;
+import org.apache.inlong.dataproxy.utils.DateTimeUtils;
+import org.apache.inlong.dataproxy.utils.InLongMsgVer;
+import org.apache.inlong.dataproxy.utils.MessageUtils;
+import org.apache.inlong.tubemq.client.config.TubeClientConfig;
+import org.apache.inlong.tubemq.corebase.Message;
+
+public class TubeUtils {
+
+    /**
+     * Build TubeMQ's client configuration
+     *
+     * @param clusterAddr    the TubeMQ cluster address
+     * @param tubeConfig     the TubeMQ cluster configuration
+     * @return   the TubeClientConfig object
+     */
+    public static TubeClientConfig buildClientConfig(String clusterAddr, MQClusterConfig tubeConfig) {
+        final TubeClientConfig tubeClientConfig = new TubeClientConfig(clusterAddr);
+        tubeClientConfig.setLinkMaxAllowedDelayedMsgCount(tubeConfig.getLinkMaxAllowedDelayedMsgCount());
+        tubeClientConfig.setSessionWarnDelayedMsgCount(tubeConfig.getSessionWarnDelayedMsgCount());
+        tubeClientConfig.setSessionMaxAllowedDelayedMsgCount(tubeConfig.getSessionMaxAllowedDelayedMsgCount());
+        tubeClientConfig.setNettyWriteBufferHighWaterMark(tubeConfig.getNettyWriteBufferHighWaterMark());
+        tubeClientConfig.setHeartbeatPeriodMs(tubeConfig.getTubeHeartbeatPeriodMs());
+        tubeClientConfig.setRpcTimeoutMs(tubeConfig.getTubeRpcTimeoutMs());
+        return tubeClientConfig;
+    }
+
+    /**
+     * Build TubeMQ's message
+     *
+     * @param topicName      the topic name of the message
+     * @param event          the DataProxy event
+     * @return   the message object
+     */
+    public static Message buildMessage(String topicName, Event event) {
+        Map<String, String> headers = event.getHeaders();
+        Message message = new Message(topicName, event.getBody());
+        String pkgVersion = headers.get(ConfigConstants.MSG_ENCODE_VER);
+        if (InLongMsgVer.INLONG_V1.getName().equalsIgnoreCase(pkgVersion)) {
+            long dataTimeL = Long.parseLong(headers.get(ConfigConstants.PKG_TIME_KEY));
+            message.putSystemHeader(headers.get(Constants.INLONG_STREAM_ID),
+                    DateTimeUtils.ms2yyyyMMddHHmm(dataTimeL));
+        } else {
+            long dataTimeL = Long.parseLong(headers.get(AttributeConstants.DATA_TIME));
+            message.putSystemHeader(headers.get(AttributeConstants.STREAM_ID),
+                    DateTimeUtils.ms2yyyyMMddHHmm(dataTimeL));
+        }
+        Map<String, String> extraAttrMap = MessageUtils.getXfsAttrs(headers, pkgVersion);
+        for (Map.Entry<String, String> entry : extraAttrMap.entrySet()) {
+            message.setAttrKeyVal(entry.getKey(), entry.getValue());
+        }
+        return message;
+    }
+}
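
For reference, a hedged sketch of exercising buildMessage() with a hand-built Flume event;
the topic, stream id and body are placeholder values, and the event is given a V0
encode-version header so that the DATA_TIME/STREAM_ID branch is taken.

    package org.apache.inlong.dataproxy.sink.common; // same package, for illustration only

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.flume.Event;
    import org.apache.flume.event.EventBuilder;
    import org.apache.inlong.common.msg.AttributeConstants;
    import org.apache.inlong.dataproxy.consts.ConfigConstants;
    import org.apache.inlong.dataproxy.utils.InLongMsgVer;
    import org.apache.inlong.tubemq.corebase.Message;

    public class TubeUtilsDemo {

        public static void main(String[] args) {
            // headers that buildMessage() reads for a non-V1 event; values are placeholders
            Map<String, String> headers = new HashMap<>();
            headers.put(ConfigConstants.MSG_ENCODE_VER, InLongMsgVer.INLONG_V0.getName());
            headers.put(AttributeConstants.STREAM_ID, "demo_stream");
            headers.put(AttributeConstants.DATA_TIME, String.valueOf(System.currentTimeMillis()));

            Event event = EventBuilder.withBody("hello inlong".getBytes(), headers);
            // the resulting message carries the stream id and the yyyyMMddHHmm data time
            // in its system header, plus any extra attributes extracted from the headers
            Message message = TubeUtils.buildMessage("demo_topic", event);
            System.out.println("built message for topic " + message.getTopic());
        }
    }
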
diff --git a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/DateTimeUtils.java b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/DateTimeUtils.java
index af98a48e7..222e343c8 100644
--- a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/DateTimeUtils.java
+++ b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/DateTimeUtils.java
@@ -1,41 +1,41 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.dataproxy.utils;
-
-import java.time.Instant;
-import java.time.LocalDateTime;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatter;
-
-public class DateTimeUtils {
-    private static final DateTimeFormatter DATE_FORMATTER
-            = DateTimeFormatter.ofPattern("yyyyMMddHHmm");
-    private static final ZoneId defZoneId = ZoneId.systemDefault();
-
-    /**
-     * convert ms value to 'yyyyMMddHHmm' string
-     *
-     * @param timestamp The millisecond value of the specified time
-     * @return the time string in yyyyMMddHHmm format
-     */
-    public static String ms2yyyyMMddHHmm(long timestamp) {
-        LocalDateTime localDateTime =
-                LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), defZoneId);
-        return DATE_FORMATTER.format(localDateTime);
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.dataproxy.utils;
+
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+
+public class DateTimeUtils {
+    private static final DateTimeFormatter DATE_FORMATTER
+            = DateTimeFormatter.ofPattern("yyyyMMddHHmm");
+    private static final ZoneId defZoneId = ZoneId.systemDefault();
+
+    /**
+     * convert ms value to 'yyyyMMddHHmm' string
+     *
+     * @param timestamp The millisecond value of the specified time
+     * @return the time string in yyyyMMddHHmm format
+     */
+    public static String ms2yyyyMMddHHmm(long timestamp) {
+        LocalDateTime localDateTime =
+                LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), defZoneId);
+        return DATE_FORMATTER.format(localDateTime);
+    }
+}
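
As a quick illustration (the output depends on the JVM's default time zone and the moment
it runs):

    // e.g. format the current time down to the minute, using the system default zone
    String minuteStr = DateTimeUtils.ms2yyyyMMddHHmm(System.currentTimeMillis());
    // minuteStr looks like "202211251253" for 2022-11-25 12:53 in the default zone
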
diff --git a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/InLongMsgVer.java b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/InLongMsgVer.java
index c5ca08291..626398df6 100644
--- a/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/InLongMsgVer.java
+++ b/inlong-dataproxy/dataproxy-source/src/main/java/org/apache/inlong/dataproxy/utils/InLongMsgVer.java
@@ -1,54 +1,54 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.dataproxy.utils;
-
-public enum InLongMsgVer {
-    INLONG_V0(0, "V0", "The inlong-msg V0 format"),
-    INLONG_V1(1, "V1", "The inlong-msg V1 format");
-
-    InLongMsgVer(int id, String name, String desc) {
-        this.id = id;
-        this.name = name;
-        this.desc = desc;
-    }
-
-    public int getId() {
-        return id;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getDesc() {
-        return desc;
-    }
-
-    public static InLongMsgVer valueOf(int value) {
-        for (InLongMsgVer inLongMsgVer : InLongMsgVer.values()) {
-            if (inLongMsgVer.getId() == value) {
-                return inLongMsgVer;
-            }
-        }
-        return INLONG_V0;
-    }
-
-    private final int id;
-    private final String name;
-    private final String desc;
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.dataproxy.utils;
+
+public enum InLongMsgVer {
+    INLONG_V0(0, "V0", "The inlong-msg V0 format"),
+    INLONG_V1(1, "V1", "The inlong-msg V1 format");
+
+    InLongMsgVer(int id, String name, String desc) {
+        this.id = id;
+        this.name = name;
+        this.desc = desc;
+    }
+
+    public int getId() {
+        return id;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public String getDesc() {
+        return desc;
+    }
+
+    public static InLongMsgVer valueOf(int value) {
+        for (InLongMsgVer inLongMsgVer : InLongMsgVer.values()) {
+            if (inLongMsgVer.getId() == value) {
+                return inLongMsgVer;
+            }
+        }
+        return INLONG_V0;
+    }
+
+    private final int id;
+    private final String name;
+    private final String desc;
+}
diff --git a/inlong-distribution/src/main/assemblies/sort-connectors.xml b/inlong-distribution/src/main/assemblies/sort-connectors.xml
index 369eb28e0..e66c0977d 100644
--- a/inlong-distribution/src/main/assemblies/sort-connectors.xml
+++ b/inlong-distribution/src/main/assemblies/sort-connectors.xml
@@ -1,175 +1,175 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~  Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-
-<assembly>
-    <id>sort-connectors</id>
-
-    <formats>
-        <format>dir</format>
-        <format>tar.gz</format>
-    </formats>
-    <includeBaseDirectory>true</includeBaseDirectory>
-    <fileSets>
-        <!--connector plugin-->
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/hbase/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-hbase-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/hive/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-hive-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/iceberg/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-iceberg-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/jdbc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-jdbc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/kafka/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-kafka-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/mongodb-cdc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-mongodb-cdc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/mysql-cdc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-mysql-cdc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/postgres-cdc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-postgres-cdc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/pulsar/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-pulsar-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/sqlserver-cdc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-sqlserver-cdc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/oracle-cdc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-oracle-cdc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/elasticsearch-6/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-elasticsearch6-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/elasticsearch-7/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-elasticsearch7-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/iceberg-dlc/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-iceberg-dlc-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/tubemq/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-tubemq-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/filesystem/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-filesystem-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <fileSet>
-            <directory>../inlong-sort/sort-connectors/doris/target</directory>
-            <outputDirectory>inlong-sort/connectors</outputDirectory>
-            <includes>
-                <include>sort-connector-doris-${project.version}.jar</include>
-            </includes>
-            <fileMode>0644</fileMode>
-        </fileSet>
-        <!-- module's 3td-licenses, notices-->
-        <fileSet>
-            <directory>../licenses/inlong-sort-connectors</directory>
-            <includes>
-                <include>**/*</include>
-            </includes>
-            <outputDirectory>./</outputDirectory>
-        </fileSet>
-    </fileSets>
-</assembly>
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~  Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<assembly>
+    <id>sort-connectors</id>
+
+    <formats>
+        <format>dir</format>
+        <format>tar.gz</format>
+    </formats>
+    <includeBaseDirectory>true</includeBaseDirectory>
+    <fileSets>
+        <!--connector plugin-->
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/hbase/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-hbase-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/hive/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-hive-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/iceberg/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-iceberg-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/jdbc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-jdbc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/kafka/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-kafka-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/mongodb-cdc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-mongodb-cdc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/mysql-cdc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-mysql-cdc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/postgres-cdc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-postgres-cdc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/pulsar/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-pulsar-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/sqlserver-cdc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-sqlserver-cdc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/oracle-cdc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-oracle-cdc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/elasticsearch-6/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-elasticsearch6-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/elasticsearch-7/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-elasticsearch7-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/iceberg-dlc/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-iceberg-dlc-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/tubemq/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-tubemq-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/filesystem/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-filesystem-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <fileSet>
+            <directory>../inlong-sort/sort-connectors/doris/target</directory>
+            <outputDirectory>inlong-sort/connectors</outputDirectory>
+            <includes>
+                <include>sort-connector-doris-${project.version}.jar</include>
+            </includes>
+            <fileMode>0644</fileMode>
+        </fileSet>
+        <!-- module's 3rd-party licenses, notices -->
+        <fileSet>
+            <directory>../licenses/inlong-sort-connectors</directory>
+            <includes>
+                <include>**/*</include>
+            </includes>
+            <outputDirectory>./</outputDirectory>
+        </fileSet>
+    </fileSets>
+</assembly>
diff --git a/inlong-manager/manager-service/src/test/resources/plugins/manager-plugin-example.jar b/inlong-manager/manager-service/src/test/resources/plugins/manager-plugin-example.jar
index b03d6d205..fa2aaf612 100644
Binary files a/inlong-manager/manager-service/src/test/resources/plugins/manager-plugin-example.jar and b/inlong-manager/manager-service/src/test/resources/plugins/manager-plugin-example.jar differ
diff --git a/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-cpp/release/lib/libdataproxy_sdk.a b/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-cpp/release/lib/libdataproxy_sdk.a
index 184174f68..362a5f39d 100644
Binary files a/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-cpp/release/lib/libdataproxy_sdk.a and b/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-cpp/release/lib/libdataproxy_sdk.a differ
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/DLCConstant.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/DLCConstant.java
index 5130355a0..e4ef7d310 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/DLCConstant.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/DLCConstant.java
@@ -1,80 +1,80 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.constant;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-public class DLCConstant {
-    /**
-     * DLC internet access domain name.
-     */
-    public static final String DLC_ENDPOINT = "dlc.tencentcloudapi.com";
-    // ============================== DLC AUTH PARAMS(Required) =====================================
-    /**
-     * dlc account region
-     */
-    public static final String DLC_REGION = "qcloud.dlc.region";
-    /**
-     * dlc account secret id
-     */
-    public static final String DLC_SECRET_ID = "qcloud.dlc.secret-id";
-    /**
-     * dlc account secret key
-     */
-    public static final String DLC_SECRET_KEY = "qcloud.dlc.secret-key";
-    /**
-     * Current user appid.
-     */
-    public static final String DLC_USER_APPID = "qcloud.dlc.user.appid";
-    /**
-     * Managed account uid.
-     */
-    public static final String DLC_MANAGED_ACCOUNT_UID = "qcloud.dlc.managed.account.uid";
-    /**
-     * dlc jdbc url(optional)
-     */
-    public static final String DLC_JDBC_URL = "qcloud.dlc.jdbc.url";
-
-    // ============================== FS CREDENTIALS AUTH PARAMS =====================================
-    public static final String FS_LAKEFS_IMPL  = "fs.lakefs.impl";
-    public static final String FS_COS_IMPL  = "fs.cosn.impl";
-    public static final String FS_COS_AUTH_PROVIDER  = "fs.cosn.credentials.provider";
-    public static final String FS_COS_REGION  = "fs.cosn.userinfo.region";
-    public static final String FS_COS_SECRET_ID  = "fs.cosn.userinfo.secretId";
-    public static final String FS_COS_SECRET_KEY  = "fs.cosn.userinfo.secretKey";
-
-    public static final String FS_AUTH_DLC_SECRET_ID = "service.secret.id";
-    public static final String FS_AUTH_DLC_SECRET_KEY = "service.secret.key";
-    public static final String FS_AUTH_DLC_REGION  = "service.region";
-    public static final String FS_AUTH_DLC_ACCOUNT_APPID  = "user.appid";
-    public static final String FS_AUTH_DLC_MANAGED_ACCOUNT_UID  = "request.identity.token";
-
-    public static final String DLC_CATALOG_IMPL_CLASS =
-            "org.apache.inlong.sort.iceberg.catalog.hybris.DlcWrappedHybrisCatalog";
-    public static final Map<String, String> DLC_DEFAULT_IMPL =
-            Collections.unmodifiableMap(new HashMap<String, String>() {
-                {
-                    put(FS_LAKEFS_IMPL, "org.apache.hadoop.fs.CosFileSystem");
-                    put(FS_COS_IMPL, "org.apache.hadoop.fs.CosFileSystem");
-                    put(FS_COS_AUTH_PROVIDER, "org.apache.hadoop.fs.auth.DlcCloudCredentialsProvider");
-                }
-            });
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.constant;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class DLCConstant {
+    /**
+     * DLC internet access domain name.
+     */
+    public static final String DLC_ENDPOINT = "dlc.tencentcloudapi.com";
+    // ============================== DLC AUTH PARAMS(Required) =====================================
+    /**
+     * dlc account region
+     */
+    public static final String DLC_REGION = "qcloud.dlc.region";
+    /**
+     * dlc account secret id
+     */
+    public static final String DLC_SECRET_ID = "qcloud.dlc.secret-id";
+    /**
+     * dlc account secret key
+     */
+    public static final String DLC_SECRET_KEY = "qcloud.dlc.secret-key";
+    /**
+     * Current user appid.
+     */
+    public static final String DLC_USER_APPID = "qcloud.dlc.user.appid";
+    /**
+     * Managed account uid.
+     */
+    public static final String DLC_MANAGED_ACCOUNT_UID = "qcloud.dlc.managed.account.uid";
+    /**
+     * dlc jdbc url(optional)
+     */
+    public static final String DLC_JDBC_URL = "qcloud.dlc.jdbc.url";
+
+    // ============================== FS CREDENTIALS AUTH PARAMS =====================================
+    public static final String FS_LAKEFS_IMPL  = "fs.lakefs.impl";
+    public static final String FS_COS_IMPL  = "fs.cosn.impl";
+    public static final String FS_COS_AUTH_PROVIDER  = "fs.cosn.credentials.provider";
+    public static final String FS_COS_REGION  = "fs.cosn.userinfo.region";
+    public static final String FS_COS_SECRET_ID  = "fs.cosn.userinfo.secretId";
+    public static final String FS_COS_SECRET_KEY  = "fs.cosn.userinfo.secretKey";
+
+    public static final String FS_AUTH_DLC_SECRET_ID = "service.secret.id";
+    public static final String FS_AUTH_DLC_SECRET_KEY = "service.secret.key";
+    public static final String FS_AUTH_DLC_REGION  = "service.region";
+    public static final String FS_AUTH_DLC_ACCOUNT_APPID  = "user.appid";
+    public static final String FS_AUTH_DLC_MANAGED_ACCOUNT_UID  = "request.identity.token";
+
+    public static final String DLC_CATALOG_IMPL_CLASS =
+            "org.apache.inlong.sort.iceberg.catalog.hybris.DlcWrappedHybrisCatalog";
+    public static final Map<String, String> DLC_DEFAULT_IMPL =
+            Collections.unmodifiableMap(new HashMap<String, String>() {
+                {
+                    put(FS_LAKEFS_IMPL, "org.apache.hadoop.fs.CosFileSystem");
+                    put(FS_COS_IMPL, "org.apache.hadoop.fs.CosFileSystem");
+                    put(FS_COS_AUTH_PROVIDER, "org.apache.hadoop.fs.auth.DlcCloudCredentialsProvider");
+                }
+            });
+}
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/IcebergConstant.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/IcebergConstant.java
index b43f550cc..34d14b1a7 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/IcebergConstant.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/constant/IcebergConstant.java
@@ -1,54 +1,54 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.constant;
-
-/**
- * Iceberg option constant
- */
-public class IcebergConstant {
-    /**
-     * Iceberg supported catalog type
-     */
-    public enum CatalogType {
-        /**
-         * Data stored in hive metastore.
-         */
-        HIVE,
-        /**
-         * Data stored in hadoop filesystem.
-         */
-        HADOOP,
-        /**
-         * Data stored in hybris metastore.
-         */
-        HYBRIS;
-
-        /**
-         * get catalogType from name
-         */
-        public static CatalogType forName(String name) {
-            for (CatalogType value : values()) {
-                if (value.name().equals(name)) {
-                    return value;
-                }
-            }
-            throw new IllegalArgumentException(String.format("Unsupport catalogType:%s", name));
-        }
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.constant;
+
+/**
+ * Iceberg option constant
+ */
+public class IcebergConstant {
+    /**
+     * Iceberg supported catalog type
+     */
+    public enum CatalogType {
+        /**
+         * Data stored in hive metastore.
+         */
+        HIVE,
+        /**
+         * Data stored in hadoop filesystem.
+         */
+        HADOOP,
+        /**
+         * Data stored in hybris metastore.
+         */
+        HYBRIS;
+
+        /**
+         * get catalogType from name
+         */
+        public static CatalogType forName(String name) {
+            for (CatalogType value : values()) {
+                if (value.name().equals(name)) {
+                    return value;
+                }
+            }
+            throw new IllegalArgumentException(String.format("Unsupported catalogType: %s", name));
+        }
+    }
+}
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNode.java
index 814b8faa6..9cd07af3e 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNode.java
@@ -1,125 +1,125 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.extract;
-
-import com.google.common.base.Preconditions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.InlongMetric;
-import org.apache.inlong.sort.protocol.node.ExtractNode;
-import org.apache.inlong.sort.protocol.node.format.Format;
-import org.apache.inlong.sort.protocol.transformation.WatermarkField;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.util.List;
-import java.util.Map;
-
-@EqualsAndHashCode(callSuper = true)
-@JsonTypeName("pulsarExtract")
-@Data
-public class PulsarExtractNode extends ExtractNode implements InlongMetric {
-    private static final long serialVersionUID = 1L;
-
-    @Nonnull
-    @JsonProperty("topic")
-    private String topic;
-    @JsonProperty("adminUrl")
-    private String adminUrl;
-    @Nonnull
-    @JsonProperty("serviceUrl")
-    private String serviceUrl;
-    @Nonnull
-    @JsonProperty("format")
-    private Format format;
-
-    @JsonProperty("scanStartupMode")
-    private String scanStartupMode;
-
-    @JsonProperty("primaryKey")
-    private String primaryKey;
-
-    @JsonCreator
-    public PulsarExtractNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @Nullable @JsonProperty("watermarkField") WatermarkField watermarkField,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("topic") String topic,
-            @JsonProperty("adminUrl") String adminUrl,
-            @Nonnull @JsonProperty("serviceUrl") String serviceUrl,
-            @Nonnull @JsonProperty("format") Format format,
-            @Nonnull @JsonProperty("scanStartupMode") String scanStartupMode,
-            @JsonProperty("primaryKey") String primaryKey) {
-        super(id, name, fields, watermarkField, properties);
-        this.topic = Preconditions.checkNotNull(topic, "pulsar topic is null.");
-        this.serviceUrl = Preconditions.checkNotNull(serviceUrl, "pulsar serviceUrl is null.");
-        this.format = Preconditions.checkNotNull(format, "pulsar format is null.");
-        this.scanStartupMode = Preconditions.checkNotNull(scanStartupMode,
-                "pulsar scanStartupMode is null.");
-        this.adminUrl = adminUrl;
-        this.primaryKey = primaryKey;
-    }
-
-    /**
-     * Generate the Flink table options for this Pulsar extract node.
-     *
-     * @return the table options
-     */
-    @Override
-    public Map<String, String> tableOptions() {
-        Map<String, String> options = super.tableOptions();
-        if (StringUtils.isEmpty(this.primaryKey)) {
-            options.put("connector", "pulsar-inlong");
-            options.putAll(format.generateOptions(false));
-        } else {
-            options.put("connector", "upsert-pulsar-inlong");
-            options.putAll(format.generateOptions(true));
-        }
-        if (adminUrl != null) {
-            options.put("admin-url", adminUrl);
-        }
-        options.put("generic", "true");
-        options.put("service-url", serviceUrl);
-        options.put("topic", topic);
-        options.put("scan.startup.mode", scanStartupMode);
-
-        return options;
-    }
-
-    @Override
-    public String genTableName() {
-        return String.format("table_%s", super.getId());
-    }
-
-    @Override
-    public String getPrimaryKey() {
-        return primaryKey;
-    }
-
-    @Override
-    public List<FieldInfo> getPartitionFields() {
-        return super.getPartitionFields();
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.extract;
+
+import com.google.common.base.Preconditions;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.InlongMetric;
+import org.apache.inlong.sort.protocol.node.ExtractNode;
+import org.apache.inlong.sort.protocol.node.format.Format;
+import org.apache.inlong.sort.protocol.transformation.WatermarkField;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.util.List;
+import java.util.Map;
+
+@EqualsAndHashCode(callSuper = true)
+@JsonTypeName("pulsarExtract")
+@Data
+public class PulsarExtractNode extends ExtractNode implements InlongMetric {
+    private static final long serialVersionUID = 1L;
+
+    @Nonnull
+    @JsonProperty("topic")
+    private String topic;
+    @JsonProperty("adminUrl")
+    private String adminUrl;
+    @Nonnull
+    @JsonProperty("serviceUrl")
+    private String serviceUrl;
+    @Nonnull
+    @JsonProperty("format")
+    private Format format;
+
+    @JsonProperty("scanStartupMode")
+    private String scanStartupMode;
+
+    @JsonProperty("primaryKey")
+    private String primaryKey;
+
+    @JsonCreator
+    public PulsarExtractNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @Nullable @JsonProperty("watermarkField") WatermarkField watermarkField,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("topic") String topic,
+            @JsonProperty("adminUrl") String adminUrl,
+            @Nonnull @JsonProperty("serviceUrl") String serviceUrl,
+            @Nonnull @JsonProperty("format") Format format,
+            @Nonnull @JsonProperty("scanStartupMode") String scanStartupMode,
+            @JsonProperty("primaryKey") String primaryKey) {
+        super(id, name, fields, watermarkField, properties);
+        this.topic = Preconditions.checkNotNull(topic, "pulsar topic is null.");
+        this.serviceUrl = Preconditions.checkNotNull(serviceUrl, "pulsar serviceUrl is null.");
+        this.format = Preconditions.checkNotNull(format, "pulsar format is null.");
+        this.scanStartupMode = Preconditions.checkNotNull(scanStartupMode,
+                "pulsar scanStartupMode is null.");
+        this.adminUrl = adminUrl;
+        this.primaryKey = primaryKey;
+    }
+
+    /**
+     * Generate the Flink table options for this Pulsar extract node.
+     *
+     * @return the table options
+     */
+    @Override
+    public Map<String, String> tableOptions() {
+        Map<String, String> options = super.tableOptions();
+        if (StringUtils.isEmpty(this.primaryKey)) {
+            options.put("connector", "pulsar-inlong");
+            options.putAll(format.generateOptions(false));
+        } else {
+            options.put("connector", "upsert-pulsar-inlong");
+            options.putAll(format.generateOptions(true));
+        }
+        if (adminUrl != null) {
+            options.put("admin-url", adminUrl);
+        }
+        options.put("generic", "true");
+        options.put("service-url", serviceUrl);
+        options.put("topic", topic);
+        options.put("scan.startup.mode", scanStartupMode);
+
+        return options;
+    }
+
+    @Override
+    public String genTableName() {
+        return String.format("table_%s", super.getId());
+    }
+
+    @Override
+    public String getPrimaryKey() {
+        return primaryKey;
+    }
+
+    @Override
+    public List<FieldInfo> getPartitionFields() {
+        return super.getPartitionFields();
+    }
+}
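
For context, a minimal sketch of how the node above is constructed and what its tableOptions() produces. The topic, URLs, startup mode, empty field list, and the CsvFormat choice are placeholder assumptions, not values taken from this commit; a real node would declare its fields and properties.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.inlong.sort.protocol.node.extract.PulsarExtractNode;
    import org.apache.inlong.sort.protocol.node.format.CsvFormat;

    public class PulsarExtractNodeSketch {
        public static void main(String[] args) {
            // Placeholder values for illustration only.
            PulsarExtractNode node = new PulsarExtractNode(
                    "1", "pulsar_source",
                    Collections.emptyList(),             // fields (left empty for brevity)
                    null,                                // watermarkField
                    new HashMap<>(),                     // properties
                    "persistent://public/default/demo",  // topic
                    "http://localhost:8080",             // adminUrl (optional)
                    "pulsar://localhost:6650",           // serviceUrl
                    new CsvFormat(),                     // format
                    "earliest",                          // scanStartupMode
                    null);                               // no primaryKey -> "pulsar-inlong" connector
            Map<String, String> options = node.tableOptions();
            System.out.println(options.get("connector"));         // pulsar-inlong
            System.out.println(options.get("scan.startup.mode")); // earliest
        }
    }

With a non-empty primaryKey the same call switches to the "upsert-pulsar-inlong" connector and generates the format options in upsert mode.
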
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormat.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormat.java
index deb2f0c39..8c7dad631 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormat.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormat.java
@@ -1,91 +1,91 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.format;
-
-import lombok.Data;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonIgnore;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.common.msg.InLongMsg;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * The {@link InLongMsg} format.
- *
- * @see <a href="https://inlong.apache.org/docs/development/inlong_msg">InLongMsg Format</a>
- */
-@JsonTypeName("inLongMsgFormat")
-@Data
-public class InLongMsgFormat implements Format {
-
-    private static final long serialVersionUID = 1L;
-
-    private static final String IDENTIFIER = "inlong-msg";
-
-    @JsonProperty(value = "innerFormat")
-    private Format innerFormat;
-
-    @JsonProperty(value = "ignoreParseErrors", defaultValue = "false")
-    private Boolean ignoreParseErrors;
-
-    @JsonCreator
-    public InLongMsgFormat(@JsonProperty(value = "innerFormat") Format innerFormat,
-            @JsonProperty(value = "ignoreParseErrors", defaultValue = "false") Boolean ignoreParseErrors) {
-        this.innerFormat = innerFormat;
-        this.ignoreParseErrors = ignoreParseErrors;
-    }
-
-    public InLongMsgFormat() {
-        this(new CsvFormat(), false);
-    }
-
-    @JsonIgnore
-    @Override
-    public String getFormat() {
-        return IDENTIFIER;
-    }
-
-    @Override
-    public String identifier() {
-        return IDENTIFIER;
-    }
-
-    /**
-     * Generate the options for the connector.
-     *
-     * @return the connector options
-     */
-    @Override
-    public Map<String, String> generateOptions() {
-        Map<String, String> options = new HashMap<>(16);
-        options.put("format", getFormat());
-        options.put("inlong-msg.inner.format", innerFormat.getFormat());
-        innerFormat.generateOptions().entrySet()
-                .stream()
-                .filter(entry -> !"format".equals(entry.getKey()))
-                .forEach(entry -> options.put("inlong-msg." + entry.getKey(), entry.getValue()));
-        if (this.ignoreParseErrors != null) {
-            options.put("inlong-msg.ignore-parse-errors", this.ignoreParseErrors.toString());
-        }
-
-        return options;
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.format;
+
+import lombok.Data;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonIgnore;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.common.msg.InLongMsg;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * The {@link InLongMsg} format.
+ *
+ * @see <a href="https://inlong.apache.org/docs/development/inlong_msg">InLongMsg Format</a>
+ */
+@JsonTypeName("inLongMsgFormat")
+@Data
+public class InLongMsgFormat implements Format {
+
+    private static final long serialVersionUID = 1L;
+
+    private static final String IDENTIFIER = "inlong-msg";
+
+    @JsonProperty(value = "innerFormat")
+    private Format innerFormat;
+
+    @JsonProperty(value = "ignoreParseErrors", defaultValue = "false")
+    private Boolean ignoreParseErrors;
+
+    @JsonCreator
+    public InLongMsgFormat(@JsonProperty(value = "innerFormat") Format innerFormat,
+            @JsonProperty(value = "ignoreParseErrors", defaultValue = "false") Boolean ignoreParseErrors) {
+        this.innerFormat = innerFormat;
+        this.ignoreParseErrors = ignoreParseErrors;
+    }
+
+    public InLongMsgFormat() {
+        this(new CsvFormat(), false);
+    }
+
+    @JsonIgnore
+    @Override
+    public String getFormat() {
+        return IDENTIFIER;
+    }
+
+    @Override
+    public String identifier() {
+        return IDENTIFIER;
+    }
+
+    /**
+     * Generate the options for the connector.
+     *
+     * @return the connector options
+     */
+    @Override
+    public Map<String, String> generateOptions() {
+        Map<String, String> options = new HashMap<>(16);
+        options.put("format", getFormat());
+        options.put("inlong-msg.inner.format", innerFormat.getFormat());
+        innerFormat.generateOptions().entrySet()
+                .stream()
+                .filter(entry -> !"format".equals(entry.getKey()))
+                .forEach(entry -> options.put("inlong-msg." + entry.getKey(), entry.getValue()));
+        if (this.ignoreParseErrors != null) {
+            options.put("inlong-msg.ignore-parse-errors", this.ignoreParseErrors.toString());
+        }
+
+        return options;
+    }
+}
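
For context, a small sketch of how the wrapper format above expands into connector options. The CsvFormat inner format and the ignore-parse-errors choice are illustrative assumptions; the exact inner-format keys depend on what the inner format's own generateOptions() returns.

    import java.util.Map;

    import org.apache.inlong.sort.protocol.node.format.CsvFormat;
    import org.apache.inlong.sort.protocol.node.format.InLongMsgFormat;

    public class InLongMsgFormatSketch {
        public static void main(String[] args) {
            // Wrap a CSV inner format; ignoring parse errors is an illustrative choice.
            InLongMsgFormat format = new InLongMsgFormat(new CsvFormat(), true);
            Map<String, String> options = format.generateOptions();
            // Produces "format" = "inlong-msg", "inlong-msg.inner.format" = <inner format identifier>,
            // "inlong-msg.ignore-parse-errors" = "true", plus the inner format's own options
            // re-prefixed with "inlong-msg." (its original "format" key is filtered out).
            options.forEach((key, value) -> System.out.println(key + " = " + value));
        }
    }
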
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
index 53ac4cd1d..e29a077f0 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
@@ -1,123 +1,123 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import com.google.common.base.Preconditions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.NoArgsConstructor;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.InlongMetric;
-import org.apache.inlong.sort.protocol.enums.FilterStrategy;
-import org.apache.inlong.sort.protocol.node.LoadNode;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-import org.apache.inlong.sort.protocol.transformation.FilterFunction;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-/**
- * ClickHouse update operations are heavy: an update takes precedence over all other operations when executed, so
- * performance is poor. This sink is more suitable for batch scenarios than for streaming scenarios where operations
- * are performed frequently.
- */
-@EqualsAndHashCode(callSuper = true)
-@JsonTypeName("clickHouseLoad")
-@Data
-@NoArgsConstructor
-public class ClickHouseLoadNode extends LoadNode implements InlongMetric, Serializable {
-
-    private static final long serialVersionUID = -1L;
-
-    @JsonProperty("tableName")
-    @Nonnull
-    private String tableName;
-
-    @JsonProperty("url")
-    @Nonnull
-    private String url;
-
-    @JsonProperty("userName")
-    @Nonnull
-    private String userName;
-
-    @JsonProperty("passWord")
-    @Nonnull
-    private String password;
-
-    @JsonProperty("primaryKey")
-    private String primaryKey;
-
-    @JsonCreator
-    public ClickHouseLoadNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
-            @JsonProperty("filters") List<FilterFunction> filters,
-            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
-            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("tableName") String tableName,
-            @Nonnull @JsonProperty("url") String url,
-            @Nonnull @JsonProperty("userName") String userName,
-            @Nonnull @JsonProperty("passWord") String password,
-            @JsonProperty("primaryKey") String primaryKey
-    ) {
-        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
-        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
-        this.url = Preconditions.checkNotNull(url, "url is null");
-        this.userName = Preconditions.checkNotNull(userName, "userName is null");
-        this.password = Preconditions.checkNotNull(password, "password is null");
-        this.primaryKey = primaryKey;
-    }
-
-    @Override
-    public Map<String, String> tableOptions() {
-        Map<String, String> options = super.tableOptions();
-        options.put("connector", "jdbc-inlong");
-        options.put("dialect-impl", "org.apache.inlong.sort.jdbc.dialect.ClickHouseDialect");
-        options.put("url", url);
-        options.put("table-name", tableName);
-        options.put("username", userName);
-        options.put("password", password);
-        return options;
-    }
-
-    @Override
-    public String genTableName() {
-        return tableName;
-    }
-
-    @Override
-    public String getPrimaryKey() {
-        return primaryKey;
-    }
-
-    @Override
-    public List<FieldInfo> getPartitionFields() {
-        return super.getPartitionFields();
-    }
-
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import com.google.common.base.Preconditions;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.InlongMetric;
+import org.apache.inlong.sort.protocol.enums.FilterStrategy;
+import org.apache.inlong.sort.protocol.node.LoadNode;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+import org.apache.inlong.sort.protocol.transformation.FilterFunction;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * ClickHouse update operations are heavy: an update takes precedence over all other operations when executed, so
+ * performance is poor. This sink is more suitable for batch scenarios than for streaming scenarios where operations
+ * are performed frequently.
+ */
+@EqualsAndHashCode(callSuper = true)
+@JsonTypeName("clickHouseLoad")
+@Data
+@NoArgsConstructor
+public class ClickHouseLoadNode extends LoadNode implements InlongMetric, Serializable {
+
+    private static final long serialVersionUID = -1L;
+
+    @JsonProperty("tableName")
+    @Nonnull
+    private String tableName;
+
+    @JsonProperty("url")
+    @Nonnull
+    private String url;
+
+    @JsonProperty("userName")
+    @Nonnull
+    private String userName;
+
+    @JsonProperty("passWord")
+    @Nonnull
+    private String password;
+
+    @JsonProperty("primaryKey")
+    private String primaryKey;
+
+    @JsonCreator
+    public ClickHouseLoadNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
+            @JsonProperty("filters") List<FilterFunction> filters,
+            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
+            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("tableName") String tableName,
+            @Nonnull @JsonProperty("url") String url,
+            @Nonnull @JsonProperty("userName") String userName,
+            @Nonnull @JsonProperty("passWord") String password,
+            @JsonProperty("primaryKey") String primaryKey
+    ) {
+        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
+        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
+        this.url = Preconditions.checkNotNull(url, "url is null");
+        this.userName = Preconditions.checkNotNull(userName, "userName is null");
+        this.password = Preconditions.checkNotNull(password, "password is null");
+        this.primaryKey = primaryKey;
+    }
+
+    @Override
+    public Map<String, String> tableOptions() {
+        Map<String, String> options = super.tableOptions();
+        options.put("connector", "jdbc-inlong");
+        options.put("dialect-impl", "org.apache.inlong.sort.jdbc.dialect.ClickHouseDialect");
+        options.put("url", url);
+        options.put("table-name", tableName);
+        options.put("username", userName);
+        options.put("password", password);
+        return options;
+    }
+
+    @Override
+    public String genTableName() {
+        return tableName;
+    }
+
+    @Override
+    public String getPrimaryKey() {
+        return primaryKey;
+    }
+
+    @Override
+    public List<FieldInfo> getPartitionFields() {
+        return super.getPartitionFields();
+    }
+
+}
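
For context, a minimal usage sketch for the node above; the JDBC URL, credentials, table name, and the empty field/relation lists are placeholders, not values from this commit.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.inlong.sort.protocol.node.load.ClickHouseLoadNode;

    public class ClickHouseLoadNodeSketch {
        public static void main(String[] args) {
            // Placeholder connection settings for illustration only.
            ClickHouseLoadNode node = new ClickHouseLoadNode(
                    "2", "clickhouse_sink",
                    Collections.emptyList(),                     // fields (left empty for brevity)
                    Collections.emptyList(),                     // fieldRelations
                    null, null,                                  // filters, filterStrategy
                    1,                                           // sinkParallelism
                    new HashMap<>(),                             // properties
                    "demo_table",
                    "jdbc:clickhouse://127.0.0.1:8123/demo_db",
                    "demo_user", "demo_password",
                    "id");                                       // primaryKey
            Map<String, String> options = node.tableOptions();
            System.out.println(options.get("connector"));    // jdbc-inlong
            System.out.println(options.get("dialect-impl")); // org.apache.inlong.sort.jdbc.dialect.ClickHouseDialect
        }
    }
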
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNode.java
index 109cd2544..dcb55426a 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNode.java
@@ -1,144 +1,144 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import com.google.common.base.Preconditions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.NoArgsConstructor;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.InlongMetric;
-import org.apache.inlong.sort.protocol.constant.DLCConstant;
-import org.apache.inlong.sort.protocol.constant.IcebergConstant.CatalogType;
-import org.apache.inlong.sort.protocol.enums.FilterStrategy;
-import org.apache.inlong.sort.protocol.node.LoadNode;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-import org.apache.inlong.sort.protocol.transformation.FilterFunction;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-@JsonTypeName("dlcIcebergLoad")
-@Data
-@NoArgsConstructor
-@EqualsAndHashCode(callSuper = true)
-public class DLCIcebergLoadNode extends LoadNode implements InlongMetric, Serializable {
-
-    private static final long serialVersionUID = -1L;
-
-    @JsonProperty("tableName")
-    @Nonnull
-    private String tableName;
-
-    @JsonProperty("dbName")
-    @Nonnull
-    private String dbName;
-
-    @JsonProperty("primaryKey")
-    private String primaryKey;
-
-    @JsonProperty("uri")
-    private String uri;
-
-    @JsonProperty("warehouse")
-    private String warehouse;
-
-    @JsonCreator
-    public DLCIcebergLoadNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelationShips,
-            @JsonProperty("filters") List<FilterFunction> filters,
-            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
-            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("dbName") String dbName,
-            @Nonnull @JsonProperty("tableName") String tableName,
-            @JsonProperty("primaryKey") String primaryKey,
-            @JsonProperty("uri") String uri,
-            @JsonProperty("warehouse") String warehouse) {
-        super(id, name, fields, fieldRelationShips, filters, filterStrategy, sinkParallelism, properties);
-        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
-        this.dbName = Preconditions.checkNotNull(dbName, "db name is null");
-        this.primaryKey = primaryKey;
-        this.uri = uri == null ? DLCConstant.DLC_ENDPOINT : uri;
-        this.warehouse = warehouse;
-        validateAuth(properties);
-    }
-
-    @Override
-    public Map<String, String> tableOptions() {
-        Map<String, String> options = super.tableOptions();
-        options.put("connector", "dlc-inlong");
-        options.put("catalog-database", dbName);
-        options.put("catalog-table", tableName);
-        options.put("default-database", dbName);
-        options.put("catalog-name", CatalogType.HYBRIS.name());
-        options.put("catalog-impl", DLCConstant.DLC_CATALOG_IMPL_CLASS);
-        if (null != uri) {
-            options.put("uri", uri);
-        }
-        if (null != warehouse) {
-            options.put("warehouse", warehouse);
-        }
-        options.putAll(DLCConstant.DLC_DEFAULT_IMPL);
-        // for filesystem auth
-        options.put(DLCConstant.FS_COS_REGION, options.get(DLCConstant.DLC_REGION));
-        options.put(DLCConstant.FS_COS_SECRET_ID, options.get(DLCConstant.DLC_SECRET_ID));
-        options.put(DLCConstant.FS_COS_SECRET_KEY, options.get(DLCConstant.DLC_SECRET_KEY));
-
-        options.put(DLCConstant.FS_AUTH_DLC_SECRET_ID, options.get(DLCConstant.DLC_SECRET_ID));
-        options.put(DLCConstant.FS_AUTH_DLC_SECRET_KEY, options.get(DLCConstant.DLC_SECRET_KEY));
-        options.put(DLCConstant.FS_AUTH_DLC_REGION, options.get(DLCConstant.DLC_REGION));
-        options.put(DLCConstant.FS_AUTH_DLC_ACCOUNT_APPID, options.get(DLCConstant.DLC_USER_APPID));
-        options.put(DLCConstant.FS_AUTH_DLC_MANAGED_ACCOUNT_UID, options.get(DLCConstant.DLC_MANAGED_ACCOUNT_UID));
-        return options;
-    }
-
-    @Override
-    public String genTableName() {
-        return tableName;
-    }
-
-    @Override
-    public String getPrimaryKey() {
-        return primaryKey;
-    }
-
-    @Override
-    public List<FieldInfo> getPartitionFields() {
-        return super.getPartitionFields();
-    }
-
-    private void validateAuth(Map<String, String> properties) {
-        Preconditions.checkNotNull(properties);
-        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_SECRET_ID), "dlc secret-id is null");
-        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_SECRET_KEY), "dlc secret-key is null");
-        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_REGION), "dlc region is null");
-        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_USER_APPID), "dlc user appid is null");
-        Preconditions.checkNotNull(
-                properties.get(DLCConstant.DLC_MANAGED_ACCOUNT_UID), "dlc managed account uid is null");
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import com.google.common.base.Preconditions;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.InlongMetric;
+import org.apache.inlong.sort.protocol.constant.DLCConstant;
+import org.apache.inlong.sort.protocol.constant.IcebergConstant.CatalogType;
+import org.apache.inlong.sort.protocol.enums.FilterStrategy;
+import org.apache.inlong.sort.protocol.node.LoadNode;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+import org.apache.inlong.sort.protocol.transformation.FilterFunction;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+@JsonTypeName("dlcIcebergLoad")
+@Data
+@NoArgsConstructor
+@EqualsAndHashCode(callSuper = true)
+public class DLCIcebergLoadNode extends LoadNode implements InlongMetric, Serializable {
+
+    private static final long serialVersionUID = -1L;
+
+    @JsonProperty("tableName")
+    @Nonnull
+    private String tableName;
+
+    @JsonProperty("dbName")
+    @Nonnull
+    private String dbName;
+
+    @JsonProperty("primaryKey")
+    private String primaryKey;
+
+    @JsonProperty("uri")
+    private String uri;
+
+    @JsonProperty("warehouse")
+    private String warehouse;
+
+    @JsonCreator
+    public DLCIcebergLoadNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelationShips,
+            @JsonProperty("filters") List<FilterFunction> filters,
+            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
+            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("dbName") String dbName,
+            @Nonnull @JsonProperty("tableName") String tableName,
+            @JsonProperty("primaryKey") String primaryKey,
+            @JsonProperty("uri") String uri,
+            @JsonProperty("warehouse") String warehouse) {
+        super(id, name, fields, fieldRelationShips, filters, filterStrategy, sinkParallelism, properties);
+        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
+        this.dbName = Preconditions.checkNotNull(dbName, "db name is null");
+        this.primaryKey = primaryKey;
+        this.uri = uri == null ? DLCConstant.DLC_ENDPOINT : uri;
+        this.warehouse = warehouse;
+        validateAuth(properties);
+    }
+
+    @Override
+    public Map<String, String> tableOptions() {
+        Map<String, String> options = super.tableOptions();
+        options.put("connector", "dlc-inlong");
+        options.put("catalog-database", dbName);
+        options.put("catalog-table", tableName);
+        options.put("default-database", dbName);
+        options.put("catalog-name", CatalogType.HYBRIS.name());
+        options.put("catalog-impl", DLCConstant.DLC_CATALOG_IMPL_CLASS);
+        if (null != uri) {
+            options.put("uri", uri);
+        }
+        if (null != warehouse) {
+            options.put("warehouse", warehouse);
+        }
+        options.putAll(DLCConstant.DLC_DEFAULT_IMPL);
+        // for filesystem auth
+        options.put(DLCConstant.FS_COS_REGION, options.get(DLCConstant.DLC_REGION));
+        options.put(DLCConstant.FS_COS_SECRET_ID, options.get(DLCConstant.DLC_SECRET_ID));
+        options.put(DLCConstant.FS_COS_SECRET_KEY, options.get(DLCConstant.DLC_SECRET_KEY));
+
+        options.put(DLCConstant.FS_AUTH_DLC_SECRET_ID, options.get(DLCConstant.DLC_SECRET_ID));
+        options.put(DLCConstant.FS_AUTH_DLC_SECRET_KEY, options.get(DLCConstant.DLC_SECRET_KEY));
+        options.put(DLCConstant.FS_AUTH_DLC_REGION, options.get(DLCConstant.DLC_REGION));
+        options.put(DLCConstant.FS_AUTH_DLC_ACCOUNT_APPID, options.get(DLCConstant.DLC_USER_APPID));
+        options.put(DLCConstant.FS_AUTH_DLC_MANAGED_ACCOUNT_UID, options.get(DLCConstant.DLC_MANAGED_ACCOUNT_UID));
+        return options;
+    }
+
+    @Override
+    public String genTableName() {
+        return tableName;
+    }
+
+    @Override
+    public String getPrimaryKey() {
+        return primaryKey;
+    }
+
+    @Override
+    public List<FieldInfo> getPartitionFields() {
+        return super.getPartitionFields();
+    }
+
+    private void validateAuth(Map<String, String> properties) {
+        Preconditions.checkNotNull(properties);
+        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_SECRET_ID), "dlc secret-id is null");
+        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_SECRET_KEY), "dlc secret-key is null");
+        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_REGION), "dlc region is null");
+        Preconditions.checkNotNull(properties.get(DLCConstant.DLC_USER_APPID), "dlc user appid is null");
+        Preconditions.checkNotNull(
+                properties.get(DLCConstant.DLC_MANAGED_ACCOUNT_UID), "dlc managed account uid is null");
+    }
+}
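
For context, a minimal sketch of the node above, showing that validateAuth() requires the DLC credentials to be present in properties before tableOptions() can be built. Every credential, region, database, table, and warehouse value below is a placeholder.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.inlong.sort.protocol.constant.DLCConstant;
    import org.apache.inlong.sort.protocol.node.load.DLCIcebergLoadNode;

    public class DLCIcebergLoadNodeSketch {
        public static void main(String[] args) {
            // validateAuth() checks these keys; all values are placeholders.
            Map<String, String> properties = new HashMap<>();
            properties.put(DLCConstant.DLC_SECRET_ID, "my-secret-id");
            properties.put(DLCConstant.DLC_SECRET_KEY, "my-secret-key");
            properties.put(DLCConstant.DLC_REGION, "my-region");
            properties.put(DLCConstant.DLC_USER_APPID, "my-appid");
            properties.put(DLCConstant.DLC_MANAGED_ACCOUNT_UID, "my-managed-uid");

            DLCIcebergLoadNode node = new DLCIcebergLoadNode(
                    "3", "dlc_sink",
                    Collections.emptyList(),       // fields (left empty for brevity)
                    Collections.emptyList(),       // fieldRelations
                    null, null,                    // filters, filterStrategy
                    1,                             // sinkParallelism
                    properties,
                    "demo_db", "demo_table",
                    "id",                          // primaryKey
                    null,                          // uri -> defaults to DLCConstant.DLC_ENDPOINT
                    "lakefs://demo-warehouse");    // warehouse (placeholder)
            Map<String, String> options = node.tableOptions();
            System.out.println(options.get("connector")); // dlc-inlong
        }
    }
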
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNode.java
index 85992a082..a281f24fe 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNode.java
@@ -1,177 +1,177 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import com.google.common.base.Preconditions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.NoArgsConstructor;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonInclude;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonInclude.Include;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.InlongMetric;
-import org.apache.inlong.sort.protocol.constant.DorisConstant;
-import org.apache.inlong.sort.protocol.enums.FilterStrategy;
-import org.apache.inlong.sort.protocol.node.LoadNode;
-import org.apache.inlong.sort.protocol.node.format.Format;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-import org.apache.inlong.sort.protocol.transformation.FilterFunction;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_DATABASE_PATTERN;
-import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_ENABLE;
-import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_FORMAT;
-import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_TABLE_PATTERN;
-
-/**
- * Doris load node based on flink-doris-connector-1.13.5_2.11.
- */
-@EqualsAndHashCode(callSuper = true)
-@JsonTypeName("dorisLoadNode")
-@JsonInclude(Include.NON_NULL)
-@Data
-@NoArgsConstructor
-public class DorisLoadNode extends LoadNode implements InlongMetric, Serializable {
-
-    private static final long serialVersionUID = -8002903269814211382L;
-
-    @JsonProperty("feNodes")
-    @Nonnull
-    private String feNodes;
-
-    @JsonProperty("username")
-    @Nonnull
-    private String userName;
-
-    @JsonProperty("password")
-    @Nonnull
-    private String password;
-
-    @JsonProperty("tableIdentifier")
-    @Nullable
-    private String tableIdentifier;
-
-    @JsonProperty("primaryKey")
-    private String primaryKey;
-
-    @Nullable
-    @JsonProperty("sinkMultipleEnable")
-    private Boolean sinkMultipleEnable = false;
-
-    @Nullable
-    @JsonProperty("sinkMultipleFormat")
-    private Format sinkMultipleFormat;
-
-    @Nullable
-    @JsonProperty("databasePattern")
-    private String databasePattern;
-
-    @Nullable
-    @JsonProperty("tablePattern")
-    private String tablePattern;
-
-    public DorisLoadNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
-            @JsonProperty("filters") List<FilterFunction> filters,
-            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
-            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("feNodes") String feNodes,
-            @Nonnull @JsonProperty("username") String userName,
-            @Nonnull @JsonProperty("password") String password,
-            @Nonnull @JsonProperty("tableIdentifier") String tableIdentifier,
-            @JsonProperty("primaryKey") String primaryKey) {
-        this(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties, feNodes, userName,
-                password, tableIdentifier, primaryKey, null, null,
-                null, null);
-    }
-
-    @JsonCreator
-    public DorisLoadNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
-            @JsonProperty("filters") List<FilterFunction> filters,
-            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
-            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("feNodes") String feNodes,
-            @Nonnull @JsonProperty("username") String userName,
-            @Nonnull @JsonProperty("password") String password,
-            @Nullable @JsonProperty("tableIdentifier") String tableIdentifier,
-            @JsonProperty("primaryKey") String primaryKey,
-            @Nullable @JsonProperty(value = "sinkMultipleEnable", defaultValue = "false") Boolean sinkMultipleEnable,
-            @Nullable @JsonProperty("sinkMultipleFormat") Format sinkMultipleFormat,
-            @Nullable @JsonProperty("databasePattern") String databasePattern,
-            @Nullable @JsonProperty("tablePattern") String tablePattern) {
-        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
-        this.feNodes = Preconditions.checkNotNull(feNodes, "feNodes is null");
-        this.userName = Preconditions.checkNotNull(userName, "username is null");
-        this.password = Preconditions.checkNotNull(password, "password is null");
-        this.primaryKey = primaryKey;
-        this.sinkMultipleEnable = sinkMultipleEnable;
-        if (sinkMultipleEnable == null || !sinkMultipleEnable) {
-            this.tableIdentifier = Preconditions.checkNotNull(tableIdentifier, "tableIdentifier is null");
-        } else {
-            this.databasePattern = Preconditions.checkNotNull(databasePattern, "databasePattern is null");
-            this.tablePattern = Preconditions.checkNotNull(tablePattern, "tablePattern is null");
-            this.sinkMultipleFormat = Preconditions.checkNotNull(sinkMultipleFormat,
-                    "sinkMultipleFormat is null");
-        }
-    }
-
-    @Override
-    public Map<String, String> tableOptions() {
-        Map<String, String> options = super.tableOptions();
-        options.put(DorisConstant.CONNECTOR, "doris-inlong");
-        options.put(DorisConstant.FE_NODES, feNodes);
-        options.put(DorisConstant.USERNAME, userName);
-        options.put(DorisConstant.PASSWORD, password);
-        if (sinkMultipleEnable != null && sinkMultipleEnable) {
-            options.put(SINK_MULTIPLE_ENABLE, sinkMultipleEnable.toString());
-            options.put(SINK_MULTIPLE_FORMAT, Objects.requireNonNull(sinkMultipleFormat).identifier());
-            options.put(SINK_MULTIPLE_DATABASE_PATTERN, databasePattern);
-            options.put(SINK_MULTIPLE_TABLE_PATTERN, tablePattern);
-        } else {
-            options.put(SINK_MULTIPLE_ENABLE, "false");
-            options.put(DorisConstant.TABLE_IDENTIFIER, tableIdentifier);
-        }
-        return options;
-    }
-
-    @Override
-    public String genTableName() {
-        return String.format("table_%s", super.getId());
-    }
-
-    @Override
-    public String getPrimaryKey() {
-        return primaryKey;
-    }
-
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import com.google.common.base.Preconditions;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonInclude;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonInclude.Include;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.InlongMetric;
+import org.apache.inlong.sort.protocol.constant.DorisConstant;
+import org.apache.inlong.sort.protocol.enums.FilterStrategy;
+import org.apache.inlong.sort.protocol.node.LoadNode;
+import org.apache.inlong.sort.protocol.node.format.Format;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+import org.apache.inlong.sort.protocol.transformation.FilterFunction;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_DATABASE_PATTERN;
+import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_ENABLE;
+import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_FORMAT;
+import static org.apache.inlong.sort.protocol.constant.DorisConstant.SINK_MULTIPLE_TABLE_PATTERN;
+
+/**
+ * Doris load node based on flink-doris-connector-1.13.5_2.11.
+ */
+@EqualsAndHashCode(callSuper = true)
+@JsonTypeName("dorisLoadNode")
+@JsonInclude(Include.NON_NULL)
+@Data
+@NoArgsConstructor
+public class DorisLoadNode extends LoadNode implements InlongMetric, Serializable {
+
+    private static final long serialVersionUID = -8002903269814211382L;
+
+    @JsonProperty("feNodes")
+    @Nonnull
+    private String feNodes;
+
+    @JsonProperty("username")
+    @Nonnull
+    private String userName;
+
+    @JsonProperty("password")
+    @Nonnull
+    private String password;
+
+    @JsonProperty("tableIdentifier")
+    @Nullable
+    private String tableIdentifier;
+
+    @JsonProperty("primaryKey")
+    private String primaryKey;
+
+    @Nullable
+    @JsonProperty("sinkMultipleEnable")
+    private Boolean sinkMultipleEnable = false;
+
+    @Nullable
+    @JsonProperty("sinkMultipleFormat")
+    private Format sinkMultipleFormat;
+
+    @Nullable
+    @JsonProperty("databasePattern")
+    private String databasePattern;
+
+    @Nullable
+    @JsonProperty("tablePattern")
+    private String tablePattern;
+
+    public DorisLoadNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
+            @JsonProperty("filters") List<FilterFunction> filters,
+            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
+            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("feNodes") String feNodes,
+            @Nonnull @JsonProperty("username") String userName,
+            @Nonnull @JsonProperty("password") String password,
+            @Nonnull @JsonProperty("tableIdentifier") String tableIdentifier,
+            @JsonProperty("primaryKey") String primaryKey) {
+        this(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties, feNodes, userName,
+                password, tableIdentifier, primaryKey, null, null,
+                null, null);
+    }
+
+    @JsonCreator
+    public DorisLoadNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
+            @JsonProperty("filters") List<FilterFunction> filters,
+            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
+            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("feNodes") String feNodes,
+            @Nonnull @JsonProperty("username") String userName,
+            @Nonnull @JsonProperty("password") String password,
+            @Nullable @JsonProperty("tableIdentifier") String tableIdentifier,
+            @JsonProperty("primaryKey") String primaryKey,
+            @Nullable @JsonProperty(value = "sinkMultipleEnable", defaultValue = "false") Boolean sinkMultipleEnable,
+            @Nullable @JsonProperty("sinkMultipleFormat") Format sinkMultipleFormat,
+            @Nullable @JsonProperty("databasePattern") String databasePattern,
+            @Nullable @JsonProperty("tablePattern") String tablePattern) {
+        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
+        this.feNodes = Preconditions.checkNotNull(feNodes, "feNodes is null");
+        this.userName = Preconditions.checkNotNull(userName, "username is null");
+        this.password = Preconditions.checkNotNull(password, "password is null");
+        this.primaryKey = primaryKey;
+        this.sinkMultipleEnable = sinkMultipleEnable;
+        if (sinkMultipleEnable == null || !sinkMultipleEnable) {
+            this.tableIdentifier = Preconditions.checkNotNull(tableIdentifier, "tableIdentifier is null");
+        } else {
+            this.databasePattern = Preconditions.checkNotNull(databasePattern, "databasePattern is null");
+            this.tablePattern = Preconditions.checkNotNull(tablePattern, "tablePattern is null");
+            this.sinkMultipleFormat = Preconditions.checkNotNull(sinkMultipleFormat,
+                    "sinkMultipleFormat is null");
+        }
+    }
+
+    @Override
+    public Map<String, String> tableOptions() {
+        Map<String, String> options = super.tableOptions();
+        options.put(DorisConstant.CONNECTOR, "doris-inlong");
+        options.put(DorisConstant.FE_NODES, feNodes);
+        options.put(DorisConstant.USERNAME, userName);
+        options.put(DorisConstant.PASSWORD, password);
+        if (sinkMultipleEnable != null && sinkMultipleEnable) {
+            options.put(SINK_MULTIPLE_ENABLE, sinkMultipleEnable.toString());
+            options.put(SINK_MULTIPLE_FORMAT, Objects.requireNonNull(sinkMultipleFormat).identifier());
+            options.put(SINK_MULTIPLE_DATABASE_PATTERN, databasePattern);
+            options.put(SINK_MULTIPLE_TABLE_PATTERN, tablePattern);
+        } else {
+            options.put(SINK_MULTIPLE_ENABLE, "false");
+            options.put(DorisConstant.TABLE_IDENTIFIER, tableIdentifier);
+        }
+        return options;
+    }
+
+    @Override
+    public String genTableName() {
+        return String.format("table_%s", super.getId());
+    }
+
+    @Override
+    public String getPrimaryKey() {
+        return primaryKey;
+    }
+
+}
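
For context, a minimal sketch of the single-table form of the node above (multiple-sink mode disabled); the FE address, credentials, and table identifier are placeholders. In multiple-sink mode the longer @JsonCreator constructor would instead require databasePattern, tablePattern, and sinkMultipleFormat.

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.inlong.sort.protocol.node.load.DorisLoadNode;

    public class DorisLoadNodeSketch {
        public static void main(String[] args) {
            // Single-table mode: tableIdentifier is mandatory; placeholders throughout.
            DorisLoadNode node = new DorisLoadNode(
                    "4", "doris_sink",
                    Collections.emptyList(),       // fields (left empty for brevity)
                    Collections.emptyList(),       // fieldRelations
                    null, null,                    // filters, filterStrategy
                    1,                             // sinkParallelism
                    new HashMap<>(),               // properties
                    "127.0.0.1:8030",              // feNodes
                    "root", "doris_password",      // username / password
                    "demo_db.demo_table",          // tableIdentifier (db.table)
                    "id");                         // primaryKey
            // In this mode tableOptions() sets the multiple-sink switch to "false"
            // and fills in the table identifier alongside the FE/credential options.
            Map<String, String> options = node.tableOptions();
            options.forEach((key, value) -> System.out.println(key + " = " + value));
        }
    }
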
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/IcebergLoadNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/IcebergLoadNode.java
index 1b1da5493..dcf1dcfc9 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/IcebergLoadNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/IcebergLoadNode.java
@@ -1,130 +1,130 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import com.google.common.base.Preconditions;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.NoArgsConstructor;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.InlongMetric;
-import org.apache.inlong.sort.protocol.constant.IcebergConstant;
-import org.apache.inlong.sort.protocol.constant.IcebergConstant.CatalogType;
-import org.apache.inlong.sort.protocol.enums.FilterStrategy;
-import org.apache.inlong.sort.protocol.node.LoadNode;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-import org.apache.inlong.sort.protocol.transformation.FilterFunction;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-@JsonTypeName("icebergLoad")
-@Data
-@NoArgsConstructor
-@EqualsAndHashCode(callSuper = true)
-public class IcebergLoadNode extends LoadNode implements InlongMetric, Serializable {
-
-    private static final long serialVersionUID = -1L;
-
-    @JsonProperty("tableName")
-    @Nonnull
-    private String tableName;
-
-    @JsonProperty("dbName")
-    @Nonnull
-    private String dbName;
-
-    @JsonProperty("primaryKey")
-    private String primaryKey;
-
-    @JsonProperty("catalogType")
-    private IcebergConstant.CatalogType catalogType;
-
-    @JsonProperty("uri")
-    private String uri;
-
-    @JsonProperty("warehouse")
-    private String warehouse;
-
-    @JsonCreator
-    public IcebergLoadNode(@JsonProperty("id") String id,
-            @JsonProperty("name") String name,
-            @JsonProperty("fields") List<FieldInfo> fields,
-            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
-            @JsonProperty("filters") List<FilterFunction> filters,
-            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
-            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
-            @JsonProperty("properties") Map<String, String> properties,
-            @Nonnull @JsonProperty("dbName") String dbName,
-            @Nonnull @JsonProperty("tableName") String tableName,
-            @JsonProperty("primaryKey") String primaryKey,
-            @JsonProperty("catalogType") IcebergConstant.CatalogType catalogType,
-            @JsonProperty("uri") String uri,
-            @JsonProperty("warehouse") String warehouse) {
-        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
-        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
-        this.dbName = Preconditions.checkNotNull(dbName, "db name is null");
-        this.primaryKey = primaryKey;
-        this.catalogType = catalogType == null ? CatalogType.HIVE : catalogType;
-        this.uri = uri;
-        this.warehouse = warehouse;
-    }
-
-    @Override
-    public Map<String, String> tableOptions() {
-        Map<String, String> options = super.tableOptions();
-        options.put("connector", "iceberg-inlong");
-        // for test sink.ignore.changelog
-        // options.put("sink.ignore.changelog", "true");
-        options.put("catalog-database", dbName);
-        options.put("catalog-table", tableName);
-        options.put("default-database", dbName);
-        options.put("catalog-type", catalogType.name());
-        options.put("catalog-name", catalogType.name());
-        if (null != uri) {
-            options.put("uri", uri);
-        }
-        if (null != warehouse) {
-            options.put("warehouse", warehouse);
-        }
-        return options;
-    }
-
-    @Override
-    public String genTableName() {
-        return tableName;
-    }
-
-    @Override
-    public String getPrimaryKey() {
-        return primaryKey;
-    }
-
-    @Override
-    public List<FieldInfo> getPartitionFields() {
-        return super.getPartitionFields();
-    }
-
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import com.google.common.base.Preconditions;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.InlongMetric;
+import org.apache.inlong.sort.protocol.constant.IcebergConstant;
+import org.apache.inlong.sort.protocol.constant.IcebergConstant.CatalogType;
+import org.apache.inlong.sort.protocol.enums.FilterStrategy;
+import org.apache.inlong.sort.protocol.node.LoadNode;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+import org.apache.inlong.sort.protocol.transformation.FilterFunction;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+@JsonTypeName("icebergLoad")
+@Data
+@NoArgsConstructor
+@EqualsAndHashCode(callSuper = true)
+public class IcebergLoadNode extends LoadNode implements InlongMetric, Serializable {
+
+    private static final long serialVersionUID = -1L;
+
+    @JsonProperty("tableName")
+    @Nonnull
+    private String tableName;
+
+    @JsonProperty("dbName")
+    @Nonnull
+    private String dbName;
+
+    @JsonProperty("primaryKey")
+    private String primaryKey;
+
+    @JsonProperty("catalogType")
+    private IcebergConstant.CatalogType catalogType;
+
+    @JsonProperty("uri")
+    private String uri;
+
+    @JsonProperty("warehouse")
+    private String warehouse;
+
+    @JsonCreator
+    public IcebergLoadNode(@JsonProperty("id") String id,
+            @JsonProperty("name") String name,
+            @JsonProperty("fields") List<FieldInfo> fields,
+            @JsonProperty("fieldRelations") List<FieldRelation> fieldRelations,
+            @JsonProperty("filters") List<FilterFunction> filters,
+            @JsonProperty("filterStrategy") FilterStrategy filterStrategy,
+            @Nullable @JsonProperty("sinkParallelism") Integer sinkParallelism,
+            @JsonProperty("properties") Map<String, String> properties,
+            @Nonnull @JsonProperty("dbName") String dbName,
+            @Nonnull @JsonProperty("tableName") String tableName,
+            @JsonProperty("primaryKey") String primaryKey,
+            @JsonProperty("catalogType") IcebergConstant.CatalogType catalogType,
+            @JsonProperty("uri") String uri,
+            @JsonProperty("warehouse") String warehouse) {
+        super(id, name, fields, fieldRelations, filters, filterStrategy, sinkParallelism, properties);
+        this.tableName = Preconditions.checkNotNull(tableName, "table name is null");
+        this.dbName = Preconditions.checkNotNull(dbName, "db name is null");
+        this.primaryKey = primaryKey;
+        this.catalogType = catalogType == null ? CatalogType.HIVE : catalogType;
+        this.uri = uri;
+        this.warehouse = warehouse;
+    }
+
+    @Override
+    public Map<String, String> tableOptions() {
+        Map<String, String> options = super.tableOptions();
+        options.put("connector", "iceberg-inlong");
+        // for test sink.ignore.changelog
+        // options.put("sink.ignore.changelog", "true");
+        options.put("catalog-database", dbName);
+        options.put("catalog-table", tableName);
+        options.put("default-database", dbName);
+        options.put("catalog-type", catalogType.name());
+        options.put("catalog-name", catalogType.name());
+        if (null != uri) {
+            options.put("uri", uri);
+        }
+        if (null != warehouse) {
+            options.put("warehouse", warehouse);
+        }
+        return options;
+    }
+
+    @Override
+    public String genTableName() {
+        return tableName;
+    }
+
+    @Override
+    public String getPrimaryKey() {
+        return primaryKey;
+    }
+
+    @Override
+    public List<FieldInfo> getPartitionFields() {
+        return super.getPartitionFields();
+    }
+
+}
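
A minimal sketch, assuming illustrative database, table, metastore URI, and warehouse values, of how the IcebergLoadNode restored above resolves into Flink connector options through tableOptions(); the class name of the sketch itself is hypothetical:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.inlong.sort.formats.common.StringFormatInfo;
import org.apache.inlong.sort.protocol.FieldInfo;
import org.apache.inlong.sort.protocol.constant.IcebergConstant.CatalogType;
import org.apache.inlong.sort.protocol.node.load.IcebergLoadNode;
import org.apache.inlong.sort.protocol.transformation.FieldRelation;

public class IcebergLoadNodeOptionsSketch {

    public static void main(String[] args) {
        // One field mapped onto itself, mirroring the style of the test classes below.
        FieldInfo id = new FieldInfo("id", new StringFormatInfo());
        // Database, table, URI and warehouse values here are illustrative assumptions.
        IcebergLoadNode node = new IcebergLoadNode(
                "1", "iceberg_output",
                Collections.singletonList(id),
                Collections.singletonList(new FieldRelation(id, id)),
                null, null, 1, new HashMap<>(),
                "test_db", "test_table", "id",
                CatalogType.HIVE,
                "thrift://localhost:9083",
                "hdfs://localhost:9000/iceberg/warehouse");
        // Expected to contain connector=iceberg-inlong, catalog-database=test_db,
        // catalog-table=test_table, catalog-type=HIVE plus the uri/warehouse entries.
        Map<String, String> options = node.tableOptions();
        options.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}
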
diff --git a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNodeTest.java b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNodeTest.java
index 6f0da2637..0ab2fc81b 100644
--- a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNodeTest.java
+++ b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/extract/PulsarExtractNodeTest.java
@@ -1,55 +1,55 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.extract;
-
-import org.apache.inlong.sort.SerializeBaseTest;
-import org.apache.inlong.sort.formats.common.IntFormatInfo;
-import org.apache.inlong.sort.formats.common.StringFormatInfo;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.node.Node;
-import org.apache.inlong.sort.protocol.node.format.CsvFormat;
-import org.apache.inlong.sort.protocol.node.format.Format;
-
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Test for {@link PulsarExtractNode}
- */
-public class PulsarExtractNodeTest extends SerializeBaseTest<Node> {
-
-    @Override
-    public Node getTestObject() {
-        List<FieldInfo> fields = Arrays.asList(
-                new FieldInfo("name", new StringFormatInfo()),
-                new FieldInfo("age", new IntFormatInfo()));
-        Format format = new CsvFormat();
-        return new PulsarExtractNode("2",
-                "pulsar_input",
-                fields,
-                null,
-                null,
-                "persistent://public/default/test_stream",
-                "http://localhost:8080",
-                "pulsar://localhost:6650",
-                format,
-                "earliest",
-                null);
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.extract;
+
+import org.apache.inlong.sort.SerializeBaseTest;
+import org.apache.inlong.sort.formats.common.IntFormatInfo;
+import org.apache.inlong.sort.formats.common.StringFormatInfo;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.node.Node;
+import org.apache.inlong.sort.protocol.node.format.CsvFormat;
+import org.apache.inlong.sort.protocol.node.format.Format;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test for {@link PulsarExtractNode}
+ */
+public class PulsarExtractNodeTest extends SerializeBaseTest<Node> {
+
+    @Override
+    public Node getTestObject() {
+        List<FieldInfo> fields = Arrays.asList(
+                new FieldInfo("name", new StringFormatInfo()),
+                new FieldInfo("age", new IntFormatInfo()));
+        Format format = new CsvFormat();
+        return new PulsarExtractNode("2",
+                "pulsar_input",
+                fields,
+                null,
+                null,
+                "persistent://public/default/test_stream",
+                "http://localhost:8080",
+                "pulsar://localhost:6650",
+                format,
+                "earliest",
+                null);
+    }
+}
diff --git a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormatTest.java b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormatTest.java
index 47863c573..414cff7d5 100644
--- a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormatTest.java
+++ b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/format/InLongMsgFormatTest.java
@@ -1,31 +1,31 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.format;
-
-import org.apache.inlong.sort.SerializeBaseTest;
-
-/**
- * Test for {@link InLongMsgFormat}
- */
-public class InLongMsgFormatTest extends SerializeBaseTest<InLongMsgFormat> {
-
-    @Override
-    public InLongMsgFormat getTestObject() {
-        return new InLongMsgFormat();
-    }
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.format;
+
+import org.apache.inlong.sort.SerializeBaseTest;
+
+/**
+ * Test for {@link InLongMsgFormat}
+ */
+public class InLongMsgFormatTest extends SerializeBaseTest<InLongMsgFormat> {
+
+    @Override
+    public InLongMsgFormat getTestObject() {
+        return new InLongMsgFormat();
+    }
 }
\ No newline at end of file
diff --git a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNodeTest.java b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNodeTest.java
index ceb9e5f12..b3496cdc2 100644
--- a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNodeTest.java
+++ b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNodeTest.java
@@ -1,51 +1,51 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import org.apache.inlong.sort.SerializeBaseTest;
-import org.apache.inlong.sort.formats.common.StringFormatInfo;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.node.Node;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-
-import java.util.Arrays;
-
-/**
- * Test for {@link ClickHouseLoadNode} Serialization/Deserialization.
- */
-public class ClickHouseLoadNodeTest extends SerializeBaseTest<Node> {
-    @Override
-    public Node getTestObject() {
-
-        return new ClickHouseLoadNode("2", "test_clickhouse",
-                Arrays.asList(new FieldInfo("id", new StringFormatInfo())),
-                Arrays.asList(new FieldRelation(new FieldInfo("id", new StringFormatInfo()),
-                        new FieldInfo("id", new StringFormatInfo()))),
-                null,
-                null,
-                1,
-                null,
-                "ck_demo",
-                "jdbc:clickhouse://localhost:8023/default",
-                "root",
-                "root",
-                ""
-        );
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import org.apache.inlong.sort.SerializeBaseTest;
+import org.apache.inlong.sort.formats.common.StringFormatInfo;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.node.Node;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+
+import java.util.Arrays;
+
+/**
+ * Test for {@link ClickHouseLoadNode} Serialization/Deserialization.
+ */
+public class ClickHouseLoadNodeTest extends SerializeBaseTest<Node> {
+    @Override
+    public Node getTestObject() {
+
+        return new ClickHouseLoadNode("2", "test_clickhouse",
+                Arrays.asList(new FieldInfo("id", new StringFormatInfo())),
+                Arrays.asList(new FieldRelation(new FieldInfo("id", new StringFormatInfo()),
+                        new FieldInfo("id", new StringFormatInfo()))),
+                null,
+                null,
+                1,
+                null,
+                "ck_demo",
+                "jdbc:clickhouse://localhost:8023/default",
+                "root",
+                "root",
+                ""
+        );
+    }
+}
diff --git a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNodeTest.java b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNodeTest.java
index 6b2f9d6ed..a12ab5f0a 100644
--- a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNodeTest.java
+++ b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DLCIcebergLoadNodeTest.java
@@ -1,60 +1,60 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import org.apache.inlong.sort.SerializeBaseTest;
-import org.apache.inlong.sort.formats.common.StringFormatInfo;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.constant.DLCConstant;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Test for {@link DLCIcebergLoadNode}
- */
-public class DLCIcebergLoadNodeTest extends SerializeBaseTest<DLCIcebergLoadNode> {
-    @Override
-    public DLCIcebergLoadNode getTestObject() {
-        Map<String, String> properties = new HashMap<>();
-        properties.put(DLCConstant.DLC_REGION, "ap-beijing");
-        properties.put(DLCConstant.DLC_SECRET_ID, "XXXXXXXXXXX");
-        properties.put(DLCConstant.DLC_SECRET_KEY, "XXXXXXXXXXX");
-        properties.put(DLCConstant.DLC_USER_APPID, "XXXXXXXXXXX");
-        properties.put(DLCConstant.DLC_MANAGED_ACCOUNT_UID, "XXXXXXXXXXX");
-
-        return new DLCIcebergLoadNode(
-                "iceberg_dlc",
-                "iceberg_dlc_output",
-                Arrays.asList(new FieldInfo("field", new StringFormatInfo())),
-                Arrays.asList(new FieldRelation(new FieldInfo("field", new StringFormatInfo()),
-                        new FieldInfo("field", new StringFormatInfo()))),
-                null,
-                null,
-                null,
-                properties,
-                "inlong",
-                "inlong_iceberg_dlc",
-                null,
-                null,
-                "/iceberg_dlc/warehouse");
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import org.apache.inlong.sort.SerializeBaseTest;
+import org.apache.inlong.sort.formats.common.StringFormatInfo;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.constant.DLCConstant;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test for {@link DLCIcebergLoadNode}
+ */
+public class DLCIcebergLoadNodeTest extends SerializeBaseTest<DLCIcebergLoadNode> {
+    @Override
+    public DLCIcebergLoadNode getTestObject() {
+        Map<String, String> properties = new HashMap<>();
+        properties.put(DLCConstant.DLC_REGION, "ap-beijing");
+        properties.put(DLCConstant.DLC_SECRET_ID, "XXXXXXXXXXX");
+        properties.put(DLCConstant.DLC_SECRET_KEY, "XXXXXXXXXXX");
+        properties.put(DLCConstant.DLC_USER_APPID, "XXXXXXXXXXX");
+        properties.put(DLCConstant.DLC_MANAGED_ACCOUNT_UID, "XXXXXXXXXXX");
+
+        return new DLCIcebergLoadNode(
+                "iceberg_dlc",
+                "iceberg_dlc_output",
+                Arrays.asList(new FieldInfo("field", new StringFormatInfo())),
+                Arrays.asList(new FieldRelation(new FieldInfo("field", new StringFormatInfo()),
+                        new FieldInfo("field", new StringFormatInfo()))),
+                null,
+                null,
+                null,
+                properties,
+                "inlong",
+                "inlong_iceberg_dlc",
+                null,
+                null,
+                "/iceberg_dlc/warehouse");
+    }
+}
diff --git a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNodeTest.java b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNodeTest.java
index 10d4b4410..16aa081ac 100644
--- a/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNodeTest.java
+++ b/inlong-sort/sort-common/src/test/java/org/apache/inlong/sort/protocol/node/load/DorisLoadNodeTest.java
@@ -1,73 +1,73 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.protocol.node.load;
-
-import org.apache.inlong.sort.SerializeBaseTest;
-import org.apache.inlong.sort.formats.common.DecimalFormatInfo;
-import org.apache.inlong.sort.formats.common.DoubleFormatInfo;
-import org.apache.inlong.sort.formats.common.IntFormatInfo;
-import org.apache.inlong.sort.formats.common.StringFormatInfo;
-import org.apache.inlong.sort.protocol.FieldInfo;
-import org.apache.inlong.sort.protocol.transformation.FieldRelation;
-import org.apache.inlong.sort.protocol.transformation.FilterFunction;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.List;
-
-/**
- * Test for {@link DorisLoadNode}
- */
-public class DorisLoadNodeTest extends SerializeBaseTest<DorisLoadNode> {
-
-    @Override
-    public DorisLoadNode getTestObject() {
-        List<FieldInfo> fields = Arrays.asList(
-                new FieldInfo("dt", new StringFormatInfo()),
-                new FieldInfo("id", new IntFormatInfo()),
-                new FieldInfo("name", new StringFormatInfo()),
-                new FieldInfo("age", new IntFormatInfo()),
-                new FieldInfo("price", new DecimalFormatInfo()),
-                new FieldInfo("sale", new DoubleFormatInfo())
-        );
-
-        List<FieldRelation> fieldRelations = Arrays
-                .asList(new FieldRelation(new FieldInfo("dt", new StringFormatInfo()),
-                                new FieldInfo("dt", new StringFormatInfo())),
-                        new FieldRelation(new FieldInfo("id", new IntFormatInfo()),
-                                new FieldInfo("id", new IntFormatInfo())),
-                        new FieldRelation(new FieldInfo("name", new StringFormatInfo()),
-                                new FieldInfo("name", new StringFormatInfo())),
-                        new FieldRelation(new FieldInfo("age", new IntFormatInfo()),
-                                new FieldInfo("age", new IntFormatInfo())),
-                        new FieldRelation(new FieldInfo("price", new DecimalFormatInfo()),
-                                new FieldInfo("price", new DecimalFormatInfo())),
-                        new FieldRelation(new FieldInfo("sale", new DoubleFormatInfo()),
-                                new FieldInfo("sale", new DoubleFormatInfo()))
-                );
-
-        List<FilterFunction> filters = new ArrayList<>();
-        Map<String, String> map = new HashMap<>();
-        return new DorisLoadNode("2", "doris_output", fields, fieldRelations,
-                filters, null, 1, map,
-                "localhost:8030", "root",
-                "000000", "test.test2", null);
-    }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.protocol.node.load;
+
+import org.apache.inlong.sort.SerializeBaseTest;
+import org.apache.inlong.sort.formats.common.DecimalFormatInfo;
+import org.apache.inlong.sort.formats.common.DoubleFormatInfo;
+import org.apache.inlong.sort.formats.common.IntFormatInfo;
+import org.apache.inlong.sort.formats.common.StringFormatInfo;
+import org.apache.inlong.sort.protocol.FieldInfo;
+import org.apache.inlong.sort.protocol.transformation.FieldRelation;
+import org.apache.inlong.sort.protocol.transformation.FilterFunction;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Test for {@link DorisLoadNode}
+ */
+public class DorisLoadNodeTest extends SerializeBaseTest<DorisLoadNode> {
+
+    @Override
+    public DorisLoadNode getTestObject() {
+        List<FieldInfo> fields = Arrays.asList(
+                new FieldInfo("dt", new StringFormatInfo()),
+                new FieldInfo("id", new IntFormatInfo()),
+                new FieldInfo("name", new StringFormatInfo()),
+                new FieldInfo("age", new IntFormatInfo()),
+                new FieldInfo("price", new DecimalFormatInfo()),
+                new FieldInfo("sale", new DoubleFormatInfo())
+        );
+
+        List<FieldRelation> fieldRelations = Arrays
+                .asList(new FieldRelation(new FieldInfo("dt", new StringFormatInfo()),
+                                new FieldInfo("dt", new StringFormatInfo())),
+                        new FieldRelation(new FieldInfo("id", new IntFormatInfo()),
+                                new FieldInfo("id", new IntFormatInfo())),
+                        new FieldRelation(new FieldInfo("name", new StringFormatInfo()),
+                                new FieldInfo("name", new StringFormatInfo())),
+                        new FieldRelation(new FieldInfo("age", new IntFormatInfo()),
+                                new FieldInfo("age", new IntFormatInfo())),
+                        new FieldRelation(new FieldInfo("price", new DecimalFormatInfo()),
+                                new FieldInfo("price", new DecimalFormatInfo())),
+                        new FieldRelation(new FieldInfo("sale", new DoubleFormatInfo()),
+                                new FieldInfo("sale", new DoubleFormatInfo()))
+                );
+
+        List<FilterFunction> filters = new ArrayList<>();
+        Map<String, String> map = new HashMap<>();
+        return new DorisLoadNode("2", "doris_output", fields, fieldRelations,
+                filters, null, 1, map,
+                "localhost:8030", "root",
+                "000000", "test.test2", null);
+    }
+}
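
A minimal usage sketch for the JsonToRowDataConverters class restored in the next hunk, assuming an illustrative name/age row schema and JSON payload; the converter is built with failOnMissingField=true, ignoreParseErrors=false, SQL timestamp format, and no Spark adaptation, and the sketch class name is hypothetical:

import org.apache.flink.formats.common.TimestampFormat;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.inlong.sort.base.format.JsonToRowDataConverters;

public class JsonToRowDataConvertersSketch {

    public static void main(String[] args) throws Exception {
        JsonToRowDataConverters converters =
                new JsonToRowDataConverters(true, false, TimestampFormat.SQL, false);

        // ROW<name VARCHAR, age INT>
        RowType rowType = RowType.of(
                new LogicalType[]{new VarCharType(VarCharType.MAX_LENGTH), new IntType()},
                new String[]{"name", "age"});
        JsonToRowDataConverters.JsonToRowDataConverter converter =
                converters.createRowConverter(rowType);

        // Convert one JSON object into Flink's internal RowData representation.
        JsonNode json = new ObjectMapper().readTree("{\"name\":\"inlong\",\"age\":3}");
        RowData row = (RowData) converter.convert(json);
        System.out.println(row.getString(0) + ", " + row.getInt(1));
    }
}
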
diff --git a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/format/JsonToRowDataConverters.java b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/format/JsonToRowDataConverters.java
index 4be01338c..b4e2d3de6 100644
--- a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/format/JsonToRowDataConverters.java
+++ b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/format/JsonToRowDataConverters.java
@@ -1,416 +1,416 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.base.format;
-
-import org.apache.flink.annotation.Internal;
-import org.apache.flink.formats.common.TimestampFormat;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
-import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.TextNode;
-import org.apache.flink.table.api.TableException;
-import org.apache.flink.table.data.DecimalData;
-import org.apache.flink.table.data.GenericArrayData;
-import org.apache.flink.table.data.GenericMapData;
-import org.apache.flink.table.data.GenericRowData;
-import org.apache.flink.table.data.RowData;
-import org.apache.flink.table.data.StringData;
-import org.apache.flink.table.data.TimestampData;
-import org.apache.flink.table.types.logical.ArrayType;
-import org.apache.flink.table.types.logical.DecimalType;
-import org.apache.flink.table.types.logical.IntType;
-import org.apache.flink.table.types.logical.LogicalType;
-import org.apache.flink.table.types.logical.LogicalTypeFamily;
-import org.apache.flink.table.types.logical.MapType;
-import org.apache.flink.table.types.logical.MultisetType;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;
-import org.apache.flink.table.types.logical.utils.LogicalTypeUtils;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.lang.reflect.Array;
-import java.math.BigDecimal;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeParseException;
-import java.time.temporal.TemporalAccessor;
-import java.time.temporal.TemporalQueries;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;
-import static org.apache.flink.formats.common.TimeFormats.ISO8601_TIMESTAMP_FORMAT;
-import static org.apache.flink.formats.common.TimeFormats.ISO8601_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT;
-import static org.apache.flink.formats.common.TimeFormats.SQL_TIMESTAMP_FORMAT;
-import static org.apache.flink.formats.common.TimeFormats.SQL_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT;
-import static org.apache.flink.formats.common.TimeFormats.SQL_TIME_FORMAT;
-
-/** Tool class used to convert from {@link JsonNode} to {@link RowData}. */
-@Internal
-public class JsonToRowDataConverters implements Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    /** Flag indicating whether to fail if a field is missing. */
-    private final boolean failOnMissingField;
-
-    /** Flag indicating whether to ignore invalid fields/rows (default: throw an exception). */
-    private final boolean ignoreParseErrors;
-
-    /** Timestamp format specification which is used to parse timestamp. */
-    private final TimestampFormat timestampFormat;
-
-    /** Whether to adapt to Spark SQL programs. */
-    private final boolean adaptSpark;
-
-    public JsonToRowDataConverters(
-            boolean failOnMissingField,
-            boolean ignoreParseErrors,
-            TimestampFormat timestampFormat,
-            boolean adaptSpark) {
-        this.failOnMissingField = failOnMissingField;
-        this.ignoreParseErrors = ignoreParseErrors;
-        this.timestampFormat = timestampFormat;
-        this.adaptSpark = adaptSpark;
-    }
-
-    /**
-     * Runtime converter that converts {@link JsonNode}s into objects of Flink Table & SQL internal
-     * data structures.
-     */
-    @FunctionalInterface
-    public interface JsonToRowDataConverter extends Serializable {
-        Object convert(JsonNode jsonNode);
-    }
-
-    /** Creates a runtime converter which is null safe. */
-    public JsonToRowDataConverter createConverter(LogicalType type) {
-        return wrapIntoNullableConverter(createNotNullConverter(type));
-    }
-
-    /** Creates a runtime converter which assumes the input object is not null. */
-    private JsonToRowDataConverter createNotNullConverter(LogicalType type) {
-        switch (type.getTypeRoot()) {
-            case NULL:
-                return jsonNode -> null;
-            case BOOLEAN:
-                return this::convertToBoolean;
-            case TINYINT:
-                return jsonNode -> Byte.parseByte(jsonNode.asText().trim());
-            case SMALLINT:
-                return jsonNode -> Short.parseShort(jsonNode.asText().trim());
-            case INTEGER:
-            case INTERVAL_YEAR_MONTH:
-                return this::convertToInt;
-            case BIGINT:
-            case INTERVAL_DAY_TIME:
-                return this::convertToLong;
-            case DATE:
-                return this::convertToDate;
-            case TIME_WITHOUT_TIME_ZONE:
-                return this::convertToTime;
-            case TIMESTAMP_WITHOUT_TIME_ZONE:
-                return this::convertToTimestamp;
-            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
-                if (adaptSpark) {
-                    return jsonNode -> {
-                        try {
-                            return convertToTimestampWithLocalZone(jsonNode);
-                        } catch (DateTimeParseException e) {
-                            return convertToTimestamp(jsonNode);
-                        }
-                    };
-                }
-                return this::convertToTimestampWithLocalZone;
-            case FLOAT:
-                return this::convertToFloat;
-            case DOUBLE:
-                return this::convertToDouble;
-            case CHAR:
-            case VARCHAR:
-                return this::convertToString;
-            case BINARY:
-            case VARBINARY:
-                return this::convertToBytes;
-            case DECIMAL:
-                return createDecimalConverter((DecimalType) type);
-            case ARRAY:
-                return createArrayConverter((ArrayType) type);
-            case MAP:
-                MapType mapType = (MapType) type;
-                return createMapConverter(
-                        mapType.asSummaryString(), mapType.getKeyType(), mapType.getValueType());
-            case MULTISET:
-                MultisetType multisetType = (MultisetType) type;
-                return createMapConverter(
-                        multisetType.asSummaryString(),
-                        multisetType.getElementType(),
-                        new IntType());
-            case ROW:
-                return createRowConverter((RowType) type);
-            case RAW:
-            default:
-                throw new UnsupportedOperationException("Unsupported type: " + type);
-        }
-    }
-
-    private boolean convertToBoolean(JsonNode jsonNode) {
-        if (jsonNode.isBoolean()) {
-            // avoid redundant toString and parseBoolean, for better performance
-            return jsonNode.asBoolean();
-        } else {
-            String boolStr = jsonNode.asText().trim();
-            // Compatible with tinyint data and bool conversion
-            return (boolStr != null) && ("true".equalsIgnoreCase(boolStr) || "1".equalsIgnoreCase(boolStr));
-        }
-    }
-
-    private int convertToInt(JsonNode jsonNode) {
-        if (jsonNode.canConvertToInt()) {
-            // avoid redundant toString and parseInt, for better performance
-            return jsonNode.asInt();
-        } else {
-            return Integer.parseInt(jsonNode.asText().trim());
-        }
-    }
-
-    private long convertToLong(JsonNode jsonNode) {
-        if (jsonNode.canConvertToLong()) {
-            // avoid redundant toString and parseLong, for better performance
-            return jsonNode.asLong();
-        } else {
-            return Long.parseLong(jsonNode.asText().trim());
-        }
-    }
-
-    private double convertToDouble(JsonNode jsonNode) {
-        if (jsonNode.isDouble()) {
-            // avoid redundant toString and parseDouble, for better performance
-            return jsonNode.asDouble();
-        } else {
-            return Double.parseDouble(jsonNode.asText().trim());
-        }
-    }
-
-    private float convertToFloat(JsonNode jsonNode) {
-        if (jsonNode.isDouble()) {
-            // avoid redundant toString and parseDouble, for better performance
-            return (float) jsonNode.asDouble();
-        } else {
-            return Float.parseFloat(jsonNode.asText().trim());
-        }
-    }
-
-    private int convertToDate(JsonNode jsonNode) {
-        LocalDate date = ISO_LOCAL_DATE.parse(jsonNode.asText()).query(TemporalQueries.localDate());
-        return (int) date.toEpochDay();
-    }
-
-    private int convertToTime(JsonNode jsonNode) {
-        TemporalAccessor parsedTime = SQL_TIME_FORMAT.parse(jsonNode.asText());
-        LocalTime localTime = parsedTime.query(TemporalQueries.localTime());
-
-        // get number of milliseconds of the day
-        return localTime.toSecondOfDay() * 1000;
-    }
-
-    private TimestampData convertToTimestamp(JsonNode jsonNode) {
-        TemporalAccessor parsedTimestamp;
-        switch (timestampFormat) {
-            case SQL:
-                parsedTimestamp = SQL_TIMESTAMP_FORMAT.parse(jsonNode.asText());
-                break;
-            case ISO_8601:
-                parsedTimestamp = ISO8601_TIMESTAMP_FORMAT.parse(jsonNode.asText());
-                break;
-            default:
-                throw new TableException(
-                        String.format(
-                                "Unsupported timestamp format '%s'. Validator should have checked that.",
-                                timestampFormat));
-        }
-        LocalTime localTime = parsedTimestamp.query(TemporalQueries.localTime());
-        LocalDate localDate = parsedTimestamp.query(TemporalQueries.localDate());
-
-        return TimestampData.fromLocalDateTime(LocalDateTime.of(localDate, localTime));
-    }
-
-    private TimestampData convertToTimestampWithLocalZone(JsonNode jsonNode) {
-        TemporalAccessor parsedTimestampWithLocalZone;
-        switch (timestampFormat) {
-            case SQL:
-                parsedTimestampWithLocalZone =
-                        SQL_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.parse(jsonNode.asText());
-                break;
-            case ISO_8601:
-                parsedTimestampWithLocalZone =
-                        ISO8601_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.parse(jsonNode.asText());
-                break;
-            default:
-                throw new TableException(
-                        String.format(
-                                "Unsupported timestamp format '%s'. Validator should have checked that.",
-                                timestampFormat));
-        }
-        LocalTime localTime = parsedTimestampWithLocalZone.query(TemporalQueries.localTime());
-        LocalDate localDate = parsedTimestampWithLocalZone.query(TemporalQueries.localDate());
-
-        return TimestampData.fromInstant(
-                LocalDateTime.of(localDate, localTime).toInstant(ZoneOffset.UTC));
-    }
-
-    private StringData convertToString(JsonNode jsonNode) {
-        if (jsonNode.isContainerNode()) {
-            return StringData.fromString(jsonNode.toString());
-        } else {
-            return StringData.fromString(jsonNode.asText());
-        }
-    }
-
-    private byte[] convertToBytes(JsonNode jsonNode) {
-        try {
-            return jsonNode.binaryValue();
-        } catch (IOException e) {
-            throw new JsonParseException("Unable to deserialize byte array.", e);
-        }
-    }
-
-    private JsonToRowDataConverter createDecimalConverter(DecimalType decimalType) {
-        final int precision = decimalType.getPrecision();
-        final int scale = decimalType.getScale();
-        return jsonNode -> {
-            BigDecimal bigDecimal;
-            if (jsonNode.isBigDecimal()) {
-                bigDecimal = jsonNode.decimalValue();
-            } else {
-                bigDecimal = new BigDecimal(jsonNode.asText());
-            }
-            return DecimalData.fromBigDecimal(bigDecimal, precision, scale);
-        };
-    }
-
-    private JsonToRowDataConverter createArrayConverter(ArrayType arrayType) {
-        JsonToRowDataConverter elementConverter = createConverter(arrayType.getElementType());
-        final Class<?> elementClass =
-                LogicalTypeUtils.toInternalConversionClass(arrayType.getElementType());
-        return jsonNode -> {
-            final ArrayNode node = (ArrayNode) jsonNode;
-            final Object[] array = (Object[]) Array.newInstance(elementClass, node.size());
-            for (int i = 0; i < node.size(); i++) {
-                final JsonNode innerNode = node.get(i);
-                array[i] = elementConverter.convert(innerNode);
-            }
-            return new GenericArrayData(array);
-        };
-    }
-
-    private JsonToRowDataConverter createMapConverter(
-            String typeSummary, LogicalType keyType, LogicalType valueType) {
-        if (!LogicalTypeChecks.hasFamily(keyType, LogicalTypeFamily.CHARACTER_STRING)) {
-            throw new UnsupportedOperationException(
-                    "JSON format doesn't support non-string as key type of map. "
-                            + "The type is: "
-                            + typeSummary);
-        }
-        final JsonToRowDataConverter keyConverter = createConverter(keyType);
-        final JsonToRowDataConverter valueConverter = createConverter(valueType);
-
-        return jsonNode -> {
-            Iterator<Entry<String, JsonNode>> fields = jsonNode.fields();
-            Map<Object, Object> result = new HashMap<>();
-            while (fields.hasNext()) {
-                Map.Entry<String, JsonNode> entry = fields.next();
-                Object key = keyConverter.convert(TextNode.valueOf(entry.getKey()));
-                Object value = valueConverter.convert(entry.getValue());
-                result.put(key, value);
-            }
-            return new GenericMapData(result);
-        };
-    }
-
-    public JsonToRowDataConverter createRowConverter(RowType rowType) {
-        final JsonToRowDataConverter[] fieldConverters =
-                rowType.getFields().stream()
-                        .map(RowType.RowField::getType)
-                        .map(this::createConverter)
-                        .toArray(JsonToRowDataConverter[]::new);
-        final String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
-
-        return jsonNode -> {
-            ObjectNode node = (ObjectNode) jsonNode;
-            int arity = fieldNames.length;
-            GenericRowData row = new GenericRowData(arity);
-            for (int i = 0; i < arity; i++) {
-                String fieldName = fieldNames[i];
-                JsonNode field = node.get(fieldName);
-                Object convertedField = convertField(fieldConverters[i], fieldName, field);
-                row.setField(i, convertedField);
-            }
-            return row;
-        };
-    }
-
-    private Object convertField(
-            JsonToRowDataConverter fieldConverter, String fieldName, JsonNode field) {
-        if (field == null) {
-            if (failOnMissingField) {
-                throw new JsonParseException("Could not find field with name '" + fieldName + "'.");
-            } else {
-                return null;
-            }
-        } else {
-            return fieldConverter.convert(field);
-        }
-    }
-
-    private JsonToRowDataConverter wrapIntoNullableConverter(
-            JsonToRowDataConverter converter) {
-        return jsonNode -> {
-            if (jsonNode == null || jsonNode.isNull() || jsonNode.isMissingNode()) {
-                return null;
-            }
-            try {
-                return converter.convert(jsonNode);
-            } catch (Throwable t) {
-                if (!ignoreParseErrors) {
-                    throw t;
-                }
-                return null;
-            }
-        };
-    }
-
-    /** Exception which refers to parse errors in converters. */
-    private static final class JsonParseException extends RuntimeException {
-        private static final long serialVersionUID = 1L;
-
-        public JsonParseException(String message) {
-            super(message);
-        }
-
-        public JsonParseException(String message, Throwable cause) {
-            super(message, cause);
-        }
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.base.format;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.formats.common.TimestampFormat;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.TextNode;
+import org.apache.flink.table.api.TableException;
+import org.apache.flink.table.data.DecimalData;
+import org.apache.flink.table.data.GenericArrayData;
+import org.apache.flink.table.data.GenericMapData;
+import org.apache.flink.table.data.GenericRowData;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.StringData;
+import org.apache.flink.table.data.TimestampData;
+import org.apache.flink.table.types.logical.ArrayType;
+import org.apache.flink.table.types.logical.DecimalType;
+import org.apache.flink.table.types.logical.IntType;
+import org.apache.flink.table.types.logical.LogicalType;
+import org.apache.flink.table.types.logical.LogicalTypeFamily;
+import org.apache.flink.table.types.logical.MapType;
+import org.apache.flink.table.types.logical.MultisetType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;
+import org.apache.flink.table.types.logical.utils.LogicalTypeUtils;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.reflect.Array;
+import java.math.BigDecimal;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeParseException;
+import java.time.temporal.TemporalAccessor;
+import java.time.temporal.TemporalQueries;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;
+import static org.apache.flink.formats.common.TimeFormats.ISO8601_TIMESTAMP_FORMAT;
+import static org.apache.flink.formats.common.TimeFormats.ISO8601_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT;
+import static org.apache.flink.formats.common.TimeFormats.SQL_TIMESTAMP_FORMAT;
+import static org.apache.flink.formats.common.TimeFormats.SQL_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT;
+import static org.apache.flink.formats.common.TimeFormats.SQL_TIME_FORMAT;
+
+/** Tool class used to convert from {@link JsonNode} to {@link RowData}. */
+@Internal
+public class JsonToRowDataConverters implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+
+    /** Flag indicating whether to fail if a field is missing. */
+    private final boolean failOnMissingField;
+
+    /** Flag indicating whether to ignore invalid fields/rows (default: throw an exception). */
+    private final boolean ignoreParseErrors;
+
+    /** Timestamp format specification which is used to parse timestamp. */
+    private final TimestampFormat timestampFormat;
+
+    /** Whether to adapt to Spark SQL programs. */
+    private final boolean adaptSpark;
+
+    public JsonToRowDataConverters(
+            boolean failOnMissingField,
+            boolean ignoreParseErrors,
+            TimestampFormat timestampFormat,
+            boolean adaptSpark) {
+        this.failOnMissingField = failOnMissingField;
+        this.ignoreParseErrors = ignoreParseErrors;
+        this.timestampFormat = timestampFormat;
+        this.adaptSpark = adaptSpark;
+    }
+
+    /**
+     * Runtime converter that converts {@link JsonNode}s into objects of Flink Table & SQL internal
+     * data structures.
+     */
+    @FunctionalInterface
+    public interface JsonToRowDataConverter extends Serializable {
+        Object convert(JsonNode jsonNode);
+    }
+
+    /** Creates a runtime converter which is null safe. */
+    public JsonToRowDataConverter createConverter(LogicalType type) {
+        return wrapIntoNullableConverter(createNotNullConverter(type));
+    }
+
+    /** Creates a runtime converter which assumes the input object is not null. */
+    private JsonToRowDataConverter createNotNullConverter(LogicalType type) {
+        switch (type.getTypeRoot()) {
+            case NULL:
+                return jsonNode -> null;
+            case BOOLEAN:
+                return this::convertToBoolean;
+            case TINYINT:
+                return jsonNode -> Byte.parseByte(jsonNode.asText().trim());
+            case SMALLINT:
+                return jsonNode -> Short.parseShort(jsonNode.asText().trim());
+            case INTEGER:
+            case INTERVAL_YEAR_MONTH:
+                return this::convertToInt;
+            case BIGINT:
+            case INTERVAL_DAY_TIME:
+                return this::convertToLong;
+            case DATE:
+                return this::convertToDate;
+            case TIME_WITHOUT_TIME_ZONE:
+                return this::convertToTime;
+            case TIMESTAMP_WITHOUT_TIME_ZONE:
+                return this::convertToTimestamp;
+            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+                if (adaptSpark) {
+                    return jsonNode -> {
+                        try {
+                            return convertToTimestampWithLocalZone(jsonNode);
+                        } catch (DateTimeParseException e) {
+                            return convertToTimestamp(jsonNode);
+                        }
+                    };
+                }
+                return this::convertToTimestampWithLocalZone;
+            case FLOAT:
+                return this::convertToFloat;
+            case DOUBLE:
+                return this::convertToDouble;
+            case CHAR:
+            case VARCHAR:
+                return this::convertToString;
+            case BINARY:
+            case VARBINARY:
+                return this::convertToBytes;
+            case DECIMAL:
+                return createDecimalConverter((DecimalType) type);
+            case ARRAY:
+                return createArrayConverter((ArrayType) type);
+            case MAP:
+                MapType mapType = (MapType) type;
+                return createMapConverter(
+                        mapType.asSummaryString(), mapType.getKeyType(), mapType.getValueType());
+            case MULTISET:
+                MultisetType multisetType = (MultisetType) type;
+                return createMapConverter(
+                        multisetType.asSummaryString(),
+                        multisetType.getElementType(),
+                        new IntType());
+            case ROW:
+                return createRowConverter((RowType) type);
+            case RAW:
+            default:
+                throw new UnsupportedOperationException("Unsupported type: " + type);
+        }
+    }
+
+    private boolean convertToBoolean(JsonNode jsonNode) {
+        if (jsonNode.isBoolean()) {
+            // avoid redundant toString and parseBoolean, for better performance
+            return jsonNode.asBoolean();
+        } else {
+            String boolStr = jsonNode.asText().trim();
+            // accept "true"/"1" as true for compatibility with tinyint-to-boolean conversion
+            return (boolStr != null) && ("true".equalsIgnoreCase(boolStr) || "1".equalsIgnoreCase(boolStr));
+        }
+    }
+
+    private int convertToInt(JsonNode jsonNode) {
+        if (jsonNode.canConvertToInt()) {
+            // avoid redundant toString and parseInt, for better performance
+            return jsonNode.asInt();
+        } else {
+            return Integer.parseInt(jsonNode.asText().trim());
+        }
+    }
+
+    private long convertToLong(JsonNode jsonNode) {
+        if (jsonNode.canConvertToLong()) {
+            // avoid redundant toString and parseLong, for better performance
+            return jsonNode.asLong();
+        } else {
+            return Long.parseLong(jsonNode.asText().trim());
+        }
+    }
+
+    private double convertToDouble(JsonNode jsonNode) {
+        if (jsonNode.isDouble()) {
+            // avoid redundant toString and parseDouble, for better performance
+            return jsonNode.asDouble();
+        } else {
+            return Double.parseDouble(jsonNode.asText().trim());
+        }
+    }
+
+    private float convertToFloat(JsonNode jsonNode) {
+        if (jsonNode.isDouble()) {
+            // avoid redundant toString and parseDouble, for better performance
+            return (float) jsonNode.asDouble();
+        } else {
+            return Float.parseFloat(jsonNode.asText().trim());
+        }
+    }
+
+    private int convertToDate(JsonNode jsonNode) {
+        LocalDate date = ISO_LOCAL_DATE.parse(jsonNode.asText()).query(TemporalQueries.localDate());
+        return (int) date.toEpochDay();
+    }
+
+    private int convertToTime(JsonNode jsonNode) {
+        TemporalAccessor parsedTime = SQL_TIME_FORMAT.parse(jsonNode.asText());
+        LocalTime localTime = parsedTime.query(TemporalQueries.localTime());
+
+        // get number of milliseconds of the day
+        return localTime.toSecondOfDay() * 1000;
+    }
+
+    private TimestampData convertToTimestamp(JsonNode jsonNode) {
+        TemporalAccessor parsedTimestamp;
+        switch (timestampFormat) {
+            case SQL:
+                parsedTimestamp = SQL_TIMESTAMP_FORMAT.parse(jsonNode.asText());
+                break;
+            case ISO_8601:
+                parsedTimestamp = ISO8601_TIMESTAMP_FORMAT.parse(jsonNode.asText());
+                break;
+            default:
+                throw new TableException(
+                        String.format(
+                                "Unsupported timestamp format '%s'. Validator should have checked that.",
+                                timestampFormat));
+        }
+        LocalTime localTime = parsedTimestamp.query(TemporalQueries.localTime());
+        LocalDate localDate = parsedTimestamp.query(TemporalQueries.localDate());
+
+        return TimestampData.fromLocalDateTime(LocalDateTime.of(localDate, localTime));
+    }
+
+    private TimestampData convertToTimestampWithLocalZone(JsonNode jsonNode) {
+        TemporalAccessor parsedTimestampWithLocalZone;
+        switch (timestampFormat) {
+            case SQL:
+                parsedTimestampWithLocalZone =
+                        SQL_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.parse(jsonNode.asText());
+                break;
+            case ISO_8601:
+                parsedTimestampWithLocalZone =
+                        ISO8601_TIMESTAMP_WITH_LOCAL_TIMEZONE_FORMAT.parse(jsonNode.asText());
+                break;
+            default:
+                throw new TableException(
+                        String.format(
+                                "Unsupported timestamp format '%s'. Validator should have checked that.",
+                                timestampFormat));
+        }
+        LocalTime localTime = parsedTimestampWithLocalZone.query(TemporalQueries.localTime());
+        LocalDate localDate = parsedTimestampWithLocalZone.query(TemporalQueries.localDate());
+
+        return TimestampData.fromInstant(
+                LocalDateTime.of(localDate, localTime).toInstant(ZoneOffset.UTC));
+    }
+
+    private StringData convertToString(JsonNode jsonNode) {
+        if (jsonNode.isContainerNode()) {
+            return StringData.fromString(jsonNode.toString());
+        } else {
+            return StringData.fromString(jsonNode.asText());
+        }
+    }
+
+    private byte[] convertToBytes(JsonNode jsonNode) {
+        try {
+            return jsonNode.binaryValue();
+        } catch (IOException e) {
+            throw new JsonParseException("Unable to deserialize byte array.", e);
+        }
+    }
+
+    private JsonToRowDataConverter createDecimalConverter(DecimalType decimalType) {
+        final int precision = decimalType.getPrecision();
+        final int scale = decimalType.getScale();
+        return jsonNode -> {
+            BigDecimal bigDecimal;
+            if (jsonNode.isBigDecimal()) {
+                bigDecimal = jsonNode.decimalValue();
+            } else {
+                bigDecimal = new BigDecimal(jsonNode.asText());
+            }
+            return DecimalData.fromBigDecimal(bigDecimal, precision, scale);
+        };
+    }
+
+    private JsonToRowDataConverter createArrayConverter(ArrayType arrayType) {
+        JsonToRowDataConverter elementConverter = createConverter(arrayType.getElementType());
+        final Class<?> elementClass =
+                LogicalTypeUtils.toInternalConversionClass(arrayType.getElementType());
+        return jsonNode -> {
+            final ArrayNode node = (ArrayNode) jsonNode;
+            final Object[] array = (Object[]) Array.newInstance(elementClass, node.size());
+            for (int i = 0; i < node.size(); i++) {
+                final JsonNode innerNode = node.get(i);
+                array[i] = elementConverter.convert(innerNode);
+            }
+            return new GenericArrayData(array);
+        };
+    }
+
+    private JsonToRowDataConverter createMapConverter(
+            String typeSummary, LogicalType keyType, LogicalType valueType) {
+        if (!LogicalTypeChecks.hasFamily(keyType, LogicalTypeFamily.CHARACTER_STRING)) {
+            throw new UnsupportedOperationException(
+                    "JSON format doesn't support non-string as key type of map. "
+                            + "The type is: "
+                            + typeSummary);
+        }
+        final JsonToRowDataConverter keyConverter = createConverter(keyType);
+        final JsonToRowDataConverter valueConverter = createConverter(valueType);
+
+        return jsonNode -> {
+            Iterator<Entry<String, JsonNode>> fields = jsonNode.fields();
+            Map<Object, Object> result = new HashMap<>();
+            while (fields.hasNext()) {
+                Map.Entry<String, JsonNode> entry = fields.next();
+                Object key = keyConverter.convert(TextNode.valueOf(entry.getKey()));
+                Object value = valueConverter.convert(entry.getValue());
+                result.put(key, value);
+            }
+            return new GenericMapData(result);
+        };
+    }
+
+    public JsonToRowDataConverter createRowConverter(RowType rowType) {
+        final JsonToRowDataConverter[] fieldConverters =
+                rowType.getFields().stream()
+                        .map(RowType.RowField::getType)
+                        .map(this::createConverter)
+                        .toArray(JsonToRowDataConverter[]::new);
+        final String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
+
+        return jsonNode -> {
+            ObjectNode node = (ObjectNode) jsonNode;
+            int arity = fieldNames.length;
+            GenericRowData row = new GenericRowData(arity);
+            for (int i = 0; i < arity; i++) {
+                String fieldName = fieldNames[i];
+                JsonNode field = node.get(fieldName);
+                Object convertedField = convertField(fieldConverters[i], fieldName, field);
+                row.setField(i, convertedField);
+            }
+            return row;
+        };
+    }
+
+    private Object convertField(
+            JsonToRowDataConverter fieldConverter, String fieldName, JsonNode field) {
+        if (field == null) {
+            if (failOnMissingField) {
+                throw new JsonParseException("Could not find field with name '" + fieldName + "'.");
+            } else {
+                return null;
+            }
+        } else {
+            return fieldConverter.convert(field);
+        }
+    }
+
+    private JsonToRowDataConverter wrapIntoNullableConverter(
+            JsonToRowDataConverter converter) {
+        return jsonNode -> {
+            if (jsonNode == null || jsonNode.isNull() || jsonNode.isMissingNode()) {
+                return null;
+            }
+            try {
+                return converter.convert(jsonNode);
+            } catch (Throwable t) {
+                if (!ignoreParseErrors) {
+                    throw t;
+                }
+                return null;
+            }
+        };
+    }
+
+    /** Exception which refers to parse errors in converters. */
+    private static final class JsonParseException extends RuntimeException {
+        private static final long serialVersionUID = 1L;
+
+        public JsonParseException(String message) {
+            super(message);
+        }
+
+        public JsonParseException(String message, Throwable cause) {
+            super(message, cause);
+        }
+    }
+}
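
A minimal sketch of how the converter above might be driven: build a row converter for a row type and feed it a parsed JsonNode. The sketch class name, and the package names for TimestampFormat and the shaded Jackson classes, are assumptions that vary with the Flink version in use.

    import org.apache.flink.formats.common.TimestampFormat;
    import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
    import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.flink.table.data.RowData;
    import org.apache.flink.table.types.logical.IntType;
    import org.apache.flink.table.types.logical.LogicalType;
    import org.apache.flink.table.types.logical.RowType;
    import org.apache.flink.table.types.logical.VarCharType;
    import org.apache.inlong.sort.base.format.JsonToRowDataConverters;

    public class JsonToRowDataSketch {
        public static void main(String[] args) throws Exception {
            // ROW<id INT, name STRING>
            RowType rowType = RowType.of(
                    new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
                    new String[] {"id", "name"});

            // failOnMissingField=false, ignoreParseErrors=true, no Spark adaptation
            JsonToRowDataConverters converters =
                    new JsonToRowDataConverters(false, true, TimestampFormat.SQL, false);
            JsonToRowDataConverters.JsonToRowDataConverter rowConverter =
                    converters.createRowConverter(rowType);

            JsonNode node = new ObjectMapper().readTree("{\"id\": 1, \"name\": \"inlong\"}");
            RowData row = (RowData) rowConverter.convert(node);
            System.out.println(row);
        }
    }
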
diff --git a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/metric/MetricOption.java b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/metric/MetricOption.java
index da6785bf3..c6c52bde2 100644
--- a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/metric/MetricOption.java
+++ b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/metric/MetricOption.java
@@ -1,211 +1,211 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.base.metric;
-
-import org.apache.flink.util.Preconditions;
-import org.apache.flink.util.StringUtils;
-
-import javax.annotation.Nullable;
-import java.io.Serializable;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.regex.Pattern;
-import java.util.stream.Stream;
-
-import static org.apache.inlong.sort.base.Constants.DELIMITER;
-import static org.apache.inlong.sort.base.Constants.GROUP_ID;
-import static org.apache.inlong.sort.base.Constants.STREAM_ID;
-
-public class MetricOption implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private static final String IP_OR_HOST_PORT = "^(.*):([0-9]|[1-9]\\d|[1-9]\\d{"
-            + "2}|[1-9]\\d{"
-            + "3}|[1-5]\\d{"
-            + "4}|6[0-4]\\d{"
-            + "3}|65[0-4]\\d{"
-            + "2}|655[0-2]\\d|6553[0-5])$";
-
-    private Map<String, String> labels;
-    private final HashSet<String> ipPortList;
-    private String ipPorts;
-    private RegisteredMetric registeredMetric;
-    private long initRecords;
-    private long initBytes;
-    private long initDirtyRecords;
-    private long initDirtyBytes;
-
-    private MetricOption(
-            String inlongLabels,
-            @Nullable String inlongAudit,
-            RegisteredMetric registeredMetric,
-            long initRecords,
-            long initBytes,
-            Long initDirtyRecords,
-            Long initDirtyBytes) {
-        Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(inlongLabels),
-                "InLong labels must be set to register metrics.");
-
-        this.initRecords = initRecords;
-        this.initBytes = initBytes;
-        this.initDirtyRecords = initDirtyRecords;
-        this.initDirtyBytes = initDirtyBytes;
-        this.labels = new LinkedHashMap<>();
-        String[] inLongLabelArray = inlongLabels.split(DELIMITER);
-        Preconditions.checkArgument(Stream.of(inLongLabelArray).allMatch(label -> label.contains("=")),
-                "InLong metric label format must be xxx=xxx");
-        Stream.of(inLongLabelArray).forEach(label -> {
-            String key = label.substring(0, label.indexOf('='));
-            String value = label.substring(label.indexOf('=') + 1);
-            labels.put(key, value);
-        });
-
-        this.ipPortList = new HashSet<>();
-        this.ipPorts = inlongAudit;
-        if (ipPorts != null) {
-            Preconditions.checkArgument(labels.containsKey(GROUP_ID) && labels.containsKey(STREAM_ID),
-                    "groupId and streamId must be set when InLong audit collection is enabled.");
-            String[] ipPortStrs = inlongAudit.split(DELIMITER);
-            for (String ipPort : ipPortStrs) {
-                Preconditions.checkArgument(Pattern.matches(IP_OR_HOST_PORT, ipPort),
-                        "Error inLong audit format: " + inlongAudit);
-                this.ipPortList.add(ipPort);
-            }
-        }
-
-        if (registeredMetric != null) {
-            this.registeredMetric = registeredMetric;
-        }
-    }
-
-    public Map<String, String> getLabels() {
-        return labels;
-    }
-
-    public HashSet<String> getIpPortList() {
-        return ipPortList;
-    }
-
-    public Optional<String> getIpPorts() {
-        return Optional.ofNullable(ipPorts);
-    }
-
-    public RegisteredMetric getRegisteredMetric() {
-        return registeredMetric;
-    }
-
-    public long getInitRecords() {
-        return initRecords;
-    }
-
-    public long getInitBytes() {
-        return initBytes;
-    }
-
-    public void setInitRecords(long initRecords) {
-        this.initRecords = initRecords;
-    }
-
-    public void setInitBytes(long initBytes) {
-        this.initBytes = initBytes;
-    }
-
-    public long getInitDirtyRecords() {
-        return initDirtyRecords;
-    }
-
-    public void setInitDirtyRecords(long initDirtyRecords) {
-        this.initDirtyRecords = initDirtyRecords;
-    }
-
-    public long getInitDirtyBytes() {
-        return initDirtyBytes;
-    }
-
-    public void setInitDirtyBytes(long initDirtyBytes) {
-        this.initDirtyBytes = initDirtyBytes;
-    }
-
-    public static Builder builder() {
-        return new Builder();
-    }
-
-    public enum RegisteredMetric {
-        ALL,
-        NORMAL,
-        DIRTY
-    }
-
-    public static class Builder {
-        private String inlongLabels;
-        private String inlongAudit;
-        private RegisteredMetric registeredMetric = RegisteredMetric.ALL;
-        private long initRecords = 0L;
-        private long initBytes = 0L;
-        private Long initDirtyRecords = 0L;
-        private Long initDirtyBytes = 0L;
-
-        private Builder() {
-        }
-
-        public MetricOption.Builder withInlongLabels(String inlongLabels) {
-            this.inlongLabels = inlongLabels;
-            return this;
-        }
-
-        public MetricOption.Builder withInlongAudit(String inlongAudit) {
-            this.inlongAudit = inlongAudit;
-            return this;
-        }
-
-        public MetricOption.Builder withRegisterMetric(RegisteredMetric registeredMetric) {
-            this.registeredMetric = registeredMetric;
-            return this;
-        }
-
-        public MetricOption.Builder withInitRecords(long initRecords) {
-            this.initRecords = initRecords;
-            return this;
-        }
-
-        public MetricOption.Builder withInitBytes(long initBytes) {
-            this.initBytes = initBytes;
-            return this;
-        }
-
-        public MetricOption.Builder withInitDirtyRecords(Long initDirtyRecords) {
-            this.initDirtyRecords = initDirtyRecords;
-            return this;
-        }
-
-        public MetricOption.Builder withInitDirtyBytes(Long initDirtyBytes) {
-            this.initDirtyBytes = initDirtyBytes;
-            return this;
-        }
-
-        public MetricOption build() {
-            if (inlongLabels == null && inlongAudit == null) {
-                return null;
-            }
-            return new MetricOption(inlongLabels, inlongAudit, registeredMetric, initRecords, initBytes,
-                    initDirtyRecords, initDirtyBytes);
-        }
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.base.metric;
+
+import org.apache.flink.util.Preconditions;
+import org.apache.flink.util.StringUtils;
+
+import javax.annotation.Nullable;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+
+import static org.apache.inlong.sort.base.Constants.DELIMITER;
+import static org.apache.inlong.sort.base.Constants.GROUP_ID;
+import static org.apache.inlong.sort.base.Constants.STREAM_ID;
+
+public class MetricOption implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final String IP_OR_HOST_PORT = "^(.*):([0-9]|[1-9]\\d|[1-9]\\d{"
+            + "2}|[1-9]\\d{"
+            + "3}|[1-5]\\d{"
+            + "4}|6[0-4]\\d{"
+            + "3}|65[0-4]\\d{"
+            + "2}|655[0-2]\\d|6553[0-5])$";
+
+    private Map<String, String> labels;
+    private final HashSet<String> ipPortList;
+    private String ipPorts;
+    private RegisteredMetric registeredMetric;
+    private long initRecords;
+    private long initBytes;
+    private long initDirtyRecords;
+    private long initDirtyBytes;
+
+    private MetricOption(
+            String inlongLabels,
+            @Nullable String inlongAudit,
+            RegisteredMetric registeredMetric,
+            long initRecords,
+            long initBytes,
+            Long initDirtyRecords,
+            Long initDirtyBytes) {
+        Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(inlongLabels),
+                "InLong labels must be set to register metrics.");
+
+        this.initRecords = initRecords;
+        this.initBytes = initBytes;
+        this.initDirtyRecords = initDirtyRecords;
+        this.initDirtyBytes = initDirtyBytes;
+        this.labels = new LinkedHashMap<>();
+        String[] inLongLabelArray = inlongLabels.split(DELIMITER);
+        Preconditions.checkArgument(Stream.of(inLongLabelArray).allMatch(label -> label.contains("=")),
+                "InLong metric label format must be xxx=xxx");
+        Stream.of(inLongLabelArray).forEach(label -> {
+            String key = label.substring(0, label.indexOf('='));
+            String value = label.substring(label.indexOf('=') + 1);
+            labels.put(key, value);
+        });
+
+        this.ipPortList = new HashSet<>();
+        this.ipPorts = inlongAudit;
+        if (ipPorts != null) {
+            Preconditions.checkArgument(labels.containsKey(GROUP_ID) && labels.containsKey(STREAM_ID),
+                    "groupId and streamId must be set when InLong audit collection is enabled.");
+            String[] ipPortStrs = inlongAudit.split(DELIMITER);
+            for (String ipPort : ipPortStrs) {
+                Preconditions.checkArgument(Pattern.matches(IP_OR_HOST_PORT, ipPort),
+                        "Error inLong audit format: " + inlongAudit);
+                this.ipPortList.add(ipPort);
+            }
+        }
+
+        if (registeredMetric != null) {
+            this.registeredMetric = registeredMetric;
+        }
+    }
+
+    public Map<String, String> getLabels() {
+        return labels;
+    }
+
+    public HashSet<String> getIpPortList() {
+        return ipPortList;
+    }
+
+    public Optional<String> getIpPorts() {
+        return Optional.ofNullable(ipPorts);
+    }
+
+    public RegisteredMetric getRegisteredMetric() {
+        return registeredMetric;
+    }
+
+    public long getInitRecords() {
+        return initRecords;
+    }
+
+    public long getInitBytes() {
+        return initBytes;
+    }
+
+    public void setInitRecords(long initRecords) {
+        this.initRecords = initRecords;
+    }
+
+    public void setInitBytes(long initBytes) {
+        this.initBytes = initBytes;
+    }
+
+    public long getInitDirtyRecords() {
+        return initDirtyRecords;
+    }
+
+    public void setInitDirtyRecords(long initDirtyRecords) {
+        this.initDirtyRecords = initDirtyRecords;
+    }
+
+    public long getInitDirtyBytes() {
+        return initDirtyBytes;
+    }
+
+    public void setInitDirtyBytes(long initDirtyBytes) {
+        this.initDirtyBytes = initDirtyBytes;
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public enum RegisteredMetric {
+        ALL,
+        NORMAL,
+        DIRTY
+    }
+
+    public static class Builder {
+        private String inlongLabels;
+        private String inlongAudit;
+        private RegisteredMetric registeredMetric = RegisteredMetric.ALL;
+        private long initRecords = 0L;
+        private long initBytes = 0L;
+        private Long initDirtyRecords = 0L;
+        private Long initDirtyBytes = 0L;
+
+        private Builder() {
+        }
+
+        public MetricOption.Builder withInlongLabels(String inlongLabels) {
+            this.inlongLabels = inlongLabels;
+            return this;
+        }
+
+        public MetricOption.Builder withInlongAudit(String inlongAudit) {
+            this.inlongAudit = inlongAudit;
+            return this;
+        }
+
+        public MetricOption.Builder withRegisterMetric(RegisteredMetric registeredMetric) {
+            this.registeredMetric = registeredMetric;
+            return this;
+        }
+
+        public MetricOption.Builder withInitRecords(long initRecords) {
+            this.initRecords = initRecords;
+            return this;
+        }
+
+        public MetricOption.Builder withInitBytes(long initBytes) {
+            this.initBytes = initBytes;
+            return this;
+        }
+
+        public MetricOption.Builder withInitDirtyRecords(Long initDirtyRecords) {
+            this.initDirtyRecords = initDirtyRecords;
+            return this;
+        }
+
+        public MetricOption.Builder withInitDirtyBytes(Long initDirtyBytes) {
+            this.initDirtyBytes = initDirtyBytes;
+            return this;
+        }
+
+        public MetricOption build() {
+            if (inlongLabels == null && inlongAudit == null) {
+                return null;
+            }
+            return new MetricOption(inlongLabels, inlongAudit, registeredMetric, initRecords, initBytes,
+                    initDirtyRecords, initDirtyBytes);
+        }
+    }
+}
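
A minimal sketch of building a MetricOption through its Builder. The label delimiter ('&'), the label keys groupId/streamId/nodeId, the audit host:port value, and the sketch class name are illustrative assumptions.

    import org.apache.inlong.sort.base.metric.MetricOption;
    import org.apache.inlong.sort.base.metric.MetricOption.RegisteredMetric;

    public class MetricOptionSketch {
        public static void main(String[] args) {
            // labels must be key=value pairs joined by Constants.DELIMITER (assumed to be '&'),
            // and must include groupId/streamId when an audit address is supplied
            MetricOption metricOption = MetricOption.builder()
                    .withInlongLabels("groupId=test_group&streamId=test_stream&nodeId=test_node")
                    .withInlongAudit("127.0.0.1:10081")
                    .withRegisterMetric(RegisteredMetric.ALL)
                    .build();
            // build() returns null when neither labels nor audit address are set
            if (metricOption != null) {
                System.out.println(metricOption.getLabels());
                System.out.println(metricOption.getIpPortList());
            }
        }
    }
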
diff --git a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/MultipleSinkOption.java b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/MultipleSinkOption.java
index 1eee73798..3d5663b74 100644
--- a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/MultipleSinkOption.java
+++ b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/MultipleSinkOption.java
@@ -1,155 +1,155 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.base.sink;
-
-import org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import static org.apache.inlong.sort.base.Constants.SINK_MULTIPLE_TYPE_MAP_COMPATIBLE_WITH_SPARK;
-import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.ALERT_WITH_IGNORE;
-import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.LOG_WITH_IGNORE;
-import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.TRY_IT_BEST;
-
-/**
- * MultipleSinkOption collects all parameters used for the multiple sink.
- */
-public class MultipleSinkOption implements Serializable {
-
-    private static final long serialVersionUID = 1L;
-    private static final Logger LOG = LoggerFactory.getLogger(MultipleSinkOption.class);
-
-    private final String format;
-
-    private boolean sparkEngineEnable;
-
-    private final SchemaUpdateExceptionPolicy schemaUpdatePolicy;
-    private final String databasePattern;
-
-    private final String tablePattern;
-
-    private final boolean pkAutoGenerated;
-
-    public MultipleSinkOption(String format,
-            boolean sparkEngineEnable,
-            SchemaUpdateExceptionPolicy schemaUpdatePolicy,
-            String databasePattern,
-            String tablePattern,
-            boolean pkAutoGenerated) {
-        this.format = format;
-        this.sparkEngineEnable = sparkEngineEnable;
-        this.schemaUpdatePolicy = schemaUpdatePolicy;
-        this.databasePattern = databasePattern;
-        this.tablePattern = tablePattern;
-        this.pkAutoGenerated = pkAutoGenerated;
-    }
-
-    public String getFormat() {
-        return format;
-    }
-
-    public boolean isSparkEngineEnable() {
-        return sparkEngineEnable;
-    }
-
-    public Map<String, String> getFormatOption() {
-        return ImmutableMap.of(
-                SINK_MULTIPLE_TYPE_MAP_COMPATIBLE_WITH_SPARK.key(), String.valueOf(isSparkEngineEnable()));
-    }
-
-    public SchemaUpdateExceptionPolicy getSchemaUpdatePolicy() {
-        return schemaUpdatePolicy;
-    }
-
-    public String getDatabasePattern() {
-        return databasePattern;
-    }
-
-    public String getTablePattern() {
-        return tablePattern;
-    }
-
-    public boolean isPkAutoGenerated() {
-        return pkAutoGenerated;
-    }
-
-    public static Builder builder() {
-        return new Builder();
-    }
-
-    public static class Builder {
-        private String format;
-        private boolean sparkEngineEnable;
-        private SchemaUpdateExceptionPolicy schemaUpdatePolicy;
-        private String databasePattern;
-        private String tablePattern;
-        private boolean pkAutoGenerated;
-
-        public MultipleSinkOption.Builder withFormat(String format) {
-            this.format = format;
-            return this;
-        }
-
-        public MultipleSinkOption.Builder withSparkEngineEnable(boolean sparkEngineEnable) {
-            this.sparkEngineEnable = sparkEngineEnable;
-            return this;
-        }
-
-        public MultipleSinkOption.Builder withSchemaUpdatePolicy(SchemaUpdateExceptionPolicy schemaUpdatePolicy) {
-            this.schemaUpdatePolicy = schemaUpdatePolicy;
-            return this;
-        }
-
-        public MultipleSinkOption.Builder withDatabasePattern(String databasePattern) {
-            this.databasePattern = databasePattern;
-            return this;
-        }
-
-        public MultipleSinkOption.Builder withTablePattern(String tablePattern) {
-            this.tablePattern = tablePattern;
-            return this;
-        }
-
-        public MultipleSinkOption.Builder withPkAutoGenerated(boolean pkAutoGenerated) {
-            this.pkAutoGenerated = pkAutoGenerated;
-            return this;
-        }
-
-        public MultipleSinkOption build() {
-            return new MultipleSinkOption(
-                    format, sparkEngineEnable, schemaUpdatePolicy, databasePattern, tablePattern, pkAutoGenerated);
-        }
-    }
-
-    public static boolean canHandleWithSchemaUpdate(String tableName,
-            TableChange tableChange, SchemaUpdateExceptionPolicy policy) {
-        if (TRY_IT_BEST.equals(policy)) {
-            return true;
-        } else if (LOG_WITH_IGNORE.equals(policy) || ALERT_WITH_IGNORE.equals(policy)) {
-            LOG.warn("Ignore table {} schema change: {}.", tableName, tableChange);
-            return false;
-        }
-
-        throw new UnsupportedOperationException(
-                String.format("Unsupported table %s schema change: %s.", tableName, tableChange));
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.base.sink;
+
+import org.apache.flink.shaded.guava18.com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.util.Map;
+
+import static org.apache.inlong.sort.base.Constants.SINK_MULTIPLE_TYPE_MAP_COMPATIBLE_WITH_SPARK;
+import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.ALERT_WITH_IGNORE;
+import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.LOG_WITH_IGNORE;
+import static org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy.TRY_IT_BEST;
+
+/**
+ * MultipleSinkOption collects all parameters used for the multiple sink.
+ */
+public class MultipleSinkOption implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+    private static final Logger LOG = LoggerFactory.getLogger(MultipleSinkOption.class);
+
+    private final String format;
+
+    private boolean sparkEngineEnable;
+
+    private final SchemaUpdateExceptionPolicy schemaUpdatePolicy;
+    private final String databasePattern;
+
+    private final String tablePattern;
+
+    private final boolean pkAutoGenerated;
+
+    public MultipleSinkOption(String format,
+            boolean sparkEngineEnable,
+            SchemaUpdateExceptionPolicy schemaUpdatePolicy,
+            String databasePattern,
+            String tablePattern,
+            boolean pkAutoGenerated) {
+        this.format = format;
+        this.sparkEngineEnable = sparkEngineEnable;
+        this.schemaUpdatePolicy = schemaUpdatePolicy;
+        this.databasePattern = databasePattern;
+        this.tablePattern = tablePattern;
+        this.pkAutoGenerated = pkAutoGenerated;
+    }
+
+    public String getFormat() {
+        return format;
+    }
+
+    public boolean isSparkEngineEnable() {
+        return sparkEngineEnable;
+    }
+
+    public Map<String, String> getFormatOption() {
+        return ImmutableMap.of(
+                SINK_MULTIPLE_TYPE_MAP_COMPATIBLE_WITH_SPARK.key(), String.valueOf(isSparkEngineEnable()));
+    }
+
+    public SchemaUpdateExceptionPolicy getSchemaUpdatePolicy() {
+        return schemaUpdatePolicy;
+    }
+
+    public String getDatabasePattern() {
+        return databasePattern;
+    }
+
+    public String getTablePattern() {
+        return tablePattern;
+    }
+
+    public boolean isPkAutoGenerated() {
+        return pkAutoGenerated;
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+        private String format;
+        private boolean sparkEngineEnable;
+        private SchemaUpdateExceptionPolicy schemaUpdatePolicy;
+        private String databasePattern;
+        private String tablePattern;
+        private boolean pkAutoGenerated;
+
+        public MultipleSinkOption.Builder withFormat(String format) {
+            this.format = format;
+            return this;
+        }
+
+        public MultipleSinkOption.Builder withSparkEngineEnable(boolean sparkEngineEnable) {
+            this.sparkEngineEnable = sparkEngineEnable;
+            return this;
+        }
+
+        public MultipleSinkOption.Builder withSchemaUpdatePolicy(SchemaUpdateExceptionPolicy schemaUpdatePolicy) {
+            this.schemaUpdatePolicy = schemaUpdatePolicy;
+            return this;
+        }
+
+        public MultipleSinkOption.Builder withDatabasePattern(String databasePattern) {
+            this.databasePattern = databasePattern;
+            return this;
+        }
+
+        public MultipleSinkOption.Builder withTablePattern(String tablePattern) {
+            this.tablePattern = tablePattern;
+            return this;
+        }
+
+        public MultipleSinkOption.Builder withPkAutoGenerated(boolean pkAutoGenerated) {
+            this.pkAutoGenerated = pkAutoGenerated;
+            return this;
+        }
+
+        public MultipleSinkOption build() {
+            return new MultipleSinkOption(
+                    format, sparkEngineEnable, schemaUpdatePolicy, databasePattern, tablePattern, pkAutoGenerated);
+        }
+    }
+
+    public static boolean canHandleWithSchemaUpdate(String tableName,
+            TableChange tableChange, SchemaUpdateExceptionPolicy policy) {
+        if (TRY_IT_BEST.equals(policy)) {
+            return true;
+        } else if (LOG_WITH_IGNORE.equals(policy) || ALERT_WITH_IGNORE.equals(policy)) {
+            LOG.warn("Ignore table {} schema change: {}.", tableName, tableChange);
+            return false;
+        }
+
+        throw new UnsupportedOperationException(
+                String.format("Unsupported table %s schema change: %s.", tableName, tableChange));
+    }
+}
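
A minimal sketch of assembling a MultipleSinkOption and asking whether a schema change can be handled under the configured policy. The format name, the pattern placeholders, the table name, and the sketch class name are illustrative assumptions.

    import org.apache.inlong.sort.base.sink.MultipleSinkOption;
    import org.apache.inlong.sort.base.sink.SchemaUpdateExceptionPolicy;
    import org.apache.inlong.sort.base.sink.TableChange;

    public class MultipleSinkOptionSketch {
        public static void main(String[] args) {
            MultipleSinkOption sinkOption = MultipleSinkOption.builder()
                    .withFormat("canal-json")
                    .withSparkEngineEnable(false)
                    .withSchemaUpdatePolicy(SchemaUpdateExceptionPolicy.TRY_IT_BEST)
                    .withDatabasePattern("${database}")
                    .withTablePattern("${table}")
                    .withPkAutoGenerated(false)
                    .build();

            // TRY_IT_BEST returns true (attempt the change); LOG_WITH_IGNORE / ALERT_WITH_IGNORE
            // log a warning and return false; any other policy raises UnsupportedOperationException
            boolean handled = MultipleSinkOption.canHandleWithSchemaUpdate(
                    "test_db.test_table",
                    new TableChange.UnknownColumnChange("ADD COLUMN c INT"),
                    sinkOption.getSchemaUpdatePolicy());
            System.out.println(handled);
        }
    }
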
diff --git a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/SchemaUpdateExceptionPolicy.java b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/SchemaUpdateExceptionPolicy.java
index 14f26b328..7da5ac772 100644
--- a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/SchemaUpdateExceptionPolicy.java
+++ b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/SchemaUpdateExceptionPolicy.java
@@ -1,46 +1,46 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.base.sink;
-
-/**
- * In multiple-sink scenarios the sink receives data from many different tables.
- * A table's data may carry a schema that does not match the catalog schema; this policy defines how such
- * mismatched data is handled. An example of a schema mismatch:
- *
- * <pre>
- * data : {a : int, b : string, c : date}
- * catalog : {a : string, b : timestamp}
- * </pre>
- */
-public enum SchemaUpdateExceptionPolicy {
-    TRY_IT_BEST("Try best-effort to handle the schema update; if it cannot be handled, just ignore it."),
-    LOG_WITH_IGNORE("Ignore schema update and log it."),
-    ALERT_WITH_IGNORE("Ignore schema update and alert it."),
-    THROW_WITH_STOP("Throw an exception to stop the Flink job when a schema update is met.");
-
-    private String description;
-
-    SchemaUpdateExceptionPolicy(String description) {
-        this.description = description;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.base.sink;
+
+/**
+ * In multiple-sink scenarios the sink receives data from many different tables.
+ * A table's data may carry a schema that does not match the catalog schema; this policy defines how such
+ * mismatched data is handled. An example of a schema mismatch:
+ *
+ * <pre>
+ * data : {a : int, b : string, c : date}
+ * catalog : {a : string, b : timestamp}
+ * </pre>
+ */
+public enum SchemaUpdateExceptionPolicy {
+    TRY_IT_BEST("Try best-effort to handle the schema update; if it cannot be handled, just ignore it."),
+    LOG_WITH_IGNORE("Ignore schema update and log it."),
+    ALERT_WITH_IGNORE("Ignore schema update and alert it."),
+    THROW_WITH_STOP("Throw an exception to stop the Flink job when a schema update is met.");
+
+    private String description;
+
+    SchemaUpdateExceptionPolicy(String description) {
+        this.description = description;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+}
diff --git a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/TableChange.java b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/TableChange.java
index ef1d4a15d..c6b423b8a 100644
--- a/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/TableChange.java
+++ b/inlong-sort/sort-connectors/base/src/main/java/org/apache/inlong/sort/base/sink/TableChange.java
@@ -1,201 +1,201 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.base.sink;
-
-import org.apache.flink.table.types.logical.LogicalType;
-import org.apache.flink.util.Preconditions;
-
-import javax.annotation.Nullable;
-import java.util.Arrays;
-import java.util.Objects;
-
-/**
- * TableChange represents requested changes to a table.
- */
-public interface TableChange {
-    final class First implements ColumnPosition {
-        private static final First INSTANCE = new First();
-
-        private First() {
-
-        }
-
-        @Override
-        public String toString() {
-            return "FIRST";
-        }
-    }
-
-    final class After implements ColumnPosition {
-        private final String column;
-
-        private After(String column) {
-            assert column != null;
-            this.column = column;
-        }
-
-        public String column() {
-            return column;
-        }
-
-        @Override
-        public String toString() {
-            return "AFTER " + column;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-            After after = (After) o;
-            return column.equals(after.column);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(column);
-        }
-    }
-
-    interface ColumnPosition {
-
-        static ColumnPosition first() {
-            return First.INSTANCE;
-        }
-
-        static ColumnPosition after(String column) {
-            return new After(column);
-        }
-    }
-
-    interface ColumnChange extends TableChange {
-        String[] fieldNames();
-    }
-
-    final class AddColumn implements ColumnChange {
-        private final String[] fieldNames;
-        private final LogicalType dataType;
-        private final boolean isNullable;
-        private final String comment;
-        private final ColumnPosition position;
-
-        public AddColumn(
-                String[] fieldNames,
-                LogicalType dataType,
-                boolean isNullable,
-                String comment,
-                ColumnPosition position) {
-            Preconditions.checkArgument(fieldNames.length > 0,
-                    "Invalid field name: at least one name is required");
-            this.fieldNames = fieldNames;
-            this.dataType = dataType;
-            this.isNullable = isNullable;
-            this.comment = comment;
-            this.position = position;
-        }
-
-        @Override
-        public String[] fieldNames() {
-            return fieldNames;
-        }
-
-        public LogicalType dataType() {
-            return dataType;
-        }
-
-        public boolean isNullable() {
-            return isNullable;
-        }
-
-        @Nullable
-        public String comment() {
-            return comment;
-        }
-
-        @Nullable
-        public ColumnPosition position() {
-            return position;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-            AddColumn addColumn = (AddColumn) o;
-            return isNullable == addColumn.isNullable
-                    && Arrays.equals(fieldNames, addColumn.fieldNames)
-                    && dataType.equals(addColumn.dataType)
-                    && Objects.equals(comment, addColumn.comment)
-                    && Objects.equals(position, addColumn.position);
-        }
-
-        @Override
-        public int hashCode() {
-            int result = Objects.hash(dataType, isNullable, comment, position);
-            result = 31 * result + Arrays.hashCode(fieldNames);
-            return result;
-        }
-
-        @Override
-        public String toString() {
-            return String.format("ADD COLUMNS `%s` %s %s %s %s",
-                    fieldNames[fieldNames.length - 1],
-                    dataType,
-                    isNullable ? "" : "NOT NULL",
-                    comment,
-                    position);
-        }
-    }
-
-    final class DeleteColumn implements ColumnChange {
-        @Override
-        public String[] fieldNames() {
-            return new String[0];
-        }
-    }
-
-    /**
-     * Represents a column change that is not recognized by the connector.
-     */
-    final class UnknownColumnChange implements ColumnChange {
-        private String description;
-
-        public UnknownColumnChange(String description) {
-            this.description = description;
-        }
-
-        @Override
-        public String[] fieldNames() {
-            return new String[0];
-        }
-
-        @Override
-        public String toString() {
-            return description;
-        }
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.base.sink;
+
+import org.apache.flink.table.types.logical.LogicalType;
+import org.apache.flink.util.Preconditions;
+
+import javax.annotation.Nullable;
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * TableChange represents requested changes to a table.
+ */
+public interface TableChange {
+    final class First implements ColumnPosition {
+        private static final First INSTANCE = new First();
+
+        private First() {
+
+        }
+
+        @Override
+        public String toString() {
+            return "FIRST";
+        }
+    }
+
+    final class After implements ColumnPosition {
+        private final String column;
+
+        private After(String column) {
+            assert column != null;
+            this.column = column;
+        }
+
+        public String column() {
+            return column;
+        }
+
+        @Override
+        public String toString() {
+            return "AFTER " + column;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            After after = (After) o;
+            return column.equals(after.column);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(column);
+        }
+    }
+
+    interface ColumnPosition {
+
+        static ColumnPosition first() {
+            return First.INSTANCE;
+        }
+
+        static ColumnPosition after(String column) {
+            return new After(column);
+        }
+    }
+
+    interface ColumnChange extends TableChange {
+        String[] fieldNames();
+    }
+
+    final class AddColumn implements ColumnChange {
+        private final String[] fieldNames;
+        private final LogicalType dataType;
+        private final boolean isNullable;
+        private final String comment;
+        private final ColumnPosition position;
+
+        public AddColumn(
+                String[] fieldNames,
+                LogicalType dataType,
+                boolean isNullable,
+                String comment,
+                ColumnPosition position) {
+            Preconditions.checkArgument(fieldNames.length > 0,
+                    "Invalid field name: at least one name is required");
+            this.fieldNames = fieldNames;
+            this.dataType = dataType;
+            this.isNullable = isNullable;
+            this.comment = comment;
+            this.position = position;
+        }
+
+        @Override
+        public String[] fieldNames() {
+            return fieldNames;
+        }
+
+        public LogicalType dataType() {
+            return dataType;
+        }
+
+        public boolean isNullable() {
+            return isNullable;
+        }
+
+        @Nullable
+        public String comment() {
+            return comment;
+        }
+
+        @Nullable
+        public ColumnPosition position() {
+            return position;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            AddColumn addColumn = (AddColumn) o;
+            return isNullable == addColumn.isNullable
+                    && Arrays.equals(fieldNames, addColumn.fieldNames)
+                    && dataType.equals(addColumn.dataType)
+                    && Objects.equals(comment, addColumn.comment)
+                    && Objects.equals(position, addColumn.position);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = Objects.hash(dataType, isNullable, comment, position);
+            result = 31 * result + Arrays.hashCode(fieldNames);
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return String.format("ADD COLUMNS `%s` %s %s %s %s",
+                    fieldNames[fieldNames.length - 1],
+                    dataType,
+                    isNullable ? "" : "NOT NULL",
+                    comment,
+                    position);
+        }
+    }
+
+    final class DeleteColumn implements ColumnChange {
+        @Override
+        public String[] fieldNames() {
+            return new String[0];
+        }
+    }
+
+    /**
+     * Represents a column change that is not recognized by the connector.
+     */
+    final class UnknownColumnChange implements ColumnChange {
+        private String description;
+
+        public UnknownColumnChange(String description) {
+            this.description = description;
+        }
+
+        @Override
+        public String[] fieldNames() {
+            return new String[0];
+        }
+
+        @Override
+        public String toString() {
+            return description;
+        }
+    }
+}
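
A minimal sketch of describing an added column with the types above. The column names, comment text, and sketch class name are illustrative.

    import org.apache.flink.table.types.logical.IntType;
    import org.apache.inlong.sort.base.sink.TableChange;
    import org.apache.inlong.sort.base.sink.TableChange.ColumnPosition;

    public class TableChangeSketch {
        public static void main(String[] args) {
            // add a nullable INT column `age` right after the existing `name` column
            TableChange addColumn = new TableChange.AddColumn(
                    new String[] {"age"},
                    new IntType(),
                    true,
                    "user age in years",
                    ColumnPosition.after("name"));
            // toString() renders the change as an "ADD COLUMNS ..." description
            System.out.println(addColumn);
        }
    }
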
diff --git a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/AbstractStreamingWriter.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/AbstractStreamingWriter.java
index 75182e7d3..dd9456203 100644
--- a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/AbstractStreamingWriter.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/AbstractStreamingWriter.java
@@ -1,233 +1,233 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.hive.filesystem;
-
-import org.apache.flink.api.common.state.ListState;
-import org.apache.flink.api.common.state.ListStateDescriptor;
-import org.apache.flink.api.common.typeinfo.TypeHint;
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.runtime.state.StateInitializationContext;
-import org.apache.flink.runtime.state.StateSnapshotContext;
-import org.apache.flink.streaming.api.functions.sink.filesystem.Bucket;
-import org.apache.flink.streaming.api.functions.sink.filesystem.BucketLifeCycleListener;
-import org.apache.flink.streaming.api.functions.sink.filesystem.Buckets;
-import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
-import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSinkHelper;
-import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
-import org.apache.flink.streaming.api.operators.BoundedOneInput;
-import org.apache.flink.streaming.api.operators.ChainingStrategy;
-import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
-import org.apache.flink.streaming.api.watermark.Watermark;
-import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
-import org.apache.inlong.sort.base.metric.MetricOption;
-import org.apache.inlong.sort.base.metric.MetricOption.RegisteredMetric;
-import org.apache.inlong.sort.base.metric.MetricState;
-import org.apache.inlong.sort.base.metric.SinkMetricData;
-import org.apache.inlong.sort.base.util.MetricStateUtils;
-
-import javax.annotation.Nullable;
-
-import static org.apache.inlong.sort.base.Constants.INLONG_METRIC_STATE_NAME;
-import static org.apache.inlong.sort.base.Constants.NUM_BYTES_OUT;
-import static org.apache.inlong.sort.base.Constants.NUM_RECORDS_OUT;
-
-/**
- * Operator for the file system sink. It is an operator version of {@link StreamingFileSink}. It can send
- * file and bucket information downstream.
- */
-public abstract class AbstractStreamingWriter<IN, OUT> extends AbstractStreamOperator<OUT>
-        implements OneInputStreamOperator<IN, OUT>, BoundedOneInput {
-
-    private static final long serialVersionUID = 1L;
-
-    // ------------------------ configuration fields --------------------------
-
-    private final long bucketCheckInterval;
-
-    private final StreamingFileSink.BucketsBuilder<
-            IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
-            bucketsBuilder;
-
-    @Nullable
-    private String inlongMetric;
-
-    @Nullable
-    private String auditHostAndPorts;
-
-    // --------------------------- runtime fields -----------------------------
-
-    private transient Buckets<IN, String> buckets;
-
-    private transient StreamingFileSinkHelper<IN> helper;
-
-    private transient long currentWatermark;
-
-    @Nullable
-    private transient SinkMetricData metricData;
-    private transient ListState<MetricState> metricStateListState;
-    private transient MetricState metricState;
-
-    public AbstractStreamingWriter(
-            long bucketCheckInterval,
-            StreamingFileSink.BucketsBuilder<
-                    IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
-                    bucketsBuilder,
-            String inlongMetric,
-            String auditHostAndPorts) {
-        this.bucketCheckInterval = bucketCheckInterval;
-        this.bucketsBuilder = bucketsBuilder;
-        this.inlongMetric = inlongMetric;
-        this.auditHostAndPorts = auditHostAndPorts;
-        setChainingStrategy(ChainingStrategy.ALWAYS);
-    }
-
-    /** Notifies that a partition has been created. */
-    protected abstract void partitionCreated(String partition);
-
-    /**
-     * Notifies that a partition has become inactive. A partition becomes inactive after all the
-     * records received so far have been committed.
-     */
-    protected abstract void partitionInactive(String partition);
-
-    /**
-     * Notifies that a new file has been opened.
-     *
-     * <p>Note that this does not mean that the file has been created in the file system. It is only
-     * created logically and the actual file will be generated after it is committed.
-     */
-    protected abstract void onPartFileOpened(String partition, Path newPath);
-
-    /** Commit up to this checkpoint id. */
-    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
-        helper.commitUpToCheckpoint(checkpointId);
-    }
-
-    @Override
-    public void open() throws Exception {
-        super.open();
-        MetricOption metricOption = MetricOption.builder()
-                .withInlongLabels(inlongMetric)
-                .withInlongAudit(auditHostAndPorts)
-                .withInitRecords(metricState != null ? metricState.getMetricValue(NUM_RECORDS_OUT) : 0L)
-                .withInitBytes(metricState != null ? metricState.getMetricValue(NUM_BYTES_OUT) : 0L)
-                .withRegisterMetric(RegisteredMetric.ALL)
-                .build();
-        if (metricOption != null) {
-            metricData = new SinkMetricData(metricOption, getRuntimeContext().getMetricGroup());
-        }
-    }
-
-    @Override
-    public void initializeState(StateInitializationContext context) throws Exception {
-        super.initializeState(context);
-        buckets = bucketsBuilder.createBuckets(getRuntimeContext().getIndexOfThisSubtask());
-
-        // Set listener before the initialization of Buckets.
-        buckets.setBucketLifeCycleListener(
-                new BucketLifeCycleListener<IN, String>() {
-
-                    @Override
-                    public void bucketCreated(Bucket<IN, String> bucket) {
-                        partitionCreated(bucket.getBucketId());
-                    }
-
-                    @Override
-                    public void bucketInactive(Bucket<IN, String> bucket) {
-                        partitionInactive(bucket.getBucketId());
-                    }
-                });
-
-        buckets.setFileLifeCycleListener(this::onPartFileOpened);
-
-        helper =
-                new StreamingFileSinkHelper<>(
-                        buckets,
-                        context.isRestored(),
-                        context.getOperatorStateStore(),
-                        getRuntimeContext().getProcessingTimeService(),
-                        bucketCheckInterval);
-
-        currentWatermark = Long.MIN_VALUE;
-
-        // init metric state
-        if (this.inlongMetric != null) {
-            this.metricStateListState = context.getOperatorStateStore().getUnionListState(
-                    new ListStateDescriptor<>(
-                            INLONG_METRIC_STATE_NAME, TypeInformation.of(new TypeHint<MetricState>() {
-                    })));
-        }
-        if (context.isRestored()) {
-            metricState = MetricStateUtils.restoreMetricState(metricStateListState,
-                    getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks());
-        }
-    }
-
-    @Override
-    public void snapshotState(StateSnapshotContext context) throws Exception {
-        super.snapshotState(context);
-        helper.snapshotState(context.getCheckpointId());
-        if (metricData != null && metricStateListState != null) {
-            MetricStateUtils.snapshotMetricStateForSinkMetricData(metricStateListState, metricData,
-                    getRuntimeContext().getIndexOfThisSubtask());
-        }
-    }
-
-    @Override
-    public void processWatermark(Watermark mark) throws Exception {
-        super.processWatermark(mark);
-        currentWatermark = mark.getTimestamp();
-    }
-
-    @Override
-    public void processElement(StreamRecord<IN> element) throws Exception {
-        helper.onElement(
-                element.getValue(),
-                getProcessingTimeService().getCurrentProcessingTime(),
-                element.hasTimestamp() ? element.getTimestamp() : null,
-                currentWatermark);
-        if (metricData != null) {
-            metricData.invokeWithEstimate(element.getValue());
-        }
-    }
-
-    @Override
-    public void notifyCheckpointComplete(long checkpointId) throws Exception {
-        super.notifyCheckpointComplete(checkpointId);
-        commitUpToCheckpoint(checkpointId);
-    }
-
-    @Override
-    public void endInput() throws Exception {
-        buckets.onProcessingTime(Long.MAX_VALUE);
-        helper.snapshotState(Long.MAX_VALUE);
-        output.emitWatermark(new Watermark(Long.MAX_VALUE));
-        commitUpToCheckpoint(Long.MAX_VALUE);
-    }
-
-    @Override
-    public void dispose() throws Exception {
-        super.dispose();
-        if (helper != null) {
-            helper.close();
-        }
-    }
-}
-
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.hive.filesystem;
+
+import org.apache.flink.api.common.state.ListState;
+import org.apache.flink.api.common.state.ListStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeHint;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.runtime.state.StateInitializationContext;
+import org.apache.flink.runtime.state.StateSnapshotContext;
+import org.apache.flink.streaming.api.functions.sink.filesystem.Bucket;
+import org.apache.flink.streaming.api.functions.sink.filesystem.BucketLifeCycleListener;
+import org.apache.flink.streaming.api.functions.sink.filesystem.Buckets;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSinkHelper;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
+import org.apache.flink.streaming.api.operators.BoundedOneInput;
+import org.apache.flink.streaming.api.operators.ChainingStrategy;
+import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
+import org.apache.flink.streaming.api.watermark.Watermark;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.inlong.sort.base.metric.MetricOption;
+import org.apache.inlong.sort.base.metric.MetricOption.RegisteredMetric;
+import org.apache.inlong.sort.base.metric.MetricState;
+import org.apache.inlong.sort.base.metric.SinkMetricData;
+import org.apache.inlong.sort.base.util.MetricStateUtils;
+
+import javax.annotation.Nullable;
+
+import static org.apache.inlong.sort.base.Constants.INLONG_METRIC_STATE_NAME;
+import static org.apache.inlong.sort.base.Constants.NUM_BYTES_OUT;
+import static org.apache.inlong.sort.base.Constants.NUM_RECORDS_OUT;
+
+/**
+ * Operator for the file system sink. It is an operator version of {@link StreamingFileSink}. It can
+ * send file and bucket information downstream.
+ */
+public abstract class AbstractStreamingWriter<IN, OUT> extends AbstractStreamOperator<OUT>
+        implements OneInputStreamOperator<IN, OUT>, BoundedOneInput {
+
+    private static final long serialVersionUID = 1L;
+
+    // ------------------------ configuration fields --------------------------
+
+    private final long bucketCheckInterval;
+
+    private final StreamingFileSink.BucketsBuilder<
+            IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
+            bucketsBuilder;
+
+    @Nullable
+    private String inlongMetric;
+
+    @Nullable
+    private String auditHostAndPorts;
+
+    // --------------------------- runtime fields -----------------------------
+
+    private transient Buckets<IN, String> buckets;
+
+    private transient StreamingFileSinkHelper<IN> helper;
+
+    private transient long currentWatermark;
+
+    @Nullable
+    private transient SinkMetricData metricData;
+    private transient ListState<MetricState> metricStateListState;
+    private transient MetricState metricState;
+
+    public AbstractStreamingWriter(
+            long bucketCheckInterval,
+            StreamingFileSink.BucketsBuilder<
+                    IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
+                    bucketsBuilder,
+            String inlongMetric,
+            String auditHostAndPorts) {
+        this.bucketCheckInterval = bucketCheckInterval;
+        this.bucketsBuilder = bucketsBuilder;
+        this.inlongMetric = inlongMetric;
+        this.auditHostAndPorts = auditHostAndPorts;
+        setChainingStrategy(ChainingStrategy.ALWAYS);
+    }
+
+    /** Notifies that a partition has been created. */
+    protected abstract void partitionCreated(String partition);
+
+    /**
+     * Notifies that a partition has become inactive. A partition becomes inactive after all the
+     * records received so far have been committed.
+     */
+    protected abstract void partitionInactive(String partition);
+
+    /**
+     * Notifies that a new file has been opened.
+     *
+     * <p>Note that this does not mean that the file has been created in the file system. It is only
+     * created logically and the actual file will be generated after it is committed.
+     */
+    protected abstract void onPartFileOpened(String partition, Path newPath);
+
+    /** Commit up to this checkpoint id. */
+    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
+        helper.commitUpToCheckpoint(checkpointId);
+    }
+
+    @Override
+    public void open() throws Exception {
+        super.open();
+        MetricOption metricOption = MetricOption.builder()
+                .withInlongLabels(inlongMetric)
+                .withInlongAudit(auditHostAndPorts)
+                .withInitRecords(metricState != null ? metricState.getMetricValue(NUM_RECORDS_OUT) : 0L)
+                .withInitBytes(metricState != null ? metricState.getMetricValue(NUM_BYTES_OUT) : 0L)
+                .withRegisterMetric(RegisteredMetric.ALL)
+                .build();
+        if (metricOption != null) {
+            metricData = new SinkMetricData(metricOption, getRuntimeContext().getMetricGroup());
+        }
+    }
+
+    @Override
+    public void initializeState(StateInitializationContext context) throws Exception {
+        super.initializeState(context);
+        buckets = bucketsBuilder.createBuckets(getRuntimeContext().getIndexOfThisSubtask());
+
+        // Set listener before the initialization of Buckets.
+        buckets.setBucketLifeCycleListener(
+                new BucketLifeCycleListener<IN, String>() {
+
+                    @Override
+                    public void bucketCreated(Bucket<IN, String> bucket) {
+                        partitionCreated(bucket.getBucketId());
+                    }
+
+                    @Override
+                    public void bucketInactive(Bucket<IN, String> bucket) {
+                        partitionInactive(bucket.getBucketId());
+                    }
+                });
+
+        buckets.setFileLifeCycleListener(this::onPartFileOpened);
+
+        helper =
+                new StreamingFileSinkHelper<>(
+                        buckets,
+                        context.isRestored(),
+                        context.getOperatorStateStore(),
+                        getRuntimeContext().getProcessingTimeService(),
+                        bucketCheckInterval);
+
+        currentWatermark = Long.MIN_VALUE;
+
+        // init metric state
+        if (this.inlongMetric != null) {
+            this.metricStateListState = context.getOperatorStateStore().getUnionListState(
+                    new ListStateDescriptor<>(
+                            INLONG_METRIC_STATE_NAME, TypeInformation.of(new TypeHint<MetricState>() {
+                    })));
+        }
+        if (context.isRestored()) {
+            metricState = MetricStateUtils.restoreMetricState(metricStateListState,
+                    getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks());
+        }
+    }
+
+    @Override
+    public void snapshotState(StateSnapshotContext context) throws Exception {
+        super.snapshotState(context);
+        helper.snapshotState(context.getCheckpointId());
+        if (metricData != null && metricStateListState != null) {
+            MetricStateUtils.snapshotMetricStateForSinkMetricData(metricStateListState, metricData,
+                    getRuntimeContext().getIndexOfThisSubtask());
+        }
+    }
+
+    @Override
+    public void processWatermark(Watermark mark) throws Exception {
+        super.processWatermark(mark);
+        currentWatermark = mark.getTimestamp();
+    }
+
+    @Override
+    public void processElement(StreamRecord<IN> element) throws Exception {
+        helper.onElement(
+                element.getValue(),
+                getProcessingTimeService().getCurrentProcessingTime(),
+                element.hasTimestamp() ? element.getTimestamp() : null,
+                currentWatermark);
+        if (metricData != null) {
+            metricData.invokeWithEstimate(element.getValue());
+        }
+    }
+
+    @Override
+    public void notifyCheckpointComplete(long checkpointId) throws Exception {
+        super.notifyCheckpointComplete(checkpointId);
+        commitUpToCheckpoint(checkpointId);
+    }
+
+    @Override
+    public void endInput() throws Exception {
+        buckets.onProcessingTime(Long.MAX_VALUE);
+        helper.snapshotState(Long.MAX_VALUE);
+        output.emitWatermark(new Watermark(Long.MAX_VALUE));
+        commitUpToCheckpoint(Long.MAX_VALUE);
+    }
+
+    @Override
+    public void dispose() throws Exception {
+        super.dispose();
+        if (helper != null) {
+            helper.close();
+        }
+    }
+}
+
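For reference, a minimal concrete subclass of AbstractStreamingWriter only has to implement the three partition/file callbacks above; the sketch below uses a hypothetical class name and no-op bodies, assuming the same constructor signature shown in the diff, and is not part of this revert.

package org.apache.inlong.sort.hive.filesystem;

import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;

/** Hypothetical no-op subclass, shown only to illustrate the callback contract. */
public class NoOpStreamingWriter<IN> extends AbstractStreamingWriter<IN, IN> {

    private static final long serialVersionUID = 1L;

    public NoOpStreamingWriter(
            long bucketCheckInterval,
            StreamingFileSink.BucketsBuilder<
                    IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
                    bucketsBuilder,
            String inlongMetric,
            String auditHostAndPorts) {
        super(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
    }

    @Override
    protected void partitionCreated(String partition) {
        // called when a new bucket (partition directory) is created
    }

    @Override
    protected void partitionInactive(String partition) {
        // called once all records received so far for the partition have been committed
    }

    @Override
    protected void onPartFileOpened(String partition, Path newPath) {
        // called when a new in-progress part file is opened under the partition
    }
}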
diff --git a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/CompactFileWriter.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/CompactFileWriter.java
index 8e1ee100c..2c368ba19 100644
--- a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/CompactFileWriter.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/CompactFileWriter.java
@@ -1,69 +1,69 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.hive.filesystem;
-
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
-import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
-import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorInput;
-import org.apache.flink.table.filesystem.stream.compact.CompactMessages.EndCheckpoint;
-import org.apache.flink.table.filesystem.stream.compact.CompactMessages.InputFile;
-
-/** Writer for emitting {@link InputFile} and {@link EndCheckpoint} downstream. */
-public class CompactFileWriter<T>
-        extends AbstractStreamingWriter<T, CoordinatorInput> {
-
-    private static final long serialVersionUID = 1L;
-
-    public CompactFileWriter(
-            long bucketCheckInterval,
-            StreamingFileSink.BucketsBuilder<
-                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
-                    bucketsBuilder,
-            String inlongMetric,
-            String auditHostAndPorts) {
-        super(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
-    }
-
-    @Override
-    protected void partitionCreated(String partition) {
-
-    }
-
-    @Override
-    protected void partitionInactive(String partition) {
-
-    }
-
-    @Override
-    protected void onPartFileOpened(String partition, Path newPath) {
-        output.collect(new StreamRecord<>(new InputFile(partition, newPath)));
-    }
-
-    @Override
-    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
-        super.commitUpToCheckpoint(checkpointId);
-        output.collect(
-                new StreamRecord<>(
-                        new EndCheckpoint(
-                                checkpointId,
-                                getRuntimeContext().getIndexOfThisSubtask(),
-                                getRuntimeContext().getNumberOfParallelSubtasks())));
-    }
-}
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.hive.filesystem;
+
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorInput;
+import org.apache.flink.table.filesystem.stream.compact.CompactMessages.EndCheckpoint;
+import org.apache.flink.table.filesystem.stream.compact.CompactMessages.InputFile;
+
+/** Writer for emitting {@link InputFile} and {@link EndCheckpoint} downstream. */
+public class CompactFileWriter<T>
+        extends AbstractStreamingWriter<T, CoordinatorInput> {
+
+    private static final long serialVersionUID = 1L;
+
+    public CompactFileWriter(
+            long bucketCheckInterval,
+            StreamingFileSink.BucketsBuilder<
+                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
+                    bucketsBuilder,
+            String inlongMetric,
+            String auditHostAndPorts) {
+        super(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
+    }
+
+    @Override
+    protected void partitionCreated(String partition) {
+
+    }
+
+    @Override
+    protected void partitionInactive(String partition) {
+
+    }
+
+    @Override
+    protected void onPartFileOpened(String partition, Path newPath) {
+        output.collect(new StreamRecord<>(new InputFile(partition, newPath)));
+    }
+
+    @Override
+    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
+        super.commitUpToCheckpoint(checkpointId);
+        output.collect(
+                new StreamRecord<>(
+                        new EndCheckpoint(
+                                checkpointId,
+                                getRuntimeContext().getIndexOfThisSubtask(),
+                                getRuntimeContext().getNumberOfParallelSubtasks())));
+    }
+}
diff --git a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingFileWriter.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingFileWriter.java
index a8285775b..b0c8aee26 100644
--- a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingFileWriter.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingFileWriter.java
@@ -1,103 +1,103 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements. See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License. You may obtain a copy of the License at
- *
- *  http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.inlong.sort.hive.filesystem;
-
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.runtime.state.StateInitializationContext;
-import org.apache.flink.runtime.state.StateSnapshotContext;
-import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
-import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
-import org.apache.flink.table.filesystem.stream.PartitionCommitInfo;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
-
-/** Writer for emitting {@link PartitionCommitInfo} downstream. */
-public class StreamingFileWriter<IN> extends AbstractStreamingWriter<IN, PartitionCommitInfo> {
-
-    private static final long serialVersionUID = 2L;
-
-    private transient Set<String> currentNewPartitions;
-    private transient TreeMap<Long, Set<String>> newPartitions;
-    private transient Set<String> committablePartitions;
-
-    public StreamingFileWriter(
-            long bucketCheckInterval,
-            StreamingFileSink.BucketsBuilder<
-                    IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
-                    bucketsBuilder,
-            String inlongMetric,
-            String auditHostAndPorts) {
-        super(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
-    }
-
-    @Override
-    public void initializeState(StateInitializationContext context) throws Exception {
-        currentNewPartitions = new HashSet<>();
-        newPartitions = new TreeMap<>();
-        committablePartitions = new HashSet<>();
-        super.initializeState(context);
-    }
-
-    @Override
-    protected void partitionCreated(String partition) {
-        currentNewPartitions.add(partition);
-    }
-
-    @Override
-    protected void partitionInactive(String partition) {
-        committablePartitions.add(partition);
-    }
-
-    @Override
-    protected void onPartFileOpened(String s, Path newPath) {
-
-    }
-
-    @Override
-    public void snapshotState(StateSnapshotContext context) throws Exception {
-        super.snapshotState(context);
-        newPartitions.put(context.getCheckpointId(), new HashSet<>(currentNewPartitions));
-        currentNewPartitions.clear();
-    }
-
-    @Override
-    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
-        super.commitUpToCheckpoint(checkpointId);
-
-        NavigableMap<Long, Set<String>> headPartitions =
-                this.newPartitions.headMap(checkpointId, true);
-        Set<String> partitions = new HashSet<>(committablePartitions);
-        committablePartitions.clear();
-        headPartitions.values().forEach(partitions::addAll);
-        headPartitions.clear();
-
-        output.collect(
-                new StreamRecord<>(
-                        new PartitionCommitInfo(
-                                checkpointId,
-                                getRuntimeContext().getIndexOfThisSubtask(),
-                                getRuntimeContext().getNumberOfParallelSubtasks(),
-                                new ArrayList<>(partitions))));
-    }
-}
-
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements. See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License. You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.inlong.sort.hive.filesystem;
+
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.runtime.state.StateInitializationContext;
+import org.apache.flink.runtime.state.StateSnapshotContext;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.flink.table.filesystem.stream.PartitionCommitInfo;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeMap;
+
+/** Writer for emitting {@link PartitionCommitInfo} downstream. */
+public class StreamingFileWriter<IN> extends AbstractStreamingWriter<IN, PartitionCommitInfo> {
+
+    private static final long serialVersionUID = 2L;
+
+    private transient Set<String> currentNewPartitions;
+    private transient TreeMap<Long, Set<String>> newPartitions;
+    private transient Set<String> committablePartitions;
+
+    public StreamingFileWriter(
+            long bucketCheckInterval,
+            StreamingFileSink.BucketsBuilder<
+                    IN, String, ? extends StreamingFileSink.BucketsBuilder<IN, String, ?>>
+                    bucketsBuilder,
+            String inlongMetric,
+            String auditHostAndPorts) {
+        super(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
+    }
+
+    @Override
+    public void initializeState(StateInitializationContext context) throws Exception {
+        currentNewPartitions = new HashSet<>();
+        newPartitions = new TreeMap<>();
+        committablePartitions = new HashSet<>();
+        super.initializeState(context);
+    }
+
+    @Override
+    protected void partitionCreated(String partition) {
+        currentNewPartitions.add(partition);
+    }
+
+    @Override
+    protected void partitionInactive(String partition) {
+        committablePartitions.add(partition);
+    }
+
+    @Override
+    protected void onPartFileOpened(String s, Path newPath) {
+
+    }
+
+    @Override
+    public void snapshotState(StateSnapshotContext context) throws Exception {
+        super.snapshotState(context);
+        newPartitions.put(context.getCheckpointId(), new HashSet<>(currentNewPartitions));
+        currentNewPartitions.clear();
+    }
+
+    @Override
+    protected void commitUpToCheckpoint(long checkpointId) throws Exception {
+        super.commitUpToCheckpoint(checkpointId);
+
+        NavigableMap<Long, Set<String>> headPartitions =
+                this.newPartitions.headMap(checkpointId, true);
+        Set<String> partitions = new HashSet<>(committablePartitions);
+        committablePartitions.clear();
+        headPartitions.values().forEach(partitions::addAll);
+        headPartitions.clear();
+
+        output.collect(
+                new StreamRecord<>(
+                        new PartitionCommitInfo(
+                                checkpointId,
+                                getRuntimeContext().getIndexOfThisSubtask(),
+                                getRuntimeContext().getNumberOfParallelSubtasks(),
+                                new ArrayList<>(partitions))));
+    }
+}
+
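The commitUpToCheckpoint override above relies on NavigableMap.headMap(checkpointId, true): the inclusive head view collects every partition registered at or before the given checkpoint, and clearing the view also removes those entries from the backing TreeMap. A small JDK-only sketch of that behavior, with illustrative partition names (not part of this revert):

import java.util.Collections;
import java.util.HashSet;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;

public class HeadMapDemo {

    public static void main(String[] args) {
        TreeMap<Long, Set<String>> newPartitions = new TreeMap<>();
        newPartitions.put(1L, Collections.singleton("dt=2022-11-23"));
        newPartitions.put(2L, Collections.singleton("dt=2022-11-24"));
        newPartitions.put(3L, Collections.singleton("dt=2022-11-25"));

        long checkpointId = 2L;
        // Inclusive head view: all partitions registered at or before checkpoint 2.
        NavigableMap<Long, Set<String>> head = newPartitions.headMap(checkpointId, true);

        Set<String> toCommit = new HashSet<>();
        head.values().forEach(toCommit::addAll);
        head.clear(); // clearing the view also drops those entries from newPartitions

        System.out.println("commit: " + toCommit);        // dt=2022-11-23, dt=2022-11-24
        System.out.println("pending: " + newPartitions);  // {3=[dt=2022-11-25]}
    }
}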
diff --git a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingSink.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingSink.java
index bba10e9a9..1c7663064 100644
--- a/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingSink.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/filesystem/StreamingSink.java
@@ -1,164 +1,164 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.hive.filesystem;
-
-import org.apache.flink.api.common.typeinfo.TypeInformation;
-import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.core.fs.FileSystem;
-import org.apache.flink.core.fs.Path;
-import org.apache.flink.streaming.api.datastream.DataStream;
-import org.apache.flink.streaming.api.datastream.DataStreamSink;
-import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
-import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
-import org.apache.flink.streaming.api.functions.sink.filesystem.BucketWriter;
-import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
-import org.apache.flink.table.catalog.ObjectIdentifier;
-import org.apache.flink.table.filesystem.FileSystemFactory;
-import org.apache.flink.table.filesystem.TableMetaStoreFactory;
-import org.apache.flink.table.filesystem.stream.PartitionCommitInfo;
-import org.apache.flink.table.filesystem.stream.PartitionCommitter;
-import org.apache.flink.table.filesystem.stream.compact.CompactBucketWriter;
-import org.apache.flink.table.filesystem.stream.compact.CompactCoordinator;
-import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorInput;
-import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorOutput;
-import org.apache.flink.table.filesystem.stream.compact.CompactOperator;
-import org.apache.flink.table.filesystem.stream.compact.CompactReader;
-import org.apache.flink.table.filesystem.stream.compact.CompactWriter;
-import org.apache.flink.util.function.SupplierWithException;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.List;
-
-import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_PARTITION_COMMIT_POLICY_KIND;
-
-/** Helper for creating streaming file sink. */
-public class StreamingSink {
-    private StreamingSink() {
-
-    }
-
-    /**
-     * Create a file writer from the input stream. This is similar to {@link StreamingFileSink};
-     * in addition, it can emit {@link PartitionCommitInfo} downstream.
-     */
-    public static <T> DataStream<PartitionCommitInfo> writer(
-            DataStream<T> inputStream,
-            long bucketCheckInterval,
-            StreamingFileSink.BucketsBuilder<
-                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
-                    bucketsBuilder,
-            int parallelism,
-            String inlongMetric,
-            String auditHostAndPorts) {
-        StreamingFileWriter<T> fileWriter =
-                new StreamingFileWriter<>(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
-        return inputStream
-                .transform(
-                        StreamingFileWriter.class.getSimpleName(),
-                        TypeInformation.of(PartitionCommitInfo.class),
-                        fileWriter)
-                .setParallelism(parallelism);
-    }
-
-    /**
-     * Create a file writer with compaction operators from the input stream. In addition, it can
-     * emit {@link PartitionCommitInfo} downstream.
-     */
-    public static <T> DataStream<PartitionCommitInfo> compactionWriter(
-            DataStream<T> inputStream,
-            long bucketCheckInterval,
-            StreamingFileSink.BucketsBuilder<
-                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
-                    bucketsBuilder,
-            FileSystemFactory fsFactory,
-            Path path,
-            CompactReader.Factory<T> readFactory,
-            long targetFileSize,
-            int parallelism,
-            String inlongMetric,
-            String auditHostAndPorts) {
-        CompactFileWriter<T> writer = new CompactFileWriter<>(
-                bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
-
-        SupplierWithException<FileSystem, IOException> fsSupplier =
-                (SupplierWithException<FileSystem, IOException> & Serializable)
-                        () -> fsFactory.create(path.toUri());
-
-        CompactCoordinator coordinator = new CompactCoordinator(fsSupplier, targetFileSize);
-
-        SingleOutputStreamOperator<CoordinatorOutput> coordinatorOp =
-                inputStream
-                        .transform(
-                                "streaming-writer",
-                                TypeInformation.of(CoordinatorInput.class),
-                                writer)
-                        .setParallelism(parallelism)
-                        .transform(
-                                "compact-coordinator",
-                                TypeInformation.of(CoordinatorOutput.class),
-                                coordinator)
-                        .setParallelism(1)
-                        .setMaxParallelism(1);
-
-        CompactWriter.Factory<T> writerFactory =
-                CompactBucketWriter.factory(
-                        (SupplierWithException<BucketWriter<T, String>, IOException> & Serializable)
-                                bucketsBuilder::createBucketWriter);
-
-        CompactOperator<T> compacter =
-                new CompactOperator<>(fsSupplier, readFactory, writerFactory);
-
-        return coordinatorOp
-                .broadcast()
-                .transform(
-                        "compact-operator",
-                        TypeInformation.of(PartitionCommitInfo.class),
-                        compacter)
-                .setParallelism(parallelism);
-    }
-
-    /**
-     * Create a sink from the file writer. Decide whether to add the partition-committing node
-     * according to the options.
-     */
-    public static DataStreamSink<?> sink(
-            DataStream<PartitionCommitInfo> writer,
-            Path locationPath,
-            ObjectIdentifier identifier,
-            List<String> partitionKeys,
-            TableMetaStoreFactory msFactory,
-            FileSystemFactory fsFactory,
-            Configuration options) {
-        DataStream<?> stream = writer;
-        if (partitionKeys.size() > 0 && options.contains(SINK_PARTITION_COMMIT_POLICY_KIND)) {
-            PartitionCommitter committer =
-                    new PartitionCommitter(
-                            locationPath, identifier, partitionKeys, msFactory, fsFactory, options);
-            stream =
-                    writer.transform(
-                                    PartitionCommitter.class.getSimpleName(), Types.VOID, committer)
-                            .setParallelism(1)
-                            .setMaxParallelism(1);
-        }
-
-        return stream.addSink(new DiscardingSink<>()).name("end").setParallelism(1);
-    }
-}
-
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.hive.filesystem;
+
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.core.fs.FileSystem;
+import org.apache.flink.core.fs.Path;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
+import org.apache.flink.streaming.api.functions.sink.filesystem.BucketWriter;
+import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
+import org.apache.flink.table.catalog.ObjectIdentifier;
+import org.apache.flink.table.filesystem.FileSystemFactory;
+import org.apache.flink.table.filesystem.TableMetaStoreFactory;
+import org.apache.flink.table.filesystem.stream.PartitionCommitInfo;
+import org.apache.flink.table.filesystem.stream.PartitionCommitter;
+import org.apache.flink.table.filesystem.stream.compact.CompactBucketWriter;
+import org.apache.flink.table.filesystem.stream.compact.CompactCoordinator;
+import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorInput;
+import org.apache.flink.table.filesystem.stream.compact.CompactMessages.CoordinatorOutput;
+import org.apache.flink.table.filesystem.stream.compact.CompactOperator;
+import org.apache.flink.table.filesystem.stream.compact.CompactReader;
+import org.apache.flink.table.filesystem.stream.compact.CompactWriter;
+import org.apache.flink.util.function.SupplierWithException;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+
+import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_PARTITION_COMMIT_POLICY_KIND;
+
+/** Helper for creating streaming file sink. */
+public class StreamingSink {
+    private StreamingSink() {
+
+    }
+
+    /**
+     * Create a file writer from the input stream. This is similar to {@link StreamingFileSink};
+     * in addition, it can emit {@link PartitionCommitInfo} downstream.
+     */
+    public static <T> DataStream<PartitionCommitInfo> writer(
+            DataStream<T> inputStream,
+            long bucketCheckInterval,
+            StreamingFileSink.BucketsBuilder<
+                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
+                    bucketsBuilder,
+            int parallelism,
+            String inlongMetric,
+            String auditHostAndPorts) {
+        StreamingFileWriter<T> fileWriter =
+                new StreamingFileWriter<>(bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
+        return inputStream
+                .transform(
+                        StreamingFileWriter.class.getSimpleName(),
+                        TypeInformation.of(PartitionCommitInfo.class),
+                        fileWriter)
+                .setParallelism(parallelism);
+    }
+
+    /**
+     * Create a file writer with compaction operators from the input stream. In addition, it can
+     * emit {@link PartitionCommitInfo} downstream.
+     */
+    public static <T> DataStream<PartitionCommitInfo> compactionWriter(
+            DataStream<T> inputStream,
+            long bucketCheckInterval,
+            StreamingFileSink.BucketsBuilder<
+                    T, String, ? extends StreamingFileSink.BucketsBuilder<T, String, ?>>
+                    bucketsBuilder,
+            FileSystemFactory fsFactory,
+            Path path,
+            CompactReader.Factory<T> readFactory,
+            long targetFileSize,
+            int parallelism,
+            String inlongMetric,
+            String auditHostAndPorts) {
+        CompactFileWriter<T> writer = new CompactFileWriter<>(
+                bucketCheckInterval, bucketsBuilder, inlongMetric, auditHostAndPorts);
+
+        SupplierWithException<FileSystem, IOException> fsSupplier =
+                (SupplierWithException<FileSystem, IOException> & Serializable)
+                        () -> fsFactory.create(path.toUri());
+
+        CompactCoordinator coordinator = new CompactCoordinator(fsSupplier, targetFileSize);
+
+        SingleOutputStreamOperator<CoordinatorOutput> coordinatorOp =
+                inputStream
+                        .transform(
+                                "streaming-writer",
+                                TypeInformation.of(CoordinatorInput.class),
+                                writer)
+                        .setParallelism(parallelism)
+                        .transform(
+                                "compact-coordinator",
+                                TypeInformation.of(CoordinatorOutput.class),
+                                coordinator)
+                        .setParallelism(1)
+                        .setMaxParallelism(1);
+
+        CompactWriter.Factory<T> writerFactory =
+                CompactBucketWriter.factory(
+                        (SupplierWithException<BucketWriter<T, String>, IOException> & Serializable)
+                                bucketsBuilder::createBucketWriter);
+
+        CompactOperator<T> compacter =
+                new CompactOperator<>(fsSupplier, readFactory, writerFactory);
+
+        return coordinatorOp
+                .broadcast()
+                .transform(
+                        "compact-operator",
+                        TypeInformation.of(PartitionCommitInfo.class),
+                        compacter)
+                .setParallelism(parallelism);
+    }
+
+    /**
+     * Create a sink from the file writer. Decide whether to add the partition-committing node
+     * according to the options.
+     */
+    public static DataStreamSink<?> sink(
+            DataStream<PartitionCommitInfo> writer,
+            Path locationPath,
+            ObjectIdentifier identifier,
+            List<String> partitionKeys,
+            TableMetaStoreFactory msFactory,
+            FileSystemFactory fsFactory,
+            Configuration options) {
+        DataStream<?> stream = writer;
+        if (partitionKeys.size() > 0 && options.contains(SINK_PARTITION_COMMIT_POLICY_KIND)) {
+            PartitionCommitter committer =
+                    new PartitionCommitter(
+                            locationPath, identifier, partitionKeys, msFactory, fsFactory, options);
+            stream =
+                    writer.transform(
+                                    PartitionCommitter.class.getSimpleName(), Types.VOID, committer)
+                            .setParallelism(1)
+                            .setMaxParallelism(1);
+        }
+
+        return stream.addSink(new DiscardingSink<>()).name("end").setParallelism(1);
+    }
+}
+
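One detail in compactionWriter above is the intersection-type cast that turns a plain lambda into a Serializable supplier, which Flink needs before it ships the supplier inside an operator. A standalone sketch of the same idiom follows; the file path and class name are illustrative only and not part of this revert.

import java.io.IOException;
import java.io.Serializable;

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.util.function.SupplierWithException;

public class SerializableSupplierDemo {

    public static void main(String[] args) throws IOException {
        Path path = new Path("file:///tmp/serializable-supplier-demo");

        // The cast to an intersection type adds Serializable to the lambda's
        // inferred interfaces, so Flink can serialize the supplier when the
        // operator is distributed to the cluster.
        SupplierWithException<FileSystem, IOException> fsSupplier =
                (SupplierWithException<FileSystem, IOException> & Serializable)
                        () -> path.getFileSystem();

        System.out.println(fsSupplier.get().getClass().getName());
    }
}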
diff --git a/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/CompactTableProperties.java b/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/CompactTableProperties.java
index fa379b227..08efdc00d 100644
--- a/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/CompactTableProperties.java
+++ b/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/CompactTableProperties.java
@@ -1,98 +1,98 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.inlong.sort.iceberg.flink;
-
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-public class CompactTableProperties {
-    public static final String COMPACT_PREFIX = "write.compact.";
-
-    public static final String COMPACT_ENABLED = "write.compact.enable";
-    public static final boolean COMPACT_ENABLED_DEFAULT = false;
-
-    public static final String COMPACT_INTERVAL = "write.compact.snapshot.interval";
-    public static final int COMPACT_INTERVAL_DEFAULT = 5;
-
-    public static final String COMPACT_RESOUCE_POOL = "write.compact.resource.name";
-    public static final String COMPACT_RESOUCE_POOL_DEFAULT = "default";
-
-    // Supported by spark rewrite action option
-    public static final String COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES
-            = "write.compact.max-concurrent-file-group-rewrites";
-    public static final int COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT = 1;
-
-    public static final String COMPACT_MAX_FILE_GROUP_SIZE_BYTES = "write.compact.max-file-group-size-bytes";
-    public static final long COMPACT_MAX_FILE_GROUP_SIZE_BYTES_DEFAULT = 1024L * 1024L * 1024L * 100L; // 100 Gigabytes
-
-    public static final String COMPACT_PARTIAL_PROGRESS_ENABLED = "write.compact.partial-progress.enabled";
-    public static final boolean COMPACT_PARTIAL_PROGRESS_ENABLED_DEFAULT = false;
-
-    public static final String COMPACT_PARTIAL_PROGRESS_MAX_COMMITS = "write.compact.partial-progress.max-commits";
-    public static final int COMPACT_PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT = 10;
-
-    public static final String COMPACT_TARGET_FILE_SIZE_BYTES = "write.compact.target-file-size-bytes";
-    public static final int COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT = 512 * 1024 * 1024; // 512 MB
-
-    public static final String COMPACT_USE_STARTING_SEQUENCE_NUMBER = "write.compact.use-starting-sequence-number";
-    public static final boolean COMPACT_USE_STARTING_SEQUENCE_NUMBER_DEFAULT = true;
-
-    public static final String COMPACT_MIN_INPUT_FILES = "write.compact.min-input-files";
-    public static final int COMPACT_MIN_INPUT_FILES_DEFAULT = 5;
-
-    public static final String COMPACT_DELETE_FILE_THRESHOLD = "write.compact.delete-file-threshold";
-    public static final int COMPACT_DELETE_FILE_THRESHOLD_DEFAULT = Integer.MAX_VALUE;
-
-    public static final String COMPACT_MIN_FILE_SIZE_BYTES = "write.compact.min-file-size-bytes";
-    public static final double COMPACT_MIN_FILE_SIZE_BYTES_DEFAULT = 0.75d * COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT;
-
-    public static final String COMPACT_MAX_FILE_SIZE_BYTES = "write.compact.max-file-size-bytes";
-    public static final double COMPACT_MAX_FILE_SIZE_BYTES_DEFAULT = 1.80d * COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT;
-
-    public static final Set<String> TABLE_AUTO_COMPACT_PROPERTIES = Stream.of(
-                COMPACT_ENABLED,
-                COMPACT_INTERVAL,
-                COMPACT_RESOUCE_POOL,
-                COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES,
-                COMPACT_MAX_FILE_GROUP_SIZE_BYTES,
-                COMPACT_PARTIAL_PROGRESS_ENABLED,
-                COMPACT_PARTIAL_PROGRESS_MAX_COMMITS,
-                COMPACT_TARGET_FILE_SIZE_BYTES,
-                COMPACT_USE_STARTING_SEQUENCE_NUMBER,
-                COMPACT_MIN_INPUT_FILES,
-                COMPACT_DELETE_FILE_THRESHOLD,
-                COMPACT_MIN_FILE_SIZE_BYTES,
-                COMPACT_MAX_FILE_SIZE_BYTES
-        ).collect(Collectors.toSet());
-
-    public static final Set<String> ACTION_AUTO_COMPACT_OPTIONS = Stream.of(
-            COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES,
-            COMPACT_MAX_FILE_GROUP_SIZE_BYTES,
-            COMPACT_PARTIAL_PROGRESS_ENABLED,
-            COMPACT_PARTIAL_PROGRESS_MAX_COMMITS,
-            COMPACT_TARGET_FILE_SIZE_BYTES,
-            COMPACT_USE_STARTING_SEQUENCE_NUMBER,
-            COMPACT_MIN_INPUT_FILES,
-            COMPACT_DELETE_FILE_THRESHOLD,
-            COMPACT_MIN_FILE_SIZE_BYTES,
-            COMPACT_MAX_FILE_SIZE_BYTES
-    ).collect(Collectors.toSet());
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.inlong.sort.iceberg.flink;
+
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class CompactTableProperties {
+    public static final String COMPACT_PREFIX = "write.compact.";
+
+    public static final String COMPACT_ENABLED = "write.compact.enable";
+    public static final boolean COMPACT_ENABLED_DEFAULT = false;
+
+    public static final String COMPACT_INTERVAL = "write.compact.snapshot.interval";
+    public static final int COMPACT_INTERVAL_DEFAULT = 5;
+
+    public static final String COMPACT_RESOUCE_POOL = "write.compact.resource.name";
+    public static final String COMPACT_RESOUCE_POOL_DEFAULT = "default";
+
+    // Supported by spark rewrite action option
+    public static final String COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES
+            = "write.compact.max-concurrent-file-group-rewrites";
+    public static final int COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT = 1;
+
+    public static final String COMPACT_MAX_FILE_GROUP_SIZE_BYTES = "write.compact.max-file-group-size-bytes";
+    public static final long COMPACT_MAX_FILE_GROUP_SIZE_BYTES_DEFAULT = 1024L * 1024L * 1024L * 100L; // 100 Gigabytes
+
+    public static final String COMPACT_PARTIAL_PROGRESS_ENABLED = "write.compact.partial-progress.enabled";
+    public static final boolean COMPACT_PARTIAL_PROGRESS_ENABLED_DEFAULT = false;
+
+    public static final String COMPACT_PARTIAL_PROGRESS_MAX_COMMITS = "write.compact.partial-progress.max-commits";
+    public static final int COMPACT_PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT = 10;
+
+    public static final String COMPACT_TARGET_FILE_SIZE_BYTES = "write.compact.target-file-size-bytes";
+    public static final int COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT = 512 * 1024 * 1024; // 512 MB
+
+    public static final String COMPACT_USE_STARTING_SEQUENCE_NUMBER = "write.compact.use-starting-sequence-number";
+    public static final boolean COMPACT_USE_STARTING_SEQUENCE_NUMBER_DEFAULT = true;
+
+    public static final String COMPACT_MIN_INPUT_FILES = "write.compact.min-input-files";
+    public static final int COMPACT_MIN_INPUT_FILES_DEFAULT = 5;
+
+    public static final String COMPACT_DELETE_FILE_THRESHOLD = "write.compact.delete-file-threshold";
+    public static final int COMPACT_DELETE_FILE_THRESHOLD_DEFAULT = Integer.MAX_VALUE;
+
+    public static final String COMPACT_MIN_FILE_SIZE_BYTES = "write.compact.min-file-size-bytes";
+    public static final double COMPACT_MIN_FILE_SIZE_BYTES_DEFAULT = 0.75d * COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT;
+
+    public static final String COMPACT_MAX_FILE_SIZE_BYTES = "write.compact.max-file-size-bytes";
+    public static final double COMPACT_MAX_FILE_SIZE_BYTES_DEFAULT = 1.80d * COMPACT_TARGET_FILE_SIZE_BYTES_DEFAULT;
+
+    public static final Set<String> TABLE_AUTO_COMPACT_PROPERTIES = Stream.of(
+                COMPACT_ENABLED,
+                COMPACT_INTERVAL,
+                COMPACT_RESOUCE_POOL,
+                COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES,
+                COMPACT_MAX_FILE_GROUP_SIZE_BYTES,
+                COMPACT_PARTIAL_PROGRESS_ENABLED,
+                COMPACT_PARTIAL_PROGRESS_MAX_COMMITS,
+                COMPACT_TARGET_FILE_SIZE_BYTES,
+                COMPACT_USE_STARTING_SEQUENCE_NUMBER,
+                COMPACT_MIN_INPUT_FILES,
+                COMPACT_DELETE_FILE_THRESHOLD,
+                COMPACT_MIN_FILE_SIZE_BYTES,
+                COMPACT_MAX_FILE_SIZE_BYTES
+        ).collect(Collectors.toSet());
+
+    public static final Set<String> ACTION_AUTO_COMPACT_OPTIONS = Stream.of(
+            COMPACT_MAX_CONCURRENT_FILE_GROUP_REWRITES,
+            COMPACT_MAX_FILE_GROUP_SIZE_BYTES,
+            COMPACT_PARTIAL_PROGRESS_ENABLED,
+            COMPACT_PARTIAL_PROGRESS_MAX_COMMITS,
+            COMPACT_TARGET_FILE_SIZE_BYTES,
+            COMPACT_USE_STARTING_SEQUENCE_NUMBER,
+            COMPACT_MIN_INPUT_FILES,
+            COMPACT_DELETE_FILE_THRESHOLD,
+            COMPACT_MIN_FILE_SIZE_BYTES,
+            COMPACT_MAX_FILE_SIZE_BYTES
+    ).collect(Collectors.toSet());
+}
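
For reference, a minimal sketch (not part of the reverted change) of how the compaction keys restored above could be set on an existing Iceberg table through the standard UpdateProperties API, which mirrors the commitChanges() logic in the FlinkCatalog further down in this diff; the warehouse path, database, and table names are hypothetical:

    // Hypothetical sketch: tune auto-compaction thresholds for one Iceberg table.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.catalog.TableIdentifier;
    import org.apache.iceberg.hadoop.HadoopCatalog;

    public class SetCompactProperties {
        public static void main(String[] args) {
            Table table = new HadoopCatalog(new Configuration(), "hdfs:///warehouse")
                    .loadTable(TableIdentifier.of("inlong_db", "inlong_table"));
            table.updateProperties()
                    // property keys match the constants defined above
                    .set("write.compact.target-file-size-bytes", String.valueOf(512L * 1024 * 1024))
                    .set("write.compact.min-input-files", "5")
                    .set("write.compact.partial-progress.enabled", "true")
                    .commit();
        }
    }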
diff --git a/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/FlinkCatalog.java b/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/FlinkCatalog.java
index 412384afe..695bf2ea6 100644
--- a/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/FlinkCatalog.java
+++ b/inlong-sort/sort-connectors/iceberg-dlc/src/main/java/org/apache/inlong/sort/iceberg/flink/FlinkCatalog.java
@@ -1,732 +1,732 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.inlong.sort.iceberg.flink;
-
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.catalog.AbstractCatalog;
-import org.apache.flink.table.catalog.CatalogBaseTable;
-import org.apache.flink.table.catalog.CatalogDatabase;
-import org.apache.flink.table.catalog.CatalogDatabaseImpl;
-import org.apache.flink.table.catalog.CatalogFunction;
-import org.apache.flink.table.catalog.CatalogPartition;
-import org.apache.flink.table.catalog.CatalogPartitionSpec;
-import org.apache.flink.table.catalog.CatalogTable;
-import org.apache.flink.table.catalog.CatalogTableImpl;
-import org.apache.flink.table.catalog.ObjectPath;
-import org.apache.flink.table.catalog.exceptions.CatalogException;
-import org.apache.flink.table.catalog.exceptions.DatabaseAlreadyExistException;
-import org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException;
-import org.apache.flink.table.catalog.exceptions.DatabaseNotExistException;
-import org.apache.flink.table.catalog.exceptions.FunctionNotExistException;
-import org.apache.flink.table.catalog.exceptions.TableAlreadyExistException;
-import org.apache.flink.table.catalog.exceptions.TableNotExistException;
-import org.apache.flink.table.catalog.exceptions.TableNotPartitionedException;
-import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
-import org.apache.flink.table.catalog.stats.CatalogTableStatistics;
-import org.apache.flink.table.expressions.Expression;
-import org.apache.flink.table.factories.Factory;
-import org.apache.flink.util.StringUtils;
-import org.apache.iceberg.CachingCatalog;
-import org.apache.iceberg.DataFile;
-import org.apache.iceberg.FileScanTask;
-import org.apache.iceberg.PartitionField;
-import org.apache.iceberg.PartitionSpec;
-import org.apache.iceberg.Schema;
-import org.apache.iceberg.StructLike;
-import org.apache.iceberg.Table;
-import org.apache.iceberg.Transaction;
-import org.apache.iceberg.UpdateProperties;
-import org.apache.iceberg.catalog.Catalog;
-import org.apache.iceberg.catalog.Namespace;
-import org.apache.iceberg.catalog.SupportsNamespaces;
-import org.apache.iceberg.catalog.TableIdentifier;
-import org.apache.iceberg.exceptions.AlreadyExistsException;
-import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
-import org.apache.iceberg.exceptions.NoSuchNamespaceException;
-import org.apache.iceberg.flink.CatalogLoader;
-import org.apache.iceberg.flink.FlinkSchemaUtil;
-import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
-import org.apache.iceberg.io.CloseableIterable;
-import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
-import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
-import org.apache.iceberg.relocated.com.google.common.collect.Lists;
-import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.apache.iceberg.relocated.com.google.common.collect.Sets;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * A Flink Catalog implementation that wraps an Iceberg {@link Catalog}.
- * <p>
- * The mapping between a Flink database and an Iceberg namespace:
- * a base namespace is supplied for the catalog, so for a catalog that supports a two-level namespace, the first
- * level is supplied in the catalog configuration and the second level is exposed as Flink databases.
- * <p>
- * The Iceberg table manages its partitions by itself; the partitioning of an Iceberg table is independent of
- * Flink's partitioning.
- *
- * Copy from iceberg-flink:iceberg-flink-1.13:0.13.2
- */
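
A small illustration of this database-to-namespace mapping (the names are hypothetical and not taken from this revert); the same mapping is computed by the toNamespace()/toIdentifier() helpers inside the class:

    // Hypothetical mapping example for a one-level base namespace.
    Namespace base = Namespace.of("warehouse");
    Namespace db = Namespace.of("warehouse", "inlong_db");              // Flink database "inlong_db"
    TableIdentifier id = TableIdentifier.of(db, "inlong_table");        // Flink table `inlong_db`.`inlong_table`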
-public class FlinkCatalog extends AbstractCatalog {
-
-    private final CatalogLoader catalogLoader;
-    private final Catalog icebergCatalog;
-    private final Namespace baseNamespace;
-    private final SupportsNamespaces asNamespaceCatalog;
-    private final Closeable closeable;
-    private final boolean cacheEnabled;
-
-    public FlinkCatalog(
-            String catalogName,
-            String defaultDatabase,
-            Namespace baseNamespace,
-            CatalogLoader catalogLoader,
-            boolean cacheEnabled) {
-        super(catalogName, defaultDatabase);
-        this.catalogLoader = catalogLoader;
-        this.baseNamespace = baseNamespace;
-        this.cacheEnabled = cacheEnabled;
-
-        Catalog originalCatalog = catalogLoader.loadCatalog();
-        icebergCatalog = cacheEnabled ? CachingCatalog.wrap(originalCatalog) : originalCatalog;
-        asNamespaceCatalog = originalCatalog instanceof SupportsNamespaces
-                ? (SupportsNamespaces) originalCatalog : null;
-        closeable = originalCatalog instanceof Closeable ? (Closeable) originalCatalog : null;
-    }
-
-    @Override
-    public void open() throws CatalogException {
-        // Create the default database if it does not exist.
-        try {
-            createDatabase(getDefaultDatabase(), ImmutableMap.of(), true);
-        } catch (DatabaseAlreadyExistException e) {
-            // Ignore the exception if the database already exists.
-        }
-    }
-
-    @Override
-    public void close() throws CatalogException {
-        if (closeable != null) {
-            try {
-                closeable.close();
-            } catch (IOException e) {
-                throw new CatalogException(e);
-            }
-        }
-    }
-
-    public Catalog catalog() {
-        return icebergCatalog;
-    }
-
-    private Namespace toNamespace(String database) {
-        String[] namespace = new String[baseNamespace.levels().length + 1];
-        System.arraycopy(baseNamespace.levels(), 0, namespace, 0, baseNamespace.levels().length);
-        namespace[baseNamespace.levels().length] = database;
-        return Namespace.of(namespace);
-    }
-
-    TableIdentifier toIdentifier(ObjectPath path) {
-        return TableIdentifier.of(toNamespace(path.getDatabaseName()), path.getObjectName());
-    }
-
-    @Override
-    public List<String> listDatabases() throws CatalogException {
-        if (asNamespaceCatalog == null) {
-            return Collections.singletonList(getDefaultDatabase());
-        }
-
-        return asNamespaceCatalog.listNamespaces(baseNamespace).stream()
-                .map(n -> n.level(n.levels().length - 1))
-                .collect(Collectors.toList());
-    }
-
-    @Override
-    public CatalogDatabase getDatabase(String databaseName) throws DatabaseNotExistException, CatalogException {
-        if (asNamespaceCatalog == null) {
-            if (!getDefaultDatabase().equals(databaseName)) {
-                throw new DatabaseNotExistException(getName(), databaseName);
-            } else {
-                return new CatalogDatabaseImpl(Maps.newHashMap(), "");
-            }
-        } else {
-            try {
-                Map<String, String> metadata =
-                        Maps.newHashMap(asNamespaceCatalog.loadNamespaceMetadata(toNamespace(databaseName)));
-                String comment = metadata.remove("comment");
-                return new CatalogDatabaseImpl(metadata, comment);
-            } catch (NoSuchNamespaceException e) {
-                throw new DatabaseNotExistException(getName(), databaseName, e);
-            }
-        }
-    }
-
-    @Override
-    public boolean databaseExists(String databaseName) throws CatalogException {
-        try {
-            getDatabase(databaseName);
-            return true;
-        } catch (DatabaseNotExistException ignore) {
-            return false;
-        }
-    }
-
-    @Override
-    public void createDatabase(String name, CatalogDatabase database, boolean ignoreIfExists)
-            throws DatabaseAlreadyExistException, CatalogException {
-        createDatabase(name, mergeComment(database.getProperties(), database.getComment()), ignoreIfExists);
-    }
-
-    private void createDatabase(String databaseName, Map<String, String> metadata, boolean ignoreIfExists)
-            throws DatabaseAlreadyExistException, CatalogException {
-        if (asNamespaceCatalog != null) {
-            try {
-                asNamespaceCatalog.createNamespace(toNamespace(databaseName), metadata);
-            } catch (AlreadyExistsException e) {
-                if (!ignoreIfExists) {
-                    throw new DatabaseAlreadyExistException(getName(), databaseName, e);
-                }
-            }
-        } else {
-            throw new UnsupportedOperationException("Namespaces are not supported by catalog: " + getName());
-        }
-    }
-
-    private Map<String, String> mergeComment(Map<String, String> metadata, String comment) {
-        Map<String, String> ret = Maps.newHashMap(metadata);
-        if (metadata.containsKey("comment")) {
-            throw new CatalogException("Database properties should not contain key: 'comment'.");
-        }
-
-        if (!StringUtils.isNullOrWhitespaceOnly(comment)) {
-            ret.put("comment", comment);
-        }
-        return ret;
-    }
-
-    @Override
-    public void dropDatabase(String name, boolean ignoreIfNotExists, boolean cascade)
-            throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
-        if (asNamespaceCatalog != null) {
-            try {
-                boolean success = asNamespaceCatalog.dropNamespace(toNamespace(name));
-                if (!success && !ignoreIfNotExists) {
-                    throw new DatabaseNotExistException(getName(), name);
-                }
-            } catch (NoSuchNamespaceException e) {
-                if (!ignoreIfNotExists) {
-                    throw new DatabaseNotExistException(getName(), name, e);
-                }
-            } catch (NamespaceNotEmptyException e) {
-                throw new DatabaseNotEmptyException(getName(), name, e);
-            }
-        } else {
-            if (!ignoreIfNotExists) {
-                throw new DatabaseNotExistException(getName(), name);
-            }
-        }
-    }
-
-    @Override
-    public void alterDatabase(String name, CatalogDatabase newDatabase, boolean ignoreIfNotExists)
-            throws DatabaseNotExistException, CatalogException {
-        if (asNamespaceCatalog != null) {
-            Namespace namespace = toNamespace(name);
-            Map<String, String> updates = Maps.newHashMap();
-            Set<String> removals = Sets.newHashSet();
-
-            try {
-                Map<String, String> oldProperties = asNamespaceCatalog.loadNamespaceMetadata(namespace);
-                Map<String, String> newProperties = mergeComment(newDatabase.getProperties(), newDatabase.getComment());
-
-                for (String key : oldProperties.keySet()) {
-                    if (!newProperties.containsKey(key)) {
-                        removals.add(key);
-                    }
-                }
-
-                for (Map.Entry<String, String> entry : newProperties.entrySet()) {
-                    if (!entry.getValue().equals(oldProperties.get(entry.getKey()))) {
-                        updates.put(entry.getKey(), entry.getValue());
-                    }
-                }
-
-                if (!updates.isEmpty()) {
-                    asNamespaceCatalog.setProperties(namespace, updates);
-                }
-
-                if (!removals.isEmpty()) {
-                    asNamespaceCatalog.removeProperties(namespace, removals);
-                }
-
-            } catch (NoSuchNamespaceException e) {
-                if (!ignoreIfNotExists) {
-                    throw new DatabaseNotExistException(getName(), name, e);
-                }
-            }
-        } else {
-            if (getDefaultDatabase().equals(name)) {
-                throw new CatalogException(
-                        "Cannot alter the default database when the Iceberg catalog doesn't support namespaces.");
-            }
-            if (!ignoreIfNotExists) {
-                throw new DatabaseNotExistException(getName(), name);
-            }
-        }
-    }
-
-    @Override
-    public List<String> listTables(String databaseName) throws DatabaseNotExistException, CatalogException {
-        try {
-            return icebergCatalog.listTables(toNamespace(databaseName)).stream()
-                    .map(TableIdentifier::name)
-                    .collect(Collectors.toList());
-        } catch (NoSuchNamespaceException e) {
-            throw new DatabaseNotExistException(getName(), databaseName, e);
-        }
-    }
-
-    @Override
-    public CatalogTable getTable(ObjectPath tablePath) throws TableNotExistException, CatalogException {
-        Table table = loadIcebergTable(tablePath);
-        return toCatalogTable(table);
-    }
-
-    private Table loadIcebergTable(ObjectPath tablePath) throws TableNotExistException {
-        try {
-            Table table = icebergCatalog.loadTable(toIdentifier(tablePath));
-            if (cacheEnabled) {
-                table.refresh();
-            }
-
-            return table;
-        } catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
-            throw new TableNotExistException(getName(), tablePath, e);
-        }
-    }
-
-    @Override
-    public boolean tableExists(ObjectPath tablePath) throws CatalogException {
-        return icebergCatalog.tableExists(toIdentifier(tablePath));
-    }
-
-    @Override
-    public void dropTable(ObjectPath tablePath, boolean ignoreIfNotExists)
-            throws TableNotExistException, CatalogException {
-        try {
-            icebergCatalog.dropTable(toIdentifier(tablePath));
-        } catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
-            if (!ignoreIfNotExists) {
-                throw new TableNotExistException(getName(), tablePath, e);
-            }
-        }
-    }
-
-    @Override
-    public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists)
-            throws TableNotExistException, TableAlreadyExistException, CatalogException {
-        try {
-            icebergCatalog.renameTable(
-                    toIdentifier(tablePath),
-                    toIdentifier(new ObjectPath(tablePath.getDatabaseName(), newTableName)));
-        } catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
-            if (!ignoreIfNotExists) {
-                throw new TableNotExistException(getName(), tablePath, e);
-            }
-        } catch (AlreadyExistsException e) {
-            throw new TableAlreadyExistException(getName(), tablePath, e);
-        }
-    }
-
-    @Override
-    public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
-            throws CatalogException, TableAlreadyExistException {
-        if (Objects.equals(table.getOptions().get("connector"), FlinkDynamicTableFactory.FACTORY_IDENTIFIER)) {
-            throw new IllegalArgumentException("Cannot create the table with 'connector'='iceberg' table property in "
-                    + "an iceberg catalog. Please create the table with the 'connector'='iceberg' property in a "
-                    + "non-iceberg catalog, or create the table without 'connector'='iceberg' related properties in "
-                    + "an iceberg table.");
-        }
-
-        createIcebergTable(tablePath, table, ignoreIfExists);
-    }
-
-    void createIcebergTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
-            throws CatalogException, TableAlreadyExistException {
-        validateFlinkTable(table);
-
-        Schema icebergSchema = FlinkSchemaUtil.convert(table.getSchema());
-        PartitionSpec spec = toPartitionSpec(((CatalogTable) table).getPartitionKeys(), icebergSchema);
-
-        ImmutableMap.Builder<String, String> properties = ImmutableMap.builder();
-        String location = null;
-        for (Map.Entry<String, String> entry : table.getOptions().entrySet()) {
-            if ("location".equalsIgnoreCase(entry.getKey())) {
-                location = entry.getValue();
-            } else {
-                properties.put(entry.getKey(), entry.getValue());
-            }
-        }
-
-        try {
-            icebergCatalog.createTable(
-                    toIdentifier(tablePath),
-                    icebergSchema,
-                    spec,
-                    location,
-                    properties.build());
-        } catch (AlreadyExistsException e) {
-            if (!ignoreIfExists) {
-                throw new TableAlreadyExistException(getName(), tablePath, e);
-            }
-        }
-    }
-
-    @Override
-    public void alterTable(ObjectPath tablePath, CatalogBaseTable newTable, boolean ignoreIfNotExists)
-            throws CatalogException, TableNotExistException {
-        validateFlinkTable(newTable);
-
-        Table icebergTable;
-        try {
-            icebergTable = loadIcebergTable(tablePath);
-        } catch (TableNotExistException e) {
-            if (!ignoreIfNotExists) {
-                throw e;
-            } else {
-                return;
-            }
-        }
-
-        CatalogTable table = toCatalogTable(icebergTable);
-
-        // Currently, Flink SQL only supports altering table properties.
-
-        // With the current Flink Catalog API, adding/removing/renaming columns cannot be supported by comparing
-        // CatalogTable instances, unless the Flink schema contains the Iceberg column IDs.
-        if (!table.getSchema().equals(newTable.getSchema())) {
-            throw new UnsupportedOperationException("Altering schema is not supported yet.");
-        }
-
-        if (!table.getPartitionKeys().equals(((CatalogTable) newTable).getPartitionKeys())) {
-            throw new UnsupportedOperationException("Altering partition keys is not supported yet.");
-        }
-
-        Map<String, String> oldProperties = table.getOptions();
-        Map<String, String> setProperties = Maps.newHashMap();
-
-        String setLocation = null;
-        String setSnapshotId = null;
-        String pickSnapshotId = null;
-
-        for (Map.Entry<String, String> entry : newTable.getOptions().entrySet()) {
-            String key = entry.getKey();
-            String value = entry.getValue();
-
-            if (Objects.equals(value, oldProperties.get(key))) {
-                continue;
-            }
-
-            if ("location".equalsIgnoreCase(key)) {
-                setLocation = value;
-            } else if ("current-snapshot-id".equalsIgnoreCase(key)) {
-                setSnapshotId = value;
-            } else if ("cherry-pick-snapshot-id".equalsIgnoreCase(key)) {
-                pickSnapshotId = value;
-            } else {
-                setProperties.put(key, value);
-            }
-        }
-
-        oldProperties.keySet().forEach(k -> {
-            if (!newTable.getOptions().containsKey(k)) {
-                setProperties.put(k, null);
-            }
-        });
-
-        commitChanges(icebergTable, setLocation, setSnapshotId, pickSnapshotId, setProperties);
-    }
-
-    private static void validateFlinkTable(CatalogBaseTable table) {
-        Preconditions.checkArgument(table instanceof CatalogTable, "The Table should be a CatalogTable.");
-
-        TableSchema schema = table.getSchema();
-        schema.getTableColumns().forEach(column -> {
-            if (!FlinkCompatibilityUtil.isPhysicalColumn(column)) {
-                throw new UnsupportedOperationException("Creating table with computed columns is not supported yet.");
-            }
-        });
-
-        if (!schema.getWatermarkSpecs().isEmpty()) {
-            throw new UnsupportedOperationException("Creating table with watermark specs is not supported yet.");
-        }
-    }
-
-    private static PartitionSpec toPartitionSpec(List<String> partitionKeys, Schema icebergSchema) {
-        PartitionSpec.Builder builder = PartitionSpec.builderFor(icebergSchema);
-        partitionKeys.forEach(builder::identity);
-        return builder.build();
-    }
-
-    private static List<String> toPartitionKeys(PartitionSpec spec, Schema icebergSchema) {
-        List<String> partitionKeys = Lists.newArrayList();
-        for (PartitionField field : spec.fields()) {
-            if (field.transform().isIdentity()) {
-                partitionKeys.add(icebergSchema.findColumnName(field.sourceId()));
-            } else {
-                // Not created by Flink SQL.
-                // For compatibility with Iceberg tables, return an empty list.
-                // TODO modify this after Flink supports partition transforms.
-                return Collections.emptyList();
-            }
-        }
-        return partitionKeys;
-    }
-
-    private static void commitChanges(Table table, String setLocation, String setSnapshotId,
-            String pickSnapshotId, Map<String, String> setProperties) {
-        // don't allow setting the snapshot and picking a commit at the same time because order is ambiguous and
-        // choosing one order leads to different results
-        Preconditions.checkArgument(setSnapshotId == null || pickSnapshotId == null,
-                "Cannot set the current snapshot ID and cherry-pick snapshot changes");
-
-        if (setSnapshotId != null) {
-            long newSnapshotId = Long.parseLong(setSnapshotId);
-            table.manageSnapshots().setCurrentSnapshot(newSnapshotId).commit();
-        }
-
-        // if updating the table snapshot, perform that update first in case it fails
-        if (pickSnapshotId != null) {
-            long newSnapshotId = Long.parseLong(pickSnapshotId);
-            table.manageSnapshots().cherrypick(newSnapshotId).commit();
-        }
-
-        Transaction transaction = table.newTransaction();
-
-        if (setLocation != null) {
-            transaction.updateLocation()
-                    .setLocation(setLocation)
-                    .commit();
-        }
-
-        if (!setProperties.isEmpty()) {
-            UpdateProperties updateProperties = transaction.updateProperties();
-            setProperties.forEach((k, v) -> {
-                if (v == null) {
-                    updateProperties.remove(k);
-                } else {
-                    updateProperties.set(k, v);
-                }
-            });
-            updateProperties.commit();
-        }
-
-        transaction.commitTransaction();
-    }
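
A worked illustration of the property-change convention used by commitChanges() above (values are hypothetical): a non-null value becomes an UpdateProperties.set(...) call, while a null value marks the key for UpdateProperties.remove(...).

    // Hypothetical input:  setProperties = { "write.format.default" -> "orc", "obsolete.key" -> null }
    // Resulting calls:     updateProperties.set("write.format.default", "orc");
    //                      updateProperties.remove("obsolete.key");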
-
-    static CatalogTable toCatalogTable(Table table) {
-        TableSchema schema = FlinkSchemaUtil.toSchema(table.schema());
-        List<String> partitionKeys = toPartitionKeys(table.spec(), table.schema());
-
-        // NOTE: We cannot create an IcebergCatalogTable that extends CatalogTable, because the Flink optimizer may
-        // use CatalogTableImpl to copy a new catalog table.
-        // Instead, re-load the table from the Iceberg catalog when creating source/sink operators.
-        // Iceberg does not have a table comment, so pass null (the default comment value in Flink).
-        return new CatalogTableImpl(schema, partitionKeys, table.properties(), null);
-    }
-
-    @Override
-    public Optional<Factory> getFactory() {
-        return Optional.of(new FlinkDynamicTableFactory(this));
-    }
-
-    CatalogLoader getCatalogLoader() {
-        return catalogLoader;
-    }
-
-    // ------------------------------ Unsupported methods ---------------------------------------------
-
-    @Override
-    public List<String> listViews(String databaseName) throws CatalogException {
-        return Collections.emptyList();
-    }
-
-    @Override
-    public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public boolean partitionExists(ObjectPath tablePath, CatalogPartitionSpec partitionSpec) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void createPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition partition,
-            boolean ignoreIfExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, boolean ignoreIfNotExists)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, CatalogPartition newPartition,
-            boolean ignoreIfNotExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<String> listFunctions(String dbName) throws CatalogException {
-        return Collections.emptyList();
-    }
-
-    @Override
-    public CatalogFunction getFunction(ObjectPath functionPath) throws FunctionNotExistException, CatalogException {
-        throw new FunctionNotExistException(getName(), functionPath);
-    }
-
-    @Override
-    public boolean functionExists(ObjectPath functionPath) throws CatalogException {
-        return false;
-    }
-
-    @Override
-    public void createFunction(ObjectPath functionPath, CatalogFunction function, boolean ignoreIfExists)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterFunction(ObjectPath functionPath, CatalogFunction newFunction, boolean ignoreIfNotExists)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void dropFunction(ObjectPath functionPath, boolean ignoreIfNotExists)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterTableStatistics(ObjectPath tablePath, CatalogTableStatistics tableStatistics,
-            boolean ignoreIfNotExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterTableColumnStatistics(ObjectPath tablePath, CatalogColumnStatistics columnStatistics,
-            boolean ignoreIfNotExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterPartitionStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
-            CatalogTableStatistics partitionStatistics, boolean ignoreIfNotExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void alterPartitionColumnStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
-            CatalogColumnStatistics columnStatistics, boolean ignoreIfNotExists) throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
-            throws TableNotExistException, TableNotPartitionedException, CatalogException {
-        Table table = loadIcebergTable(tablePath);
-
-        if (table.spec().isUnpartitioned()) {
-            throw new TableNotPartitionedException(icebergCatalog.name(), tablePath);
-        }
-
-        Set<CatalogPartitionSpec> set = Sets.newHashSet();
-        try (CloseableIterable<FileScanTask> tasks = table.newScan().planFiles()) {
-            for (DataFile dataFile : CloseableIterable.transform(tasks, FileScanTask::file)) {
-                Map<String, String> map = Maps.newHashMap();
-                StructLike structLike = dataFile.partition();
-                PartitionSpec spec = table.specs().get(dataFile.specId());
-                for (int i = 0; i < structLike.size(); i++) {
-                    map.put(spec.fields().get(i).name(), String.valueOf(structLike.get(i, Object.class)));
-                }
-                set.add(new CatalogPartitionSpec(map));
-            }
-        } catch (IOException e) {
-            throw new CatalogException(String.format("Failed to list partitions of table %s", tablePath), e);
-        }
-
-        return Lists.newArrayList(set);
-    }
-
-    @Override
-    public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public List<CatalogPartitionSpec> listPartitionsByFilter(ObjectPath tablePath, List<Expression> filters)
-            throws CatalogException {
-        throw new UnsupportedOperationException();
-    }
-
-    // After partition pruning and filter push-down, these statistics become very inaccurate, so the statistics
-    // reported here are of little significance.
-    // Flink will support something like SupportsReportStatistics in the future.
-
-    @Override
-    public CatalogTableStatistics getTableStatistics(ObjectPath tablePath)
-            throws CatalogException {
-        return CatalogTableStatistics.UNKNOWN;
-    }
-
-    @Override
-    public CatalogColumnStatistics getTableColumnStatistics(ObjectPath tablePath)
-            throws CatalogException {
-        return CatalogColumnStatistics.UNKNOWN;
-    }
-
-    @Override
-    public CatalogTableStatistics getPartitionStatistics(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
-            throws CatalogException {
-        return CatalogTableStatistics.UNKNOWN;
-    }
-
-    @Override
-    public CatalogColumnStatistics getPartitionColumnStatistics(
-            ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
-            throws CatalogException {
-        return CatalogColumnStatistics.UNKNOWN;
-    }
-}
-
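
A brief hedged sketch of how this FlinkCatalog could be wired up programmatically; the catalog name and warehouse location are hypothetical, and CatalogLoader.hadoop(...) is the stock Iceberg Flink loader rather than anything introduced by this commit:

    // Hypothetical wiring sketch: build a CatalogLoader and open the wrapped catalog.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.iceberg.catalog.Namespace;
    import org.apache.iceberg.flink.CatalogLoader;
    import org.apache.inlong.sort.iceberg.flink.FlinkCatalog;

    public class OpenFlinkCatalog {
        public static void main(String[] args) {
            Map<String, String> props = new HashMap<>();
            props.put("warehouse", "hdfs:///warehouse");                  // assumed warehouse location
            CatalogLoader loader = CatalogLoader.hadoop("inlong_iceberg", new Configuration(), props);
            FlinkCatalog catalog = new FlinkCatalog("inlong_iceberg", "default", Namespace.empty(), loader, true);
            catalog.open();                                               // creates the default database if missing
            catalog.listDatabases().forEach(System.out::println);
            catalog.close();
        }
    }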
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.inlong.sort.iceberg.flink;
+
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.catalog.AbstractCatalog;
+import org.apache.flink.table.catalog.CatalogBaseTable;
+import org.apache.flink.table.catalog.CatalogDatabase;
+import org.apache.flink.table.catalog.CatalogDatabaseImpl;
+import org.apache.flink.table.catalog.CatalogFunction;
+import org.apache.flink.table.catalog.CatalogPartition;
+import org.apache.flink.table.catalog.CatalogPartitionSpec;
+import org.apache.flink.table.catalog.CatalogTable;
+import org.apache.flink.table.catalog.CatalogTableImpl;
+import org.apache.flink.table.catalog.ObjectPath;
+import org.apache.flink.table.catalog.exceptions.CatalogException;
+import org.apache.flink.table.catalog.exceptions.DatabaseAlreadyExistException;
+import org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException;
+import org.apache.flink.table.catalog.exceptions.DatabaseNotExistException;
+import org.apache.flink.table.catalog.exceptions.FunctionNotExistException;
+import org.apache.flink.table.catalog.exceptions.TableAlreadyExistException;
+import org.apache.flink.table.catalog.exceptions.TableNotExistException;
+import org.apache.flink.table.catalog.exceptions.TableNotPartitionedException;
+import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
+import org.apache.flink.table.catalog.stats.CatalogTableStatistics;
+import org.apache.flink.table.expressions.Expression;
+import org.apache.flink.table.factories.Factory;
... 196492 lines suppressed ...