You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@flink.apache.org by fp...@apache.org on 2022/02/16 09:41:02 UTC

[flink] branch master updated (4f20772 -> 07f23e0)

This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


    from 4f20772  [FLINK-26148][runtime] Change the format of adaptive batch scheduler config option to jobmanager.adaptive-batch-scheduler.XXX
     new 1602e4b  [FLINK-24246][connector/pulsar] Bump PulsarClient version to latest 2.9.1
     new 36de46d  [FLINK-26020][connector/pulsar] Unified Pulsar Connector config model for Pulsar source and sink.
     new a195f72  [FLINK-26021][connector/pulsar] Add the ability to merge the partitioned Pulsar topics.
     new 9bc8b0f  [FLINK-26023][connector/pulsar] Create a Pulsar sink config model for matching ProducerConfigurationData.
     new 0e72bfed [FLINK-26024][connector/pulsar] Create a PulsarSerializationSchema for better records serialization.
     new 136add5  [FLINK-26022][connector/pulsar] Implement at-least-once and exactly-once Pulsar Sink.
     new b6be14d  [FLINK-26025][connector/pulsar] Replace MockPulsar with new Pulsar test tools based on PulsarStandalone.
     new 714dd80  [FLINK-26026][connector/pulsar] Create unit tests for Pulsar sink connector.
     new 07f23e0  [FLINK-26038][connector/pulsar] Support delay message on PulsarSink.

The 9 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../generated/pulsar_client_configuration.html     |   8 +-
 .../generated/pulsar_source_configuration.html     |   2 +-
 .../5b9eed8a-5fb6-4373-98ac-3be2a71941b8           |  11 -
 flink-connectors/flink-connector-pulsar/pom.xml    |  90 +++--
 ...arConfigUtils.java => PulsarClientFactory.java} | 209 ++++-------
 .../pulsar/common/config/PulsarConfigBuilder.java  | 143 +++++++
 .../common/config/PulsarConfigValidator.java       | 105 ++++++
 .../pulsar/common/config/PulsarConfiguration.java  | 104 ++++++
 .../pulsar/common/config/PulsarOptions.java        |  18 +-
 .../common/utils/PulsarTransactionUtils.java       | 108 ++----
 .../flink/connector/pulsar/sink/PulsarSink.java    | 145 ++++++++
 .../connector/pulsar/sink/PulsarSinkBuilder.java   | 372 ++++++++++++++++++
 .../connector/pulsar/sink/PulsarSinkOptions.java   | 269 +++++++++++++
 .../pulsar/sink/committer/PulsarCommittable.java   |  71 ++++
 .../committer/PulsarCommittableSerializer.java     |  65 ++++
 .../pulsar/sink/committer/PulsarCommitter.java     | 174 +++++++++
 .../pulsar/sink/config/PulsarSinkConfigUtils.java  | 112 ++++++
 .../pulsar/sink/config/SinkConfiguration.java      | 160 ++++++++
 .../connector/pulsar/sink/writer/PulsarWriter.java | 274 ++++++++++++++
 .../sink/writer/context/PulsarSinkContext.java     |  46 +++
 .../sink/writer/context/PulsarSinkContextImpl.java |  61 +++
 .../writer/delayer/FixedMessageDelayer.java}       |  25 +-
 .../pulsar/sink/writer/delayer/MessageDelayer.java |  62 +++
 .../pulsar/sink/writer/message/PulsarMessage.java  | 111 ++++++
 .../sink/writer/message/PulsarMessageBuilder.java  | 127 +++++++
 .../sink/writer/router/KeyHashTopicRouter.java     |  71 ++++
 .../pulsar/sink/writer/router/MessageKeyHash.java  |  85 +++++
 .../sink/writer/router/RoundRobinTopicRouter.java  |  63 ++++
 .../pulsar/sink/writer/router/TopicRouter.java     |  64 ++++
 .../sink/writer/router/TopicRoutingMode.java       |  87 +++++
 .../writer/serializer/PulsarSchemaWrapper.java     |  59 +++
 .../serializer/PulsarSerializationSchema.java      | 129 +++++++
 .../PulsarSerializationSchemaWrapper.java          |  59 +++
 .../sink/writer/topic/TopicMetadataListener.java   | 173 +++++++++
 .../sink/writer/topic/TopicProducerRegister.java   | 202 ++++++++++
 .../connector/pulsar/source/PulsarSource.java      |  27 +-
 .../pulsar/source/PulsarSourceBuilder.java         | 122 +++---
 .../pulsar/source/PulsarSourceOptions.java         |  12 +-
 .../pulsar/source/config/CursorVerification.java   |  23 +-
 .../source/config/PulsarSourceConfigUtils.java     | 138 +++----
 .../pulsar/source/config/SourceConfiguration.java  | 190 ++++++----
 .../source/enumerator/PulsarSourceEnumerator.java  |  18 +-
 .../source/enumerator/SplitsAssignmentState.java   |   2 +-
 .../cursor/stop/LatestMessageStopCursor.java       |   1 +
 .../source/enumerator/topic/TopicNameUtils.java    |  45 +++
 .../source/enumerator/topic/TopicPartition.java    |   4 +-
 .../enumerator/topic/range/RangeGenerator.java     |   8 +
 .../source/reader/PulsarSourceReaderFactory.java   |  19 +-
 .../deserializer/PulsarDeserializationSchema.java  |   9 +
 .../PulsarDeserializationSchemaWrapper.java        |   4 +-
 .../reader/deserializer/PulsarSchemaWrapper.java   |  12 +-
 .../reader/source/PulsarOrderedSourceReader.java   |   5 +-
 .../reader/source/PulsarSourceReaderBase.java      |   4 +-
 .../reader/source/PulsarUnorderedSourceReader.java |   3 -
 .../split/PulsarOrderedPartitionSplitReader.java   |   4 +-
 .../split/PulsarPartitionSplitReaderBase.java      |   6 +-
 .../split/PulsarUnorderedPartitionSplitReader.java |  23 +-
 .../common/config/PulsarConfigBuilderTest.java     |  76 ++++
 .../common/config/PulsarConfigValidatorTest.java   |  57 +++
 .../common/config/PulsarConfigurationTest.java     |  65 ++++
 .../common/schema/PulsarSchemaUtilsTest.java       |   6 +-
 .../pulsar/sink/PulsarSinkBuilderTest.java         | 107 ++++++
 .../connector/pulsar/sink/PulsarSinkITCase.java    |  99 +++++
 .../committer/PulsarCommittableSerializerTest.java |  53 +++
 .../pulsar/sink/writer/PulsarWriterTest.java       | 202 ++++++++++
 .../sink/writer/router/KeyHashTopicRouterTest.java | 111 ++++++
 .../writer/router/RoundRobinTopicRouterTest.java   |  88 +++++
 .../writer/topic/TopicMetadataListenerTest.java    | 140 +++++++
 .../writer/topic/TopicProducerRegisterTest.java    |  91 +++++
 .../pulsar/source/PulsarSourceBuilderTest.java     |  69 ++--
 .../pulsar/source/PulsarSourceITCase.java          |   2 +-
 .../enumerator/PulsarSourceEnumeratorTest.java     |   1 -
 .../subscriber/PulsarSubscriberTest.java           |  10 +-
 .../enumerator/topic/TopicNameUtilsTest.java       |  16 +
 .../PulsarDeserializationSchemaTest.java           |  10 +-
 .../reader/source/PulsarSourceReaderTestBase.java  |   8 +-
 .../split/PulsarPartitionSplitReaderTestBase.java  |  11 +-
 .../pulsar/testutils/PulsarTestContext.java        |   4 -
 .../pulsar/testutils/PulsarTestSuiteBase.java      |   2 +-
 .../connector/pulsar/testutils/SampleData.java     |  96 -----
 .../cases/MultipleTopicConsumingContext.java       |   1 -
 .../cases/MultipleTopicTemplateContext.java        |   1 -
 .../cases/SingleTopicConsumingContext.java         |   1 -
 .../pulsar/testutils/function/ControlSource.java   | 228 ++++++++++++
 .../pulsar/testutils/runtime/PulsarRuntime.java    |  36 +-
 .../testutils/runtime/PulsarRuntimeOperator.java   | 414 ++++++++++++++++++---
 .../runtime/container/PulsarContainerRuntime.java  |  61 ++-
 .../runtime/embedded/PulsarEmbeddedRuntime.java    | 284 ++++++++++++++
 .../runtime/mock/BlankBrokerInterceptor.java       |  61 ---
 .../runtime/mock/MockBookKeeperClientFactory.java  |  74 ----
 .../testutils/runtime/mock/MockPulsarService.java  |  87 -----
 .../runtime/mock/MockZooKeeperClientFactory.java   |  73 ----
 .../runtime/mock/NonClosableMockBookKeeper.java    |  55 ---
 .../testutils/runtime/mock/PulsarMockRuntime.java  | 160 --------
 .../mock/SameThreadOrderedSafeExecutor.java        |  56 ---
 .../test/resources/containers/txnStandalone.conf   | 100 ++++-
 .../src/main/resources/META-INF/NOTICE             |  16 +-
 .../util/flink/container/FlinkContainers.java      |   2 +-
 .../flink-end-to-end-tests-pulsar/pom.xml          |  43 ++-
 .../util/pulsar/PulsarSourceOrderedE2ECase.java    |   7 +-
 .../util/pulsar/PulsarSourceUnorderedE2ECase.java  |   7 +-
 .../pulsar/cases/ExclusiveSubscriptionContext.java |  14 -
 .../pulsar/cases/FailoverSubscriptionContext.java  |  14 -
 .../pulsar/cases/KeySharedSubscriptionContext.java |   7 +-
 .../pulsar/cases/SharedSubscriptionContext.java    |   7 +-
 .../FlinkContainerWithPulsarEnvironment.java       |   5 +
 .../common/PulsarContainerTestEnvironment.java     |  17 +-
 .../pyflink/datastream/tests/test_connectors.py    |   2 +-
 .../org/apache/flink/util/DockerImageVersions.java |   2 +-
 109 files changed, 6217 insertions(+), 1485 deletions(-)
 rename flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/{PulsarConfigUtils.java => PulsarClientFactory.java} (55%)
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilder.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidator.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfiguration.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittable.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializer.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommitter.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/PulsarSinkConfigUtils.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContext.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContextImpl.java
 copy flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/{source/config/CursorVerification.java => sink/writer/delayer/FixedMessageDelayer.java} (53%)
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/MessageDelayer.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessage.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessageBuilder.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouter.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/MessageKeyHash.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouter.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRouter.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRoutingMode.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSchemaWrapper.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchema.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchemaWrapper.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListener.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegister.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilderTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidatorTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigurationTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkITCase.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializerTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouterTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouterTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListenerTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegisterTest.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/function/ControlSource.java
 create mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/embedded/PulsarEmbeddedRuntime.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/BlankBrokerInterceptor.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockBookKeeperClientFactory.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockPulsarService.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockZooKeeperClientFactory.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/NonClosableMockBookKeeper.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/PulsarMockRuntime.java
 delete mode 100644 flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/SameThreadOrderedSafeExecutor.java
 copy flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/CursorVerification.java => flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/PulsarContainerTestEnvironment.java (58%)

[flink] 07/09: [FLINK-26025][connector/pulsar] Replace MockPulsar with new Pulsar test tools based on PulsarStandalone.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit b6be14da65fedf01e82dc83a58e791709ce8ce57
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Wed Feb 9 15:09:50 2022 +0800

    [FLINK-26025][connector/pulsar] Replace MockPulsar with new Pulsar test tools based on PulsarStandalone.
    
    1. Drop some unused fields in test classes.
    2. Fix the checkstyle issues for source test.
    3. Fix violations for Pulsar connector according to the flink-architecture-tests.
    4. Create a standalone Pulsar for test.
    5. Add new methods to PulsarRuntimeOperator.
    6. Fix the bug in PulsarContainerRuntime, support running tests in E2E environment.
    7. Create PulsarContainerTestEnvironment for supporting E2E tests.
    8. Add a lot of comments for Pulsar testing tools.
    9. Drop mocked Pulsar service, use standalone Pulsar instead.
---
 flink-connectors/flink-connector-pulsar/pom.xml    |  16 -
 .../source/enumerator/cursor/StopCursor.java       |   2 -
 .../source/enumerator/topic/TopicPartition.java    |   4 +-
 .../split/PulsarUnorderedPartitionSplitReader.java |  16 +-
 .../common/schema/PulsarSchemaUtilsTest.java       |   6 +-
 .../pulsar/source/PulsarSourceITCase.java          |   2 +-
 .../subscriber/PulsarSubscriberTest.java           |  10 +-
 .../reader/source/PulsarSourceReaderTestBase.java  |   2 +-
 .../pulsar/testutils/PulsarTestContext.java        |   4 -
 .../pulsar/testutils/PulsarTestSuiteBase.java      |   2 +-
 .../connector/pulsar/testutils/SampleData.java     |  96 -----
 .../cases/MultipleTopicConsumingContext.java       |   1 -
 .../cases/MultipleTopicTemplateContext.java        |   1 -
 .../cases/SingleTopicConsumingContext.java         |   1 -
 .../pulsar/testutils/runtime/PulsarRuntime.java    |  36 +-
 .../testutils/runtime/PulsarRuntimeOperator.java   | 414 ++++++++++++++++++---
 .../runtime/container/PulsarContainerRuntime.java  |  61 ++-
 .../runtime/embedded/PulsarEmbeddedRuntime.java    | 284 ++++++++++++++
 .../runtime/mock/BlankBrokerInterceptor.java       |  61 ---
 .../runtime/mock/MockBookKeeperClientFactory.java  |  74 ----
 .../testutils/runtime/mock/MockPulsarService.java  |  87 -----
 .../runtime/mock/MockZooKeeperClientFactory.java   |  73 ----
 .../runtime/mock/NonClosableMockBookKeeper.java    |  55 ---
 .../testutils/runtime/mock/PulsarMockRuntime.java  | 160 --------
 .../mock/SameThreadOrderedSafeExecutor.java        |  56 ---
 .../test/resources/containers/txnStandalone.conf   | 100 ++++-
 .../util/flink/container/FlinkContainers.java      |   2 +-
 .../util/pulsar/PulsarSourceOrderedE2ECase.java    |   7 +-
 .../util/pulsar/PulsarSourceUnorderedE2ECase.java  |   7 +-
 .../pulsar/cases/ExclusiveSubscriptionContext.java |  14 -
 .../pulsar/cases/FailoverSubscriptionContext.java  |  14 -
 .../pulsar/cases/KeySharedSubscriptionContext.java |   7 +-
 .../pulsar/cases/SharedSubscriptionContext.java    |   7 +-
 .../common/PulsarContainerTestEnvironment.java     |  31 ++
 34 files changed, 867 insertions(+), 846 deletions(-)

diff --git a/flink-connectors/flink-connector-pulsar/pom.xml b/flink-connectors/flink-connector-pulsar/pom.xml
index 45047eb..fc7b68c 100644
--- a/flink-connectors/flink-connector-pulsar/pom.xml
+++ b/flink-connectors/flink-connector-pulsar/pom.xml
@@ -120,22 +120,6 @@ under the License.
 		<!-- we don't override the version here. -->
 		<dependency>
 			<groupId>org.apache.pulsar</groupId>
-			<artifactId>testmocks</artifactId>
-			<version>${pulsar.version}</version>
-			<scope>test</scope>
-			<exclusions>
-				<exclusion>
-					<groupId>org.testng</groupId>
-					<artifactId>testng</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.powermock</groupId>
-					<artifactId>powermock-module-testng</artifactId>
-				</exclusion>
-			</exclusions>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.pulsar</groupId>
 			<artifactId>pulsar-broker</artifactId>
 			<version>${pulsar.version}</version>
 			<scope>test</scope>
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
index b85944f..aaec143 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
@@ -18,7 +18,6 @@
 
 package org.apache.flink.connector.pulsar.source.enumerator.cursor;
 
-import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.stop.LatestMessageStopCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.stop.MessageIdStopCursor;
@@ -42,7 +41,6 @@ import java.io.Serializable;
 public interface StopCursor extends Serializable {
 
     /** The open method for the cursor initializer. This method could be executed multiple times. */
-    @Internal
     default void open(PulsarAdmin admin, TopicPartition partition) {}
 
     /**
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicPartition.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicPartition.java
index f29d88d..b3035cd 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicPartition.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicPartition.java
@@ -18,7 +18,7 @@
 
 package org.apache.flink.connector.pulsar.source.enumerator.topic;
 
-import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.connector.base.source.reader.splitreader.SplitReader;
 
 import org.apache.pulsar.client.api.Range;
@@ -35,7 +35,7 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  * Topic partition is the basic topic information used by {@link SplitReader}, we create this topic
  * metas for a specified topic by subscription type and convert it into a partition split.
  */
-@Internal
+@PublicEvolving
 public class TopicPartition implements Serializable {
     private static final long serialVersionUID = -1474354741550810953L;
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
index 2027df4..5940cc9 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
@@ -40,11 +40,11 @@ import org.slf4j.LoggerFactory;
 import javax.annotation.Nullable;
 
 import java.time.Duration;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
+import static org.apache.flink.connector.pulsar.common.utils.PulsarTransactionUtils.createTransaction;
 
 /**
  * The split reader a given {@link PulsarPartitionSplit}, it would be closed once the {@link
@@ -162,18 +162,6 @@ public class PulsarUnorderedPartitionSplitReader<OUT> extends PulsarPartitionSpl
 
     private Transaction newTransaction() {
         long timeoutMillis = sourceConfiguration.getTransactionTimeoutMillis();
-        CompletableFuture<Transaction> future =
-                sneakyClient(pulsarClient::newTransaction)
-                        .withTransactionTimeout(timeoutMillis, TimeUnit.MILLISECONDS)
-                        .build();
-
-        try {
-            return future.get();
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new RuntimeException(e);
-        } catch (ExecutionException e) {
-            throw new RuntimeException(e);
-        }
+        return createTransaction(pulsarClient, timeoutMillis);
     }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/schema/PulsarSchemaUtilsTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/schema/PulsarSchemaUtilsTest.java
index 1aa4404..d4bd1fc 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/schema/PulsarSchemaUtilsTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/schema/PulsarSchemaUtilsTest.java
@@ -57,14 +57,12 @@ class PulsarSchemaUtilsTest {
     }
 
     @Test
-    @SuppressWarnings("java:S5778")
     void createSchemaForComplexSchema() {
         // Avro
         Schema<Foo> avro1 = Schema.AVRO(Foo.class);
         PulsarSchema<Foo> avro2 = new PulsarSchema<>(avro1, Foo.class);
-        assertThrows(
-                NullPointerException.class,
-                () -> PulsarSchemaUtils.createSchema(avro1.getSchemaInfo()));
+        SchemaInfo info1 = avro1.getSchemaInfo();
+        assertThrows(NullPointerException.class, () -> PulsarSchemaUtils.createSchema(info1));
 
         Schema<Foo> schema = PulsarSchemaUtils.createSchema(avro2.getSchemaInfo());
         assertNotEquals(schema.getSchemaInfo(), avro1.getSchemaInfo());
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceITCase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceITCase.java
index b28e449..94c5c83 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceITCase.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceITCase.java
@@ -40,7 +40,7 @@ class PulsarSourceITCase extends SourceTestSuiteBase<String> {
 
     // Defines pulsar running environment
     @TestExternalSystem
-    PulsarTestEnvironment pulsar = new PulsarTestEnvironment(PulsarRuntime.mock());
+    PulsarTestEnvironment pulsar = new PulsarTestEnvironment(PulsarRuntime.embedded());
 
     @TestSemantics
     CheckpointingMode[] semantics = new CheckpointingMode[] {CheckpointingMode.EXACTLY_ONCE};
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/subscriber/PulsarSubscriberTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/subscriber/PulsarSubscriberTest.java
index bdfbb42..8409f63 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/subscriber/PulsarSubscriberTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/subscriber/PulsarSubscriberTest.java
@@ -64,8 +64,8 @@ class PulsarSubscriberTest extends PulsarTestSuiteBase {
 
         assertEquals(expectedPartitions, topicPartitions);
 
-        operator().deleteTopic(TOPIC1, true);
-        operator().deleteTopic(TOPIC2, true);
+        operator().deleteTopic(TOPIC1);
+        operator().deleteTopic(TOPIC2);
     }
 
     @Test
@@ -91,8 +91,8 @@ class PulsarSubscriberTest extends PulsarTestSuiteBase {
 
         assertEquals(expectedPartitions, topicPartitions);
 
-        operator().deleteTopic(TOPIC1, true);
-        operator().deleteTopic(TOPIC2, true);
-        operator().deleteTopic(TOPIC3, true);
+        operator().deleteTopic(TOPIC1);
+        operator().deleteTopic(TOPIC2);
+        operator().deleteTopic(TOPIC3);
     }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
index f7cb120..a42741d 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
@@ -94,7 +94,7 @@ abstract class PulsarSourceReaderTestBase extends PulsarTestSuiteBase {
 
     @AfterEach
     void afterEach(String topicName) {
-        operator().deleteTopic(topicName, true);
+        operator().deleteTopic(topicName);
     }
 
     @TestTemplate
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestContext.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestContext.java
index c6af529..f238a03 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestContext.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestContext.java
@@ -27,10 +27,6 @@ import java.util.List;
 
 /** Common test context for pulsar based test. */
 public abstract class PulsarTestContext<T> implements DataStreamSourceExternalContext<T> {
-    private static final long serialVersionUID = 1L;
-
-    private static final int NUM_RECORDS_UPPER_BOUND = 500;
-    private static final int NUM_RECORDS_LOWER_BOUND = 100;
 
     protected final PulsarRuntimeOperator operator;
     protected final List<URL> connectorJarPaths;
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestSuiteBase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestSuiteBase.java
index b55fdc5..c87140b 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestSuiteBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/PulsarTestSuiteBase.java
@@ -56,7 +56,7 @@ public abstract class PulsarTestSuiteBase {
      * pulsar broker. Override this method when needs.
      */
     protected PulsarRuntime runtime() {
-        return PulsarRuntime.mock();
+        return PulsarRuntime.embedded();
     }
 
     /** Operate pulsar by acquiring a runtime operator. */
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/SampleData.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/SampleData.java
index ec54761..8fec4ff 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/SampleData.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/SampleData.java
@@ -18,117 +18,21 @@
 
 package org.apache.flink.connector.pulsar.testutils;
 
-import java.nio.charset.StandardCharsets;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.Random;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.function.Supplier;
-
-import static java.util.function.Function.identity;
-import static java.util.stream.Stream.generate;
-import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.commons.lang3.RandomStringUtils.randomAscii;
-import static org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList.toImmutableList;
-import static org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableMap.toImmutableMap;
 
 /** Sample data for various test cases. */
 public class SampleData {
 
-    private static final Random RAND = new Random(System.currentTimeMillis());
-    private static final Supplier<Integer> LIST_SIZE = () -> RAND.nextInt(10) + 5;
-    private static final long MIN_DAY = LocalDate.of(2000, 1, 1).toEpochDay();
-    private static final long MAX_DAY = LocalDate.of(2040, 12, 31).toEpochDay();
-    private static final Supplier<Bar> BAR_SUPPLIER =
-            () -> new Bar(RAND.nextBoolean(), randomAlphanumeric(10));
-    private static final Supplier<List<Bar>> BAR_LIST_SUPPLIER =
-            () -> generate(BAR_SUPPLIER).limit(LIST_SIZE.get()).collect(toImmutableList());
-    private static final Supplier<Map<String, Bar>> BAR_MAP_SUPPLIER =
-            () ->
-                    generate(BAR_SUPPLIER)
-                            .limit(LIST_SIZE.get())
-                            .collect(toImmutableMap(Bar::toString, identity()));
-
     // --------------------------------//
     //                                 //
     // Random sample data for tests.   //
     //                                 //
     // --------------------------------//
 
-    public static final List<Boolean> BOOLEAN_LIST =
-            generate(RAND::nextBoolean).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<Integer> INTEGER_LIST =
-            generate(RAND::nextInt).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<byte[]> BYTES_LIST =
-            generate(() -> randomAscii(8))
-                    .limit(LIST_SIZE.get())
-                    .map(s -> s.getBytes(StandardCharsets.UTF_8))
-                    .collect(toImmutableList());
-
-    public static final List<Byte> INT_8_LIST =
-            generate(RAND::nextInt)
-                    .limit(LIST_SIZE.get())
-                    .map(Integer::byteValue)
-                    .collect(toImmutableList());
-
-    public static final List<Short> INT_16_LIST =
-            generate(RAND::nextInt)
-                    .limit(LIST_SIZE.get())
-                    .map(Integer::shortValue)
-                    .collect(toImmutableList());
-
-    public static final List<Long> INT_64_LIST =
-            generate(RAND::nextLong).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<Double> DOUBLE_LIST =
-            generate(RAND::nextDouble).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<Float> FLOAT_LIST =
-            generate(RAND::nextFloat).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<String> STRING_LIST =
-            generate(() -> randomAlphanumeric(8)).limit(LIST_SIZE.get()).collect(toImmutableList());
-
-    public static final List<LocalDate> LOCAL_DATE_LIST =
-            generate(() -> ThreadLocalRandom.current().nextLong(MIN_DAY, MAX_DAY))
-                    .limit(LIST_SIZE.get())
-                    .map(LocalDate::ofEpochDay)
-                    .collect(toImmutableList());
-
-    public static final List<LocalDateTime> LOCAL_DATE_TIME_LIST =
-            generate(() -> ThreadLocalRandom.current().nextLong(MIN_DAY, MAX_DAY))
-                    .limit(LIST_SIZE.get())
-                    .map(LocalDate::ofEpochDay)
-                    .map(LocalDate::atStartOfDay)
-                    .collect(toImmutableList());
-
-    public static final List<FA> FA_LIST =
-            generate(() -> new FA(BAR_LIST_SUPPLIER.get().toArray(new Bar[0])))
-                    .limit(LIST_SIZE.get())
-                    .collect(toImmutableList());
-
-    public static final List<Foo> FOO_LIST =
-            generate(() -> new Foo(RAND.nextInt(), RAND.nextFloat(), BAR_SUPPLIER.get()))
-                    .limit(LIST_SIZE.get())
-                    .collect(toImmutableList());
-
-    public static final List<FL> FL_LIST =
-            generate(() -> new FL(BAR_LIST_SUPPLIER.get()))
-                    .limit(LIST_SIZE.get())
-                    .collect(toImmutableList());
-
-    public static final List<FM> FM_LIST =
-            generate(() -> new FM(BAR_MAP_SUPPLIER.get()))
-                    .limit(LIST_SIZE.get())
-                    .collect(toImmutableList());
-
     /** Foo type. */
     public static class Foo {
         public int i;
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicConsumingContext.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicConsumingContext.java
index 35c2b58..57027f3 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicConsumingContext.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicConsumingContext.java
@@ -31,7 +31,6 @@ import java.util.List;
  * splits.
  */
 public class MultipleTopicConsumingContext extends MultipleTopicTemplateContext {
-    private static final long serialVersionUID = 1L;
 
     public MultipleTopicConsumingContext(PulsarTestEnvironment environment) {
         this(environment, Collections.emptyList());
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicTemplateContext.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicTemplateContext.java
index 8c77cb5..3eca9e7 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicTemplateContext.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/MultipleTopicTemplateContext.java
@@ -49,7 +49,6 @@ import static org.apache.pulsar.client.api.Schema.STRING;
  * source splits.
  */
 public abstract class MultipleTopicTemplateContext extends PulsarTestContext<String> {
-    private static final long serialVersionUID = 1L;
 
     private int numTopics = 0;
 
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/SingleTopicConsumingContext.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/SingleTopicConsumingContext.java
index ab3db06..f5bfa45 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/SingleTopicConsumingContext.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/cases/SingleTopicConsumingContext.java
@@ -47,7 +47,6 @@ import static org.apache.pulsar.client.api.SubscriptionType.Exclusive;
  * source splits.
  */
 public class SingleTopicConsumingContext extends PulsarTestContext<String> {
-    private static final long serialVersionUID = 1L;
 
     private static final String TOPIC_NAME_PREFIX = "pulsar-single-topic";
     private final String topicName;
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntime.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntime.java
index d46658e..9c1cd01 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntime.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntime.java
@@ -20,13 +20,15 @@ package org.apache.flink.connector.pulsar.testutils.runtime;
 
 import org.apache.flink.connector.pulsar.testutils.PulsarTestEnvironment;
 import org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime;
-import org.apache.flink.connector.pulsar.testutils.runtime.mock.PulsarMockRuntime;
+import org.apache.flink.connector.pulsar.testutils.runtime.embedded.PulsarEmbeddedRuntime;
 
 import org.testcontainers.containers.GenericContainer;
 
 /**
  * An abstraction for different pulsar runtimes. Providing the common methods for {@link
  * PulsarTestEnvironment}.
+ *
+ * <p>All the Pulsar runtime should enable the transaction by default.
  */
 public interface PulsarRuntime {
 
@@ -36,17 +38,43 @@ public interface PulsarRuntime {
     /** Shutdown this pulsar runtime. */
     void tearDown();
 
-    /** Return a operator for operating this pulsar runtime. */
+    /**
+     * Return an operator for operating this pulsar runtime. This operator predefines a set of
+     * extremely useful methods for Pulsar. You can easily add new methods in this operator.
+     */
     PulsarRuntimeOperator operator();
 
-    static PulsarRuntime mock() {
-        return new PulsarMockRuntime();
+    /**
+     * Create a standalone Pulsar instance in test thread. We would start an embedded zookeeper and
+     * bookkeeper. The stream storage for bookkeeper is disabled. The function worker is disabled on
+     * Pulsar broker.
+     *
+     * <p>This runtime would be faster than {@link #container()} and behaves the same as the
+     * {@link #container()}.
+     */
+    static PulsarRuntime embedded() {
+        return new PulsarEmbeddedRuntime();
     }
 
+    /**
+     * Create a Pulsar instance in docker. We would start a standalone Pulsar in TestContainers.
+     * This runtime is often used in end-to-end tests. The performance may be a bit slower than
+     * {@link #embedded()}. The stream storage for bookkeeper is disabled. The function worker is
+     * disabled on Pulsar broker.
+     */
     static PulsarRuntime container() {
         return new PulsarContainerRuntime();
     }
 
+    /**
+     * Create a Pulsar instance in docker. We would start a standalone Pulsar in TestContainers.
+     * This runtime is often used in end-to-end tests. The performance may be a bit slower than
+     * {@link #embedded()}. The stream storage for bookkeeper is disabled. The function worker is
+     * disabled on Pulsar broker.
+     *
+     * <p>We would link the created Pulsar docker instance with the given flink instance. This would
+     * enable the connection for Pulsar and Flink in docker environment.
+     */
     static PulsarRuntime container(GenericContainer<?> flinkContainer) {
         return new PulsarContainerRuntime().bindWithFlinkContainer(flinkContainer);
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntimeOperator.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntimeOperator.java
index e53f7aa..a78ea99 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntimeOperator.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/PulsarRuntimeOperator.java
@@ -19,7 +19,8 @@
 package org.apache.flink.connector.pulsar.testutils.runtime;
 
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicPartition;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicRange;
 import org.apache.flink.connector.testframe.external.ExternalContext;
@@ -28,79 +29,139 @@ import org.apache.flink.shaded.guava30.com.google.common.base.Strings;
 
 import org.apache.pulsar.client.admin.PulsarAdmin;
 import org.apache.pulsar.client.admin.PulsarAdminException;
+import org.apache.pulsar.client.admin.PulsarAdminException.NotFoundException;
+import org.apache.pulsar.client.api.Consumer;
+import org.apache.pulsar.client.api.Message;
 import org.apache.pulsar.client.api.MessageId;
 import org.apache.pulsar.client.api.Producer;
 import org.apache.pulsar.client.api.PulsarClient;
 import org.apache.pulsar.client.api.PulsarClientException;
 import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.SubscriptionInitialPosition;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient;
+import org.apache.pulsar.client.api.transaction.TxnID;
+import org.apache.pulsar.client.impl.PulsarClientImpl;
 import org.apache.pulsar.common.naming.TopicName;
+import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
+import java.time.Duration;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.function.Supplier;
 import java.util.stream.Stream;
 
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.function.Function.identity;
 import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
 import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
+import static org.apache.flink.connector.base.DeliveryGuarantee.EXACTLY_ONCE;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ENABLE_TRANSACTION;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SERVICE_URL;
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyAdmin;
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyThrow;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_SEND_TIMEOUT_MS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_DELIVERY_GUARANTEE;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_TRANSACTION_TIMEOUT;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.topicName;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.topicNameWithPartition;
 import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.pulsar.client.api.SubscriptionMode.Durable;
+import static org.apache.pulsar.client.api.SubscriptionType.Exclusive;
 
 /**
- * A pulsar cluster operator is used for operating pulsar instance. It's serializable for using in
+ * A pulsar cluster operator used for operating pulsar instance. It's serializable for using in
  * {@link ExternalContext}.
  */
-public class PulsarRuntimeOperator implements Serializable, Closeable {
-    private static final long serialVersionUID = -630646912412751301L;
+public class PulsarRuntimeOperator implements Closeable {
 
     public static final int DEFAULT_PARTITIONS = 10;
     public static final int NUM_RECORDS_PER_PARTITION = 20;
+    public static final String SUBSCRIPTION_NAME = "PulsarRuntimeOperator";
 
-    private String serviceUrl;
-    private String adminUrl;
-
-    private transient PulsarClient client;
-    private transient PulsarAdmin admin;
+    private final String serviceUrl;
+    private final String adminUrl;
+    private final PulsarClient client;
+    private final PulsarAdmin admin;
+    private final ConcurrentHashMap<String, ConcurrentHashMap<Integer, Producer<?>>> producers;
+    private final ConcurrentHashMap<String, ConcurrentHashMap<Integer, Consumer<?>>> consumers;
 
     public PulsarRuntimeOperator(String serviceUrl, String adminUrl) {
-        this.serviceUrl = serviceUrl;
-        this.adminUrl = adminUrl;
-        initializeClients();
+        this(serviceUrl, serviceUrl, adminUrl, adminUrl);
+    }
+
+    public PulsarRuntimeOperator(
+            String serviceUrl,
+            String containerServiceUrl,
+            String adminUrl,
+            String containerAdminUrl) {
+        this.serviceUrl = containerServiceUrl;
+        this.adminUrl = containerAdminUrl;
+        this.client =
+                sneakyClient(
+                        () ->
+                                PulsarClient.builder()
+                                        .serviceUrl(serviceUrl)
+                                        .enableTransaction(true)
+                                        .build());
+        this.admin = sneakyClient(() -> PulsarAdmin.builder().serviceHttpUrl(adminUrl).build());
+        this.producers = new ConcurrentHashMap<>();
+        this.consumers = new ConcurrentHashMap<>();
     }
 
     /**
      * Create a topic with default {@link #DEFAULT_PARTITIONS} partitions and send a fixed number
      * {@link #NUM_RECORDS_PER_PARTITION} of records to this topic.
+     *
+     * @param topic Pulsar topic name, it couldn't be a name with partition index.
      */
     public void setupTopic(String topic) {
         Random random = new Random(System.currentTimeMillis());
         setupTopic(topic, Schema.STRING, () -> randomAlphanumeric(10 + random.nextInt(20)));
     }
 
+    /**
+     * Create a topic with default {@link #DEFAULT_PARTITIONS} partitions and send a fixed number
+     * {@link #NUM_RECORDS_PER_PARTITION} of records to this topic.
+     *
+     * @param topic Pulsar topic name, it couldn't be a name with partition index.
+     * @param schema The Pulsar schema for serializing records into bytes.
+     * @param supplier The supplier for providing the records which would be sent to Pulsar.
+     */
     public <T> void setupTopic(String topic, Schema<T> schema, Supplier<T> supplier) {
         setupTopic(topic, schema, supplier, NUM_RECORDS_PER_PARTITION);
     }
 
+    /**
+     * Create a topic with default {@link #DEFAULT_PARTITIONS} partitions and send a fixed number of
+     * records to this topic.
+     *
+     * @param topic Pulsar topic name, it couldn't be a name with partition index.
+     * @param schema The Pulsar schema for serializing records into bytes.
+     * @param supplier The supplier for providing the records which would be sent to Pulsar.
+     * @param numRecordsPerSplit The number of records for a partition.
+     */
     public <T> void setupTopic(
             String topic, Schema<T> schema, Supplier<T> supplier, int numRecordsPerSplit) {
-        createTopic(topic, DEFAULT_PARTITIONS);
+        String topicName = topicName(topic);
+        createTopic(topicName, DEFAULT_PARTITIONS);
 
         // Make sure every topic partition has messages.
         for (int i = 0; i < DEFAULT_PARTITIONS; i++) {
-            String partitionName = TopicNameUtils.topicNameWithPartition(topic, i);
+            String partitionName = topicNameWithPartition(topic, i);
             List<T> messages =
                     Stream.generate(supplier).limit(numRecordsPerSplit).collect(toList());
 
@@ -108,27 +169,67 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         }
     }
 
+    /**
+     * Create a pulsar topic with given partition number.
+     *
+     * @param topic The name of the topic.
+     * @param numberOfPartitions The number of partitions. We would create a non-partitioned topic
+     *     if this number is zero.
+     */
     public void createTopic(String topic, int numberOfPartitions) {
         checkArgument(numberOfPartitions >= 0);
-        if (numberOfPartitions == 0) {
+        if (numberOfPartitions <= 0) {
             createNonPartitionedTopic(topic);
         } else {
             createPartitionedTopic(topic, numberOfPartitions);
         }
     }
 
+    /**
+     * Increase the partition number of the topic.
+     *
+     * @param topic The topic name.
+     * @param newPartitionsNum The new partition size which should exceed the previous size.
+     */
     public void increaseTopicPartitions(String topic, int newPartitionsNum) {
+        PartitionedTopicMetadata metadata =
+                sneakyAdmin(() -> admin().topics().getPartitionedTopicMetadata(topic));
+        checkArgument(
+                metadata.partitions < newPartitionsNum,
+                "The new partition size which should exceed previous size.");
+
         sneakyAdmin(() -> admin().topics().updatePartitionedTopic(topic, newPartitionsNum));
     }
 
-    public void deleteTopic(String topic, boolean isPartitioned) {
-        if (isPartitioned) {
-            sneakyAdmin(() -> admin().topics().deletePartitionedTopic(topic));
+    /**
+     * Delete a Pulsar topic.
+     *
+     * @param topic The topic name.
+     */
+    public void deleteTopic(String topic) {
+        String topicName = topicName(topic);
+        PartitionedTopicMetadata metadata;
+
+        try {
+            metadata = admin().topics().getPartitionedTopicMetadata(topicName);
+        } catch (NotFoundException e) {
+            // This topic doesn't exist. Just skip deletion.
+            return;
+        } catch (PulsarAdminException e) {
+            sneakyThrow(e);
+            return;
+        }
+
+        removeConsumers(topic);
+        removeProducers(topic);
+        if (metadata.partitions <= 0) {
+            sneakyAdmin(() -> admin().topics().delete(topicName));
         } else {
-            sneakyAdmin(() -> admin().topics().delete(topic));
+            sneakyAdmin(() -> admin().topics().deletePartitionedTopic(topicName));
         }
     }
 
+    /** Convert the topic metadata into a list of topic partitions. */
     public List<TopicPartition> topicInfo(String topic) {
         try {
             return client().getPartitionsForTopic(topic).get().stream()
@@ -144,10 +245,31 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         }
     }
 
-    protected List<TopicPartition> topicsInfo(Collection<String> topics) {
-        return topics.stream().flatMap(s -> topicInfo(s).stream()).collect(toList());
+    /**
+     * Query a list of topics. Convert the topic metadata into a list of topic partitions. Return a
+     * mapping for topic and its partitions.
+     */
+    public Map<String, List<TopicPartition>> topicsInfo(String... topics) {
+        return topicsInfo(Arrays.asList(topics));
+    }
+
+    /**
+     * Query a list of topics. Convert the topic metadata into a list of topic partitions. Return a
+     * mapping for topic and its partitions.
+     */
+    public Map<String, List<TopicPartition>> topicsInfo(Collection<String> topics) {
+        return topics.stream().collect(toMap(identity(), this::topicInfo));
     }
 
+    /**
+     * Send a single message to Pulsar, return the message id after the ack from Pulsar.
+     *
+     * @param topic The name of the topic.
+     * @param schema The schema for serialization.
+     * @param message The record need to be sent.
+     * @param <T> The type of the record.
+     * @return message id.
+     */
     public <T> MessageId sendMessage(String topic, Schema<T> schema, T message) {
         List<MessageId> messageIds = sendMessages(topic, schema, singletonList(message));
         checkArgument(messageIds.size() == 1);
@@ -155,6 +277,16 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         return messageIds.get(0);
     }
 
+    /**
+     * Send a single message to Pulsar, return the message id after the ack from Pulsar.
+     *
+     * @param topic The name of the topic.
+     * @param schema The schema for serialization.
+     * @param key The message key.
+     * @param message The record need to be sent.
+     * @param <T> The type of the record.
+     * @return message id.
+     */
     public <T> MessageId sendMessage(String topic, Schema<T> schema, String key, T message) {
         List<MessageId> messageIds = sendMessages(topic, schema, key, singletonList(message));
         checkArgument(messageIds.size() == 1);
@@ -162,23 +294,42 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         return messageIds.get(0);
     }
 
+    /**
+     * Send a list of messages to Pulsar, return the message id set after the ack from Pulsar.
+     *
+     * @param topic The name of the topic.
+     * @param schema The schema for serialization.
+     * @param messages The records need to be sent.
+     * @param <T> The type of the record.
+     * @return message id.
+     */
     public <T> List<MessageId> sendMessages(
             String topic, Schema<T> schema, Collection<T> messages) {
         return sendMessages(topic, schema, null, messages);
     }
 
+    /**
+     * Send a list messages to Pulsar, return the message id set after the ack from Pulsar.
+     *
+     * @param topic The name of the topic.
+     * @param schema The schema for serialization.
+     * @param key The message key.
+     * @param messages The records need to be sent.
+     * @param <T> The type of the record.
+     * @return message id.
+     */
     public <T> List<MessageId> sendMessages(
             String topic, Schema<T> schema, String key, Collection<T> messages) {
-        try (Producer<T> producer = client().newProducer(schema).topic(topic).create()) {
+        try {
+            Producer<T> producer = createProducer(topic, schema);
             List<MessageId> messageIds = new ArrayList<>(messages.size());
 
             for (T message : messages) {
-                MessageId messageId;
-                if (Strings.isNullOrEmpty(key)) {
-                    messageId = producer.newMessage().value(message).send();
-                } else {
-                    messageId = producer.newMessage().key(key).value(message).send();
+                TypedMessageBuilder<T> builder = producer.newMessage().value(message);
+                if (!Strings.isNullOrEmpty(key)) {
+                    builder.key(key);
                 }
+                MessageId messageId = builder.send();
                 messageIds.add(messageId);
             }
 
@@ -189,22 +340,117 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         }
     }
 
+    /**
+     * Consume a message from the given Pulsar topic, this method would be blocked until we get a
+     * message from this topic.
+     */
+    public <T> Message<T> receiveMessage(String topic, Schema<T> schema) {
+        try {
+            Consumer<T> consumer = createConsumer(topic, schema);
+            return drainOneMessage(consumer);
+        } catch (PulsarClientException e) {
+            sneakyThrow(e);
+            return null;
+        }
+    }
+
+    /**
+     * Consume a message from the given Pulsar topic, this method would be blocked until the
+     * timeout is reached. A null message would be returned if no message has been consumed from
+     * Pulsar.
+     */
+    public <T> Message<T> receiveMessage(String topic, Schema<T> schema, Duration timeout) {
+        try {
+            Consumer<T> consumer = createConsumer(topic, schema);
+            Message<T> message = consumer.receiveAsync().get(timeout.toMillis(), MILLISECONDS);
+            consumer.acknowledgeCumulative(message.getMessageId());
+
+            return message;
+        } catch (Exception e) {
+            return null;
+        }
+    }
+
+    /**
+     * Consume a fixed number of messages from the given Pulsar topic, this method would be blocked
+     * until we get the exactly number of messages from this topic.
+     */
+    public <T> List<Message<T>> receiveMessages(String topic, Schema<T> schema, int counts) {
+        if (counts == 0) {
+            return emptyList();
+        } else if (counts < 0) {
+            // Drain all messages.
+            return receiveAllMessages(topic, schema, Duration.ofMinutes(1));
+        } else if (counts == 1) {
+            // Drain one message.
+            Message<T> message = receiveMessage(topic, schema);
+            return singletonList(message);
+        } else {
+            // Drain a fixed number of messages.
+            try {
+                Consumer<T> consumer = createConsumer(topic, schema);
+                List<Message<T>> messages = new ArrayList<>(counts);
+                for (int i = 0; i < counts; i++) {
+                    Message<T> message = drainOneMessage(consumer);
+                    messages.add(message);
+                }
+                return messages;
+            } catch (PulsarClientException e) {
+                sneakyThrow(e);
+                return emptyList();
+            }
+        }
+    }
+
+    /**
+     * Drain all the messages from the current topic. We will wait until all the messages have
+     * been consumed or the timeout is reached.
+     */
+    public <T> List<Message<T>> receiveAllMessages(
+            String topic, Schema<T> schema, Duration timeout) {
+        List<Message<T>> messages = new ArrayList<>();
+
+        Message<T> message = receiveMessage(topic, schema, timeout);
+        while (message != null) {
+            messages.add(message);
+            message = receiveMessage(topic, schema, timeout);
+        }
+
+        return messages;
+    }
+
+    /** Return the transaction coordinator client for operating {@link TxnID}. */
+    public TransactionCoordinatorClient coordinatorClient() {
+        return ((PulsarClientImpl) client()).getTcClient();
+    }
+
+    /**
+     * Return the broker url for this Pulsar runtime. It's only used in flink environment. You can't
+     * create the {@link PulsarClient} by this broker url, use the {@link #client()} instead.
+     */
     public String serviceUrl() {
         return serviceUrl;
     }
 
+    /**
+     * Return the broker http url for this Pulsar runtime. It's only used in flink environment. You
+     * can't create the {@link PulsarAdmin} by this broker http url, use the {@link #admin()}
+     * instead.
+     */
     public String adminUrl() {
         return adminUrl;
     }
 
+    /** The client for creating producer and consumer. It's used in tests. */
     public PulsarClient client() {
         return client;
     }
 
+    /** The client for creating topics and query other metadata, etc. It's used in tests. */
     public PulsarAdmin admin() {
         return admin;
     }
 
+    /** The configuration for constructing {@link PulsarConfiguration}. */
     public Configuration config() {
         Configuration configuration = new Configuration();
         configuration.set(PULSAR_SERVICE_URL, serviceUrl());
@@ -212,8 +458,25 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
         return configuration;
     }
 
+    /** Create the sink configuration with common settings. */
+    public Configuration sinkConfig(DeliveryGuarantee deliveryGuarantee) {
+        Configuration configuration = config();
+        configuration.set(PULSAR_WRITE_DELIVERY_GUARANTEE, deliveryGuarantee);
+        if (deliveryGuarantee == EXACTLY_ONCE) {
+            configuration.set(PULSAR_WRITE_TRANSACTION_TIMEOUT, Duration.ofMinutes(5).toMillis());
+            configuration.set(PULSAR_ENABLE_TRANSACTION, true);
+            configuration.set(PULSAR_SEND_TIMEOUT_MS, 0L);
+        }
+
+        return configuration;
+    }
+
+    /** This method is used for test framework. You can't close this operator manually. */
     @Override
     public void close() throws IOException {
+        producers.clear();
+        consumers.clear();
+
         if (admin != null) {
             admin.close();
         }
@@ -236,27 +499,94 @@ public class PulsarRuntimeOperator implements Serializable, Closeable {
     private void createPartitionedTopic(String topic, int numberOfPartitions) {
         try {
             admin().lookups().lookupPartitionedTopic(topic);
-            sneakyAdmin(() -> admin().topics().expireMessagesForAllSubscriptionsAsync(topic, 0));
+            sneakyAdmin(() -> admin().topics().expireMessagesForAllSubscriptions(topic, 0));
         } catch (PulsarAdminException e) {
             sneakyAdmin(() -> admin().topics().createPartitionedTopic(topic, numberOfPartitions));
         }
     }
 
-    private void initializeClients() {
-        this.client = sneakyClient(() -> PulsarClient.builder().serviceUrl(serviceUrl).build());
-        this.admin = sneakyClient(() -> PulsarAdmin.builder().serviceHttpUrl(adminUrl).build());
+    @SuppressWarnings("unchecked")
+    private <T> Producer<T> createProducer(String topic, Schema<T> schema)
+            throws PulsarClientException {
+        TopicName topicName = TopicName.get(topic);
+        String name = topicName.getPartitionedTopicName();
+        int index = topicName.getPartitionIndex();
+        ConcurrentHashMap<Integer, Producer<?>> topicProducers =
+                producers.computeIfAbsent(name, d -> new ConcurrentHashMap<>());
+
+        return (Producer<T>)
+                topicProducers.computeIfAbsent(
+                        index,
+                        i -> {
+                            try {
+                                return client().newProducer(schema).topic(topic).create();
+                            } catch (PulsarClientException e) {
+                                sneakyThrow(e);
+                                return null;
+                            }
+                        });
     }
 
-    // --------------------------- Serialization Logic -----------------------------
+    @SuppressWarnings("unchecked")
+    private <T> Consumer<T> createConsumer(String topic, Schema<T> schema)
+            throws PulsarClientException {
+        TopicName topicName = TopicName.get(topic);
+        String name = topicName.getPartitionedTopicName();
+        int index = topicName.getPartitionIndex();
+        ConcurrentHashMap<Integer, Consumer<?>> topicConsumers =
+                consumers.computeIfAbsent(name, d -> new ConcurrentHashMap<>());
+
+        return (Consumer<T>)
+                topicConsumers.computeIfAbsent(
+                        index,
+                        i -> {
+                            try {
+                                return client().newConsumer(schema)
+                                        .topic(topic)
+                                        .subscriptionName(SUBSCRIPTION_NAME)
+                                        .subscriptionMode(Durable)
+                                        .subscriptionType(Exclusive)
+                                        .subscriptionInitialPosition(
+                                                SubscriptionInitialPosition.Earliest)
+                                        .subscribe();
+                            } catch (PulsarClientException e) {
+                                sneakyThrow(e);
+                                return null;
+                            }
+                        });
+    }
 
-    private void writeObject(ObjectOutputStream oos) throws IOException {
-        oos.writeUTF(serviceUrl);
-        oos.writeUTF(adminUrl);
+    private void removeProducers(String topic) {
+        String topicName = topicName(topic);
+        ConcurrentHashMap<Integer, Producer<?>> integerProducers = producers.remove(topicName);
+        if (integerProducers != null) {
+            for (Producer<?> producer : integerProducers.values()) {
+                try {
+                    producer.close();
+                } catch (PulsarClientException e) {
+                    sneakyThrow(e);
+                }
+            }
+        }
+    }
+
+    private void removeConsumers(String topic) {
+        String topicName = topicName(topic);
+        ConcurrentHashMap<Integer, Consumer<?>> integerConsumers = consumers.remove(topicName);
+        if (integerConsumers != null) {
+            for (Consumer<?> consumer : integerConsumers.values()) {
+                try {
+                    consumer.close();
+                } catch (PulsarClientException e) {
+                    sneakyThrow(e);
+                }
+            }
+        }
     }
 
-    private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException {
-        this.serviceUrl = ois.readUTF();
-        this.adminUrl = ois.readUTF();
-        initializeClients();
+    private <T> Message<T> drainOneMessage(Consumer<T> consumer) throws PulsarClientException {
+        Message<T> message = consumer.receive();
+        consumer.acknowledgeCumulative(message.getMessageId());
+        return message;
     }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/container/PulsarContainerRuntime.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/container/PulsarContainerRuntime.java
index 5560767..3d66728 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/container/PulsarContainerRuntime.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/container/PulsarContainerRuntime.java
@@ -28,16 +28,17 @@ import org.testcontainers.containers.BindMode;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.PulsarContainer;
 import org.testcontainers.containers.output.Slf4jLogConsumer;
-import org.testcontainers.containers.wait.strategy.HttpWaitStrategy;
 import org.testcontainers.utility.DockerImageName;
 
-import java.io.IOException;
 import java.time.Duration;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.flink.util.DockerImageVersions.PULSAR;
+import static org.apache.flink.util.Preconditions.checkArgument;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 import static org.testcontainers.containers.PulsarContainer.BROKER_HTTP_PORT;
 import static org.testcontainers.containers.PulsarContainer.BROKER_PORT;
+import static org.testcontainers.containers.wait.strategy.Wait.forHttp;
 
 /**
  * {@link PulsarRuntime} implementation, use the TestContainers as the backend. We would start a
@@ -45,13 +46,14 @@ import static org.testcontainers.containers.PulsarContainer.BROKER_PORT;
  */
 public class PulsarContainerRuntime implements PulsarRuntime {
     private static final Logger LOG = LoggerFactory.getLogger(PulsarContainerRuntime.class);
-    private static final String PULSAR_INTERNAL_HOSTNAME = "pulsar";
 
+    // The default host for connecting in docker environment.
+    private static final String PULSAR_INTERNAL_HOSTNAME = "pulsar";
     // This url is used on the container side.
-    public static final String PULSAR_SERVICE_URL =
+    private static final String PULSAR_SERVICE_URL =
             String.format("pulsar://%s:%d", PULSAR_INTERNAL_HOSTNAME, BROKER_PORT);
     // This url is used on the container side.
-    public static final String PULSAR_ADMIN_URL =
+    private static final String PULSAR_ADMIN_URL =
             String.format("http://%s:%d", PULSAR_INTERNAL_HOSTNAME, BROKER_HTTP_PORT);
 
     /**
@@ -60,50 +62,75 @@ public class PulsarContainerRuntime implements PulsarRuntime {
      */
     private final PulsarContainer container = new PulsarContainer(DockerImageName.parse(PULSAR));
 
+    private final AtomicBoolean started = new AtomicBoolean(false);
+
+    private boolean boundFlink = false;
     private PulsarRuntimeOperator operator;
 
     public PulsarContainerRuntime bindWithFlinkContainer(GenericContainer<?> flinkContainer) {
+        checkArgument(
+                !started.get(),
+                "This Pulsar container has been started, we can't bind it to a Flink container.");
+
         this.container
                 .withNetworkAliases(PULSAR_INTERNAL_HOSTNAME)
                 .dependsOn(flinkContainer)
                 .withNetwork(flinkContainer.getNetwork());
+        this.boundFlink = true;
         return this;
     }
 
     @Override
     public void startUp() {
-        // Prepare Pulsar Container.
+        boolean havenStartedBefore = started.compareAndSet(false, true);
+        if (!havenStartedBefore) {
+            LOG.warn("You have started the Pulsar Container. We will skip this execution.");
+            return;
+        }
+
+        // Override the default configuration in container for enabling the Pulsar transaction.
         container.withClasspathResourceMapping(
                 "containers/txnStandalone.conf",
                 "/pulsar/conf/standalone.conf",
                 BindMode.READ_ONLY);
-        container.addExposedPort(2181);
+        // Wait until the Pulsar broker is ready.
         container.waitingFor(
-                new HttpWaitStrategy()
+                forHttp("/admin/v2/namespaces/public/default")
                         .forPort(BROKER_HTTP_PORT)
                         .forStatusCode(200)
-                        .forPath("/admin/v2/namespaces/public/default")
                         .withStartupTimeout(Duration.ofMinutes(5)));
-
         // Start the Pulsar Container.
         container.start();
+        // Append the output to this runtime logger. Used for local debug purpose.
         container.followOutput(new Slf4jLogConsumer(LOG).withSeparateOutputStreams());
 
         // Create the operator.
-        this.operator =
-                new PulsarRuntimeOperator(
-                        container.getPulsarBrokerUrl(), container.getHttpServiceUrl());
+        if (boundFlink) {
+            this.operator =
+                    new PulsarRuntimeOperator(
+                            container.getPulsarBrokerUrl(),
+                            PULSAR_SERVICE_URL,
+                            container.getHttpServiceUrl(),
+                            PULSAR_ADMIN_URL);
+        } else {
+            this.operator =
+                    new PulsarRuntimeOperator(
+                            container.getPulsarBrokerUrl(), container.getHttpServiceUrl());
+        }
     }
 
     @Override
     public void tearDown() {
         try {
-            operator.close();
-            this.operator = null;
-        } catch (IOException e) {
+            if (operator != null) {
+                operator.close();
+                this.operator = null;
+            }
+            container.stop();
+            started.compareAndSet(true, false);
+        } catch (Exception e) {
             throw new IllegalStateException(e);
         }
-        container.stop();
     }
 
     @Override
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/embedded/PulsarEmbeddedRuntime.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/embedded/PulsarEmbeddedRuntime.java
new file mode 100644
index 0000000..d598e97
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/embedded/PulsarEmbeddedRuntime.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.testutils.runtime.embedded;
+
+import org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntime;
+import org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntimeOperator;
+import org.apache.flink.util.FileUtils;
+
+import org.apache.bookkeeper.conf.ServerConfiguration;
+import org.apache.logging.log4j.LogManager;
+import org.apache.pulsar.broker.PulsarService;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.apache.pulsar.client.admin.PulsarAdmin;
+import org.apache.pulsar.client.admin.PulsarAdminException;
+import org.apache.pulsar.common.configuration.PulsarConfigurationLoader;
+import org.apache.pulsar.common.naming.TopicName;
+import org.apache.pulsar.common.policies.data.ClusterData;
+import org.apache.pulsar.common.policies.data.TenantInfo;
+import org.apache.pulsar.common.policies.data.TenantInfoImpl;
+import org.apache.pulsar.functions.worker.WorkerConfig;
+import org.apache.pulsar.zookeeper.LocalBookkeeperEnsemble;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.pulsar.broker.ServiceConfigurationUtils.brokerUrl;
+import static org.apache.pulsar.broker.ServiceConfigurationUtils.webServiceUrl;
+import static org.apache.pulsar.common.naming.NamespaceName.SYSTEM_NAMESPACE;
+import static org.apache.pulsar.common.naming.TopicName.TRANSACTION_COORDINATOR_ASSIGN;
+
+/** Provides an embedded Pulsar server. We use this runtime for transaction-related tests. */
+public class PulsarEmbeddedRuntime implements PulsarRuntime {
+    private static final Logger LOG = LoggerFactory.getLogger(PulsarEmbeddedRuntime.class);
+
+    private static final String CONFIG_FILE_PATH;
+
+    static {
+        // Find the absolute path for containers/txnStandalone.conf
+        ClassLoader classLoader = PulsarEmbeddedRuntime.class.getClassLoader();
+        URL resource = classLoader.getResource("containers/txnStandalone.conf");
+        File file = new File(checkNotNull(resource).getFile());
+        CONFIG_FILE_PATH = file.getAbsolutePath();
+    }
+
+    private final Path tempDir;
+
+    private LocalBookkeeperEnsemble bookkeeper;
+    private PulsarService pulsarService;
+    private PulsarRuntimeOperator operator;
+
+    public PulsarEmbeddedRuntime() {
+        this.tempDir = createTempDir();
+    }
+
+    @Override
+    public void startUp() {
+        try {
+            startBookkeeper();
+            startPulsarService();
+
+            // Create the operator.
+            this.operator = new PulsarRuntimeOperator(getBrokerUrl(), getWebServiceUrl());
+        } catch (Exception e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    @Override
+    public void tearDown() {
+        try {
+            if (operator != null) {
+                operator.close();
+                this.operator = null;
+            }
+            if (pulsarService != null) {
+                pulsarService.close();
+            }
+            if (bookkeeper != null) {
+                bookkeeper.stop();
+            }
+        } catch (Exception e) {
+            throw new IllegalStateException(e);
+        } finally {
+            removeTempDir(tempDir);
+        }
+    }
+
+    @Override
+    public PulsarRuntimeOperator operator() {
+        return checkNotNull(operator, "You should start this embedded Pulsar first.");
+    }
+
+    private Path createTempDir() {
+        try {
+            return Files.createTempDirectory("pulsar");
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    private void removeTempDir(Path tempDir) {
+        try {
+            FileUtils.deleteDirectory(tempDir.normalize().toFile());
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    public void startBookkeeper() throws Exception {
+        Path zkPath = Paths.get("data", "standalone", "zookeeper");
+        Path bkPath = Paths.get("data", "standalone", "bookkeeper");
+
+        String zkDir = tempDir.resolve(zkPath).normalize().toString();
+        String bkDir = tempDir.resolve(bkPath).normalize().toString();
+
+        ServerConfiguration bkServerConf = new ServerConfiguration();
+        bkServerConf.loadConf(new File(CONFIG_FILE_PATH).toURI().toURL());
+        this.bookkeeper = new LocalBookkeeperEnsemble(1, 0, 0, zkDir, bkDir, true, "127.0.0.1");
+
+        // Start Bookkeeper & zookeeper.
+        bookkeeper.startStandalone(bkServerConf, false);
+    }
+
+    private void startPulsarService() throws Exception {
+        ServiceConfiguration config;
+        try (FileInputStream inputStream = new FileInputStream(CONFIG_FILE_PATH)) {
+            config = PulsarConfigurationLoader.create(inputStream, ServiceConfiguration.class);
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+
+        // Use runtime dynamic ports for broker.
+        config.setAdvertisedAddress("127.0.0.1");
+        config.setClusterName("standalone");
+
+        // Use random port.
+        config.setBrokerServicePort(Optional.of(0));
+        config.setWebServicePort(Optional.of(0));
+
+        // Select available port for bookkeeper and zookeeper.
+        int zkPort = getZkPort();
+        String zkConnect = "127.0.0.1" + ":" + zkPort;
+        config.setZookeeperServers(zkConnect);
+        config.setConfigurationStoreServers(zkConnect);
+        config.setRunningStandalone(true);
+
+        this.pulsarService =
+                new PulsarService(
+                        config,
+                        new WorkerConfig(),
+                        Optional.empty(),
+                        (exitCode) -> {
+                            LOG.info("Halting standalone process with code {}", exitCode);
+                            LogManager.shutdown();
+                            Runtime.getRuntime().halt(exitCode);
+                        });
+
+        // Start Pulsar Broker.
+        pulsarService.start();
+
+        // Create sample data environment.
+        String webServiceUrl = getWebServiceUrl();
+        String brokerUrl = getBrokerUrl();
+        try (PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(webServiceUrl).build()) {
+            ClusterData clusterData =
+                    ClusterData.builder()
+                            .serviceUrl(webServiceUrl)
+                            .brokerServiceUrl(brokerUrl)
+                            .build();
+            String cluster = config.getClusterName();
+            createSampleNameSpace(admin, clusterData, cluster);
+
+            // Create default namespace
+            createNameSpace(
+                    admin,
+                    cluster,
+                    TopicName.PUBLIC_TENANT,
+                    TopicName.PUBLIC_TENANT + "/" + TopicName.DEFAULT_NAMESPACE);
+
+            // Create Pulsar system namespace
+            createNameSpace(
+                    admin, cluster, SYSTEM_NAMESPACE.getTenant(), SYSTEM_NAMESPACE.toString());
+            // Enable transaction
+            if (config.isTransactionCoordinatorEnabled()
+                    && !admin.namespaces()
+                            .getTopics(SYSTEM_NAMESPACE.toString())
+                            .contains(TRANSACTION_COORDINATOR_ASSIGN.getPartition(0).toString())) {
+                admin.topics().createPartitionedTopic(TRANSACTION_COORDINATOR_ASSIGN.toString(), 1);
+            }
+        }
+    }
+
+    private int getZkPort() {
+        return checkNotNull(bookkeeper).getZookeeperPort();
+    }
+
+    private String getBrokerUrl() {
+        Integer port = pulsarService.getBrokerListenPort().orElseThrow(IllegalStateException::new);
+        return brokerUrl("127.0.0.1", port);
+    }
+
+    private String getWebServiceUrl() {
+        Integer port = pulsarService.getListenPortHTTP().orElseThrow(IllegalArgumentException::new);
+        return webServiceUrl("127.0.0.1", port);
+    }
+
+    private void createSampleNameSpace(PulsarAdmin admin, ClusterData clusterData, String cluster)
+            throws PulsarAdminException {
+        // Create a sample namespace
+        String tenant = "sample";
+        String globalCluster = "global";
+        String namespace = tenant + "/ns1";
+
+        List<String> clusters = admin.clusters().getClusters();
+        if (!clusters.contains(cluster)) {
+            admin.clusters().createCluster(cluster, clusterData);
+        } else {
+            admin.clusters().updateCluster(cluster, clusterData);
+        }
+        // Create marker for "global" cluster
+        if (!clusters.contains(globalCluster)) {
+            admin.clusters().createCluster(globalCluster, ClusterData.builder().build());
+        }
+
+        if (!admin.tenants().getTenants().contains(tenant)) {
+            admin.tenants()
+                    .createTenant(
+                            tenant,
+                            new TenantInfoImpl(
+                                    Collections.emptySet(), Collections.singleton(cluster)));
+        }
+
+        if (!admin.namespaces().getNamespaces(tenant).contains(namespace)) {
+            admin.namespaces().createNamespace(namespace);
+        }
+    }
+
+    private void createNameSpace(
+            PulsarAdmin admin, String cluster, String publicTenant, String defaultNamespace)
+            throws PulsarAdminException {
+        if (!admin.tenants().getTenants().contains(publicTenant)) {
+            admin.tenants()
+                    .createTenant(
+                            publicTenant,
+                            TenantInfo.builder()
+                                    .adminRoles(Collections.emptySet())
+                                    .allowedClusters(Collections.singleton(cluster))
+                                    .build());
+        }
+        if (!admin.namespaces().getNamespaces(publicTenant).contains(defaultNamespace)) {
+            admin.namespaces().createNamespace(defaultNamespace);
+            admin.namespaces()
+                    .setNamespaceReplicationClusters(
+                            defaultNamespace, Collections.singleton(cluster));
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/BlankBrokerInterceptor.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/BlankBrokerInterceptor.java
deleted file mode 100644
index 8355a23..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/BlankBrokerInterceptor.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.pulsar.broker.PulsarService;
-import org.apache.pulsar.broker.intercept.BrokerInterceptor;
-import org.apache.pulsar.broker.service.ServerCnx;
-import org.apache.pulsar.common.api.proto.BaseCommand;
-
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-
-/** No operation for this BrokerInterceptor implementation. */
-public class BlankBrokerInterceptor implements BrokerInterceptor {
-
-    @Override
-    public void onPulsarCommand(BaseCommand command, ServerCnx cnx) {
-        // no-op
-    }
-
-    @Override
-    public void onConnectionClosed(ServerCnx cnx) {
-        // no-op
-    }
-
-    @Override
-    public void onWebserviceRequest(ServletRequest request) {
-        // no-op
-    }
-
-    @Override
-    public void onWebserviceResponse(ServletRequest request, ServletResponse response) {
-        // no-op
-    }
-
-    @Override
-    public void initialize(PulsarService pulsarService) {
-        // no-op
-    }
-
-    @Override
-    public void close() {
-        // no-op
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockBookKeeperClientFactory.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockBookKeeperClientFactory.java
deleted file mode 100644
index 41fad54..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockBookKeeperClientFactory.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import io.netty.channel.EventLoopGroup;
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.EnsemblePlacementPolicy;
-import org.apache.bookkeeper.common.util.OrderedExecutor;
-import org.apache.bookkeeper.stats.StatsLogger;
-import org.apache.pulsar.broker.BookKeeperClientFactory;
-import org.apache.pulsar.broker.ServiceConfiguration;
-import org.apache.zookeeper.ZooKeeper;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Optional;
-
-/** A BookKeeperClientFactory implementation which returns a mocked bookkeeper. */
-public class MockBookKeeperClientFactory implements BookKeeperClientFactory {
-
-    private final OrderedExecutor executor =
-            OrderedExecutor.newBuilder().numThreads(1).name("mock-pulsar-bookkeeper").build();
-
-    private final BookKeeper bookKeeper = NonClosableMockBookKeeper.create(executor);
-
-    @Override
-    public BookKeeper create(
-            ServiceConfiguration conf,
-            ZooKeeper zkClient,
-            EventLoopGroup eventLoopGroup,
-            Optional<Class<? extends EnsemblePlacementPolicy>> ensemblePlacementPolicyClass,
-            Map<String, Object> ensemblePlacementPolicyProperties)
-            throws IOException {
-        return bookKeeper;
-    }
-
-    @Override
-    public BookKeeper create(
-            ServiceConfiguration conf,
-            ZooKeeper zkClient,
-            EventLoopGroup eventLoopGroup,
-            Optional<Class<? extends EnsemblePlacementPolicy>> ensemblePlacementPolicyClass,
-            Map<String, Object> ensemblePlacementPolicyProperties,
-            StatsLogger statsLogger)
-            throws IOException {
-        return bookKeeper;
-    }
-
-    @Override
-    public void close() {
-        try {
-            bookKeeper.close();
-            executor.shutdown();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockPulsarService.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockPulsarService.java
deleted file mode 100644
index 6b6c412..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockPulsarService.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.bookkeeper.common.util.OrderedExecutor;
-import org.apache.pulsar.broker.BookKeeperClientFactory;
-import org.apache.pulsar.broker.PulsarService;
-import org.apache.pulsar.broker.ServiceConfiguration;
-import org.apache.pulsar.broker.intercept.BrokerInterceptor;
-import org.apache.pulsar.broker.namespace.NamespaceService;
-import org.apache.pulsar.metadata.api.extended.MetadataStoreExtended;
-import org.apache.pulsar.metadata.impl.ZKMetadataStore;
-import org.apache.pulsar.zookeeper.ZooKeeperClientFactory;
-import org.apache.zookeeper.MockZooKeeperSession;
-
-import java.util.function.Supplier;
-
-/** A Mock pulsar service which would use the mocked zookeeper and bookkeeper. */
-public class MockPulsarService extends PulsarService {
-
-    private final int brokerServicePort;
-
-    private final MockZooKeeperClientFactory zooKeeperClientFactory =
-            new MockZooKeeperClientFactory();
-
-    private final MockZooKeeperSession zooKeeperSession =
-            MockZooKeeperSession.newInstance(zooKeeperClientFactory.getZooKeeper());
-
-    private final SameThreadOrderedSafeExecutor orderedExecutor =
-            new SameThreadOrderedSafeExecutor();
-
-    public MockPulsarService(ServiceConfiguration config) {
-        super(config);
-        this.brokerServicePort =
-                config.getBrokerServicePort().orElseThrow(IllegalArgumentException::new);
-    }
-
-    public ZooKeeperClientFactory getZooKeeperClientFactory() {
-        return zooKeeperClientFactory;
-    }
-
-    public BookKeeperClientFactory newBookKeeperClientFactory() {
-        return new MockBookKeeperClientFactory();
-    }
-
-    public MetadataStoreExtended createLocalMetadataStore() {
-        return new ZKMetadataStore(zooKeeperSession);
-    }
-
-    public MetadataStoreExtended createConfigurationMetadataStore() {
-        return new ZKMetadataStore(zooKeeperSession);
-    }
-
-    public Supplier<NamespaceService> getNamespaceServiceProvider() {
-        return () -> new NamespaceService(this);
-    }
-
-    @Override
-    public OrderedExecutor getOrderedExecutor() {
-        return orderedExecutor;
-    }
-
-    @Override
-    public BrokerInterceptor getBrokerInterceptor() {
-        return new BlankBrokerInterceptor();
-    }
-
-    public int getBrokerServicePort() {
-        return brokerServicePort;
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockZooKeeperClientFactory.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockZooKeeperClientFactory.java
deleted file mode 100644
index 3c89484..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/MockZooKeeperClientFactory.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.flink.shaded.guava30.com.google.common.util.concurrent.MoreExecutors;
-
-import org.apache.bookkeeper.util.ZkUtils;
-import org.apache.pulsar.zookeeper.ZooKeeperClientFactory;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.MockZooKeeper;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.data.ACL;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-import static org.apache.pulsar.zookeeper.ZookeeperClientFactoryImpl.ENCODING_SCHEME;
-import static org.apache.zookeeper.CreateMode.PERSISTENT;
-
-/** A ZooKeeperClientFactory implementation which returns mocked zookeeper instead of normal zk. */
-public class MockZooKeeperClientFactory implements ZooKeeperClientFactory {
-
-    private final MockZooKeeper zooKeeper;
-
-    public MockZooKeeperClientFactory() {
-        this.zooKeeper = MockZooKeeper.newInstance(MoreExecutors.newDirectExecutorService());
-        List<ACL> dummyAclList = new ArrayList<>(0);
-
-        try {
-            ZkUtils.createFullPathOptimistic(
-                    zooKeeper,
-                    "/ledgers/available/192.168.1.1:" + 5000,
-                    "".getBytes(ENCODING_SCHEME),
-                    dummyAclList,
-                    PERSISTENT);
-
-            zooKeeper.create(
-                    "/ledgers/LAYOUT",
-                    "1\nflat:1".getBytes(ENCODING_SCHEME),
-                    dummyAclList,
-                    PERSISTENT);
-        } catch (KeeperException | InterruptedException e) {
-            throw new IllegalStateException(e);
-        }
-    }
-
-    @Override
-    public CompletableFuture<ZooKeeper> create(
-            String serverList, SessionType sessionType, int zkSessionTimeoutMillis) {
-        return CompletableFuture.completedFuture(zooKeeper);
-    }
-
-    MockZooKeeper getZooKeeper() {
-        return zooKeeper;
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/NonClosableMockBookKeeper.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/NonClosableMockBookKeeper.java
deleted file mode 100644
index b7001b8..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/NonClosableMockBookKeeper.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.bookkeeper.client.BookKeeper;
-import org.apache.bookkeeper.client.PulsarMockBookKeeper;
-import org.apache.bookkeeper.common.util.OrderedExecutor;
-
-/**
- * Prevent the MockBookKeeper instance from being closed when the broker is restarted within a test.
- */
-public class NonClosableMockBookKeeper extends PulsarMockBookKeeper {
-
-    private NonClosableMockBookKeeper(OrderedExecutor executor) throws Exception {
-        super(executor);
-    }
-
-    @Override
-    public void close() {
-        // no-op
-    }
-
-    @Override
-    public void shutdown() {
-        // no-op
-    }
-
-    public void reallyShutdown() {
-        super.shutdown();
-    }
-
-    public static BookKeeper create(OrderedExecutor executor) {
-        try {
-            return new NonClosableMockBookKeeper(executor);
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/PulsarMockRuntime.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/PulsarMockRuntime.java
deleted file mode 100644
index b6f8aa4..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/PulsarMockRuntime.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntime;
-import org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntimeOperator;
-
-import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableSet;
-
-import org.apache.pulsar.broker.PulsarServerException;
-import org.apache.pulsar.broker.ServiceConfiguration;
-import org.apache.pulsar.client.admin.PulsarAdmin;
-import org.apache.pulsar.client.admin.PulsarAdminException;
-import org.apache.pulsar.common.policies.data.ClusterData;
-import org.apache.pulsar.common.policies.data.RetentionPolicies;
-import org.apache.pulsar.common.policies.data.TenantInfo;
-
-import java.util.Optional;
-
-import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyAdmin;
-import static org.apache.flink.util.Preconditions.checkNotNull;
-
-/** Providing a mocked pulsar server. */
-public class PulsarMockRuntime implements PulsarRuntime {
-
-    private static final String CLUSTER_NAME = "mock-pulsar-" + randomAlphanumeric(6);
-    private final MockPulsarService pulsarService;
-    private PulsarRuntimeOperator operator;
-
-    public PulsarMockRuntime() {
-        this(createConfig());
-    }
-
-    public PulsarMockRuntime(ServiceConfiguration configuration) {
-        this.pulsarService = new MockPulsarService(configuration);
-    }
-
-    @Override
-    public void startUp() {
-        try {
-            pulsarService.start();
-        } catch (PulsarServerException e) {
-            throw new IllegalStateException(e);
-        }
-        this.operator =
-                new PulsarRuntimeOperator(
-                        pulsarService.getBrokerServiceUrl(), pulsarService.getWebServiceAddress());
-
-        // Successfully start a pulsar broker, we have to create the required resources.
-        sneakyAdmin(this::createTestResource);
-    }
-
-    @Override
-    public void tearDown() {
-        try {
-            pulsarService.close();
-            operator.close();
-            this.operator = null;
-        } catch (Exception e) {
-            throw new IllegalStateException(e);
-        }
-    }
-
-    @Override
-    public PulsarRuntimeOperator operator() {
-        return checkNotNull(operator, "You should start this mock pulsar first.");
-    }
-
-    private void createTestResource() throws PulsarAdminException {
-        PulsarAdmin admin = operator().admin();
-        if (!admin.clusters().getClusters().contains(CLUSTER_NAME)) {
-            // Make clients can test short names
-            ClusterData data =
-                    ClusterData.builder()
-                            .serviceUrl("http://127.0.0.1:" + pulsarService.getBrokerServicePort())
-                            .build();
-            admin.clusters().createCluster(CLUSTER_NAME, data);
-        }
-
-        createOrUpdateTenant("public");
-        createOrUpdateNamespace("public", "default");
-
-        createOrUpdateTenant("pulsar");
-        createOrUpdateNamespace("pulsar", "system");
-    }
-
-    private void createOrUpdateTenant(String tenant) throws PulsarAdminException {
-        PulsarAdmin admin = operator().admin();
-        TenantInfo info =
-                TenantInfo.builder()
-                        .adminRoles(ImmutableSet.of("appid1", "appid2"))
-                        .allowedClusters(ImmutableSet.of(CLUSTER_NAME))
-                        .build();
-        if (!admin.tenants().getTenants().contains(tenant)) {
-            admin.tenants().createTenant(tenant, info);
-        } else {
-            admin.tenants().updateTenant(tenant, info);
-        }
-    }
-
-    public void createOrUpdateNamespace(String tenant, String namespace)
-            throws PulsarAdminException {
-        PulsarAdmin admin = operator().admin();
-        String namespaceValue = tenant + "/" + namespace;
-        if (!admin.namespaces().getNamespaces(tenant).contains(namespaceValue)) {
-            admin.namespaces().createNamespace(namespaceValue);
-            admin.namespaces().setRetention(namespaceValue, new RetentionPolicies(60, 1000));
-        }
-    }
-
-    private static ServiceConfiguration createConfig() {
-        ServiceConfiguration configuration = new ServiceConfiguration();
-
-        configuration.setAdvertisedAddress("localhost");
-        configuration.setClusterName(CLUSTER_NAME);
-
-        configuration.setManagedLedgerCacheSizeMB(8);
-        configuration.setActiveConsumerFailoverDelayTimeMillis(0);
-        configuration.setDefaultRetentionTimeInMinutes(7);
-        configuration.setDefaultNumberOfNamespaceBundles(1);
-        configuration.setZookeeperServers("localhost:2181");
-        configuration.setConfigurationStoreServers("localhost:3181");
-
-        configuration.setAuthenticationEnabled(false);
-        configuration.setAuthorizationEnabled(false);
-        configuration.setAllowAutoTopicCreation(true);
-        configuration.setBrokerDeleteInactiveTopicsEnabled(false);
-
-        configuration.setWebSocketServiceEnabled(false);
-        // Use runtime dynamic ports
-        configuration.setBrokerServicePort(Optional.of(0));
-        configuration.setWebServicePort(Optional.of(0));
-
-        // Enable transaction with in memory.
-        configuration.setTransactionCoordinatorEnabled(true);
-        configuration.setTransactionMetadataStoreProviderClassName(
-                "org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider");
-        configuration.setTransactionBufferProviderClassName(
-                "org.apache.pulsar.broker.transaction.buffer.impl.InMemTransactionBufferProvider");
-
-        return configuration;
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/SameThreadOrderedSafeExecutor.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/SameThreadOrderedSafeExecutor.java
deleted file mode 100644
index 9667f08..0000000
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/runtime/mock/SameThreadOrderedSafeExecutor.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.testutils.runtime.mock;
-
-import org.apache.bookkeeper.common.util.OrderedExecutor;
-import org.apache.bookkeeper.common.util.SafeRunnable;
-import org.apache.bookkeeper.stats.NullStatsLogger;
-import org.apache.pulsar.shade.io.netty.util.concurrent.DefaultThreadFactory;
-
-/** Override the default bookkeeper executor for executing in one thread executor. */
-public class SameThreadOrderedSafeExecutor extends OrderedExecutor {
-
-    public SameThreadOrderedSafeExecutor() {
-        super(
-                "same-thread-executor",
-                1,
-                new DefaultThreadFactory("test"),
-                NullStatsLogger.INSTANCE,
-                false,
-                false,
-                100000,
-                -1,
-                false);
-    }
-
-    @Override
-    public void execute(Runnable r) {
-        r.run();
-    }
-
-    @Override
-    public void executeOrdered(int orderingKey, SafeRunnable r) {
-        r.run();
-    }
-
-    @Override
-    public void executeOrdered(long orderingKey, SafeRunnable r) {
-        r.run();
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf b/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf
index f1a40365..bf35c59 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf
+++ b/flink-connectors/flink-connector-pulsar/src/test/resources/containers/txnStandalone.conf
@@ -1,15 +1,20 @@
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#   http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
 #
 
 ### --- General broker settings --- ###
@@ -28,6 +33,9 @@ webServicePort=8080
 # Hostname or IP address the service binds on, default is 0.0.0.0.
 bindAddress=0.0.0.0
 
+# Extra bind addresses for the service: <listener_name>:<scheme>://<host>:<port>,[...]
+bindAddresses=
+
 # Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
 advertisedAddress=
 
@@ -94,12 +102,19 @@ backlogQuotaDefaultLimitSecond=-1
 # Default ttl for namespaces if ttl is not already configured at namespace policies. (disable default-ttl with value 0)
 ttlDurationDefaultInSeconds=0
 
-# Enable the deletion of inactive topics
+# Enable the deletion of inactive topics. This parameter need to cooperate with the allowAutoTopicCreation parameter.
+# If brokerDeleteInactiveTopicsEnabled is set to true, we should ensure that allowAutoTopicCreation is also set to true.
 brokerDeleteInactiveTopicsEnabled=true
 
 # How often to check for inactive topics
 brokerDeleteInactiveTopicsFrequencySeconds=60
 
+# Allow you to delete a tenant forcefully.
+forceDeleteTenantAllowed=false
+
+# Allow you to delete a namespace forcefully.
+forceDeleteNamespaceAllowed=false
+
 # Max pending publish requests per connection to avoid keeping large number of pending
 # requests in memory. Default: 1000
 maxPendingPublishRequestsPerConnection=1000
@@ -107,6 +122,10 @@ maxPendingPublishRequestsPerConnection=1000
 # How frequently to proactively check and purge expired messages
 messageExpiryCheckIntervalInMinutes=5
 
+# Check between intervals to see if max message size in topic policies has been updated.
+# Default is 60s
+maxMessageSizeCheckIntervalInSeconds=60
+
 # How long to delay rewinding cursor and dispatching messages when active consumer is changed
 activeConsumerFailoverDelayTimeMillis=1000
 
@@ -157,6 +176,10 @@ defaultNumberOfNamespaceBundles=4
 # Using a value of 0, is disabling maxTopicsPerNamespace-limit check.
 maxTopicsPerNamespace=0
 
+# Allow schema to be auto updated at broker level. User can override this by
+# 'is_allow_auto_update_schema' of namespace policy.
+isAllowAutoUpdateSchemaEnabled=true
+
 # Enable check for minimum allowed client library version
 clientLibraryVersionCheckEnabled=false
 
@@ -215,6 +238,10 @@ dispatchThrottlingRatePerTopicInMsg=0
 # default message-byte dispatch-throttling
 dispatchThrottlingRatePerTopicInByte=0
 
+# Apply dispatch rate limiting on batch message instead individual
+# messages with in batch message. (Default is disabled)
+dispatchThrottlingOnBatchMessageEnabled=false
+
 # Dispatch rate-limiting relative to publish rate.
 # (Enabling flag will make broker to dynamically update dispatch-rate relatively to publish-rate:
 # throttle-dispatch-rate = (publish-rate + configured dispatch-rate).
@@ -224,6 +251,15 @@ dispatchThrottlingRateRelativeToPublishRate=false
 # backlog.
 dispatchThrottlingOnNonBacklogConsumerEnabled=true
 
+# The read failure backoff initial time in milliseconds. By default it is 15s.
+dispatcherReadFailureBackoffInitialTimeInMs=15000
+
+# The read failure backoff max time in milliseconds. By default it is 60s.
+dispatcherReadFailureBackoffMaxTimeInMs=60000
+
+# The read failure backoff mandatory stop time in milliseconds. By default it is 0s.
+dispatcherReadFailureBackoffMandatoryStopTimeInMs=0
+
 # Precise dispathcer flow control according to history message number of each entry
 preciseDispatcherFlowControl=false
 
@@ -284,6 +320,20 @@ maxConsumersPerSubscription=0
 # Use 0 or negative number to disable the check
 maxNumPartitionsPerPartitionedTopic=0
 
+### --- Metadata Store --- ###
+
+# Whether we should enable metadata operations batching
+metadataStoreBatchingEnabled=true
+
+# Maximum delay to impose on batching grouping
+metadataStoreBatchingMaxDelayMillis=5
+
+# Maximum number of operations to include in a singular batch
+metadataStoreBatchingMaxOperations=1000
+
+# Maximum size of a batch
+metadataStoreBatchingMaxSizeKb=128
+
 ### --- TLS --- ###
 # Deprecated - Use webServicePortTls and brokerServicePortTls instead
 tlsEnabled=false
@@ -585,7 +635,7 @@ managedLedgerDefaultAckQuorum=1
 
 # How frequently to flush the cursor positions that were accumulated due to rate limiting. (seconds).
 # Default is 60 seconds
-managedLedgerCursorPositionFlushSeconds = 60
+managedLedgerCursorPositionFlushSeconds=60
 
 # Default type of checksum to use when writing to BookKeeper. Default is "CRC32C"
 # Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum).
@@ -622,10 +672,11 @@ managedLedgerCursorBackloggedThreshold=1000
 managedLedgerDefaultMarkDeleteRateLimit=0.1
 
 # Max number of entries to append to a ledger before triggering a rollover
-# A ledger rollover is triggered on these conditions
-#  * Either the max rollover time has been reached
-#  * or max entries have been written to the ledger and at least min-time
-#    has passed
+# A ledger rollover is triggered after the min rollover time has passed
+# and one of the following conditions is true:
+#  * The max rollover time has been reached
+#  * The max entries have been written to the ledger
+#  * The max ledger size has been written to the ledger
 managedLedgerMaxEntriesPerLedger=50000
 
 # Minimum time between ledger rollover for a topic
@@ -714,7 +765,7 @@ loadBalancerHostUsageCheckIntervalMinutes=1
 # some over-loaded broker to other under-loaded brokers
 loadBalancerSheddingIntervalMinutes=1
 
-# Prevent the same topics to be shed and moved to other broker more that once within this timeframe
+# Prevent the same topics to be shed and moved to other broker more than once within this timeframe
 loadBalancerSheddingGracePeriodMinutes=30
 
 # Usage threshold to allocate max number of topics to broker
@@ -778,6 +829,9 @@ loadBalancerDirectMemoryResourceWeight=1.0
 # It only takes effect in the ThresholdShedder strategy.
 loadBalancerBundleUnloadMinThroughputThreshold=10
 
+# Time to wait for the unloading of a namespace bundle
+namespaceBundleUnloadingTimeoutMs=60000
+
 ### --- Replication --- ###
 
 # Enable replication metrics
@@ -844,6 +898,15 @@ exposePublisherStats=true
 # Default is false.
 exposePreciseBacklogInPrometheus=false
 
+# Enable splitting topic and partition label in Prometheus.
+# If enabled, a topic name will split into 2 parts, one is topic name without partition index,
+# another one is partition index, e.g. (topic=xxx, partition=0).
+# If the topic is a non-partitioned topic, -1 will be used for the partition index.
+# If disabled, one label to represent the topic and partition, e.g. (topic=xxx-partition-0)
+# Default is false.
+
+splitTopicAndPartitionLabelInPrometheus=false
+
 ### --- Deprecated config variables --- ###
 
 # Deprecated. Use configurationStoreServers
@@ -904,7 +967,7 @@ journalSyncData=false
 
 # For each ledger dir, maximum disk space which can be used.
 # Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
-# be written to that partition. If all ledger dir partitions are full, then bookie
+# be written to that partition. If all ledger dir partions are full, then bookie
 # will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will
 # shutdown.
 # Valid values should be in between 0 and 1 (exclusive).
@@ -952,8 +1015,7 @@ defaultNumPartitions=1
 ### --- Transaction config variables --- ###
 # Enable transaction coordinator in broker
 transactionCoordinatorEnabled=true
-; transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStoreProvider
-transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider
+transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStoreProvider
 
 # Transaction buffer take snapshot transaction count
 transactionBufferSnapshotMaxTransactionCount=1000
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/container/FlinkContainers.java b/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/container/FlinkContainers.java
index 38c82ae..47cd1d9 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/container/FlinkContainers.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-common/src/main/java/org/apache/flink/tests/util/flink/container/FlinkContainers.java
@@ -332,7 +332,7 @@ public class FlinkContainers implements BeforeAllCallback, AfterAllCallback {
             restClusterClient.close();
         }
         final Configuration clientConfiguration = new Configuration();
-        clientConfiguration.set(RestOptions.ADDRESS, "localhost");
+        clientConfiguration.set(RestOptions.ADDRESS, getJobManagerHost());
         clientConfiguration.set(
                 RestOptions.PORT, jobManager.getMappedPort(conf.get(RestOptions.PORT)));
         return new RestClusterClient<>(clientConfiguration, StandaloneClusterId.getInstance());
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceOrderedE2ECase.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceOrderedE2ECase.java
index 7d22e80..502b41d 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceOrderedE2ECase.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceOrderedE2ECase.java
@@ -19,7 +19,6 @@
 package org.apache.flink.tests.util.pulsar;
 
 import org.apache.flink.connector.pulsar.testutils.PulsarTestContextFactory;
-import org.apache.flink.connector.pulsar.testutils.PulsarTestEnvironment;
 import org.apache.flink.connector.testframe.junit.annotations.TestContext;
 import org.apache.flink.connector.testframe.junit.annotations.TestEnv;
 import org.apache.flink.connector.testframe.junit.annotations.TestExternalSystem;
@@ -29,8 +28,7 @@ import org.apache.flink.streaming.api.CheckpointingMode;
 import org.apache.flink.tests.util.pulsar.cases.ExclusiveSubscriptionContext;
 import org.apache.flink.tests.util.pulsar.cases.FailoverSubscriptionContext;
 import org.apache.flink.tests.util.pulsar.common.FlinkContainerWithPulsarEnvironment;
-
-import static org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntime.container;
+import org.apache.flink.tests.util.pulsar.common.PulsarContainerTestEnvironment;
 
 /**
  * Pulsar E2E test based on connector testing framework. It's used for Failover & Exclusive
@@ -48,8 +46,7 @@ public class PulsarSourceOrderedE2ECase extends SourceTestSuiteBase<String> {
 
     // Defines ConnectorExternalSystem.
     @TestExternalSystem
-    PulsarTestEnvironment pulsar =
-            new PulsarTestEnvironment(container(flink.getFlinkContainers().getJobManager()));
+    PulsarContainerTestEnvironment pulsar = new PulsarContainerTestEnvironment(flink);
 
     // Defines a set of external context Factories for different test cases.
     @TestContext
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceUnorderedE2ECase.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceUnorderedE2ECase.java
index d14d8f9..5039048 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceUnorderedE2ECase.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/PulsarSourceUnorderedE2ECase.java
@@ -19,7 +19,6 @@
 package org.apache.flink.tests.util.pulsar;
 
 import org.apache.flink.connector.pulsar.testutils.PulsarTestContextFactory;
-import org.apache.flink.connector.pulsar.testutils.PulsarTestEnvironment;
 import org.apache.flink.connector.testframe.junit.annotations.TestContext;
 import org.apache.flink.connector.testframe.junit.annotations.TestEnv;
 import org.apache.flink.connector.testframe.junit.annotations.TestExternalSystem;
@@ -28,10 +27,9 @@ import org.apache.flink.streaming.api.CheckpointingMode;
 import org.apache.flink.tests.util.pulsar.cases.KeySharedSubscriptionContext;
 import org.apache.flink.tests.util.pulsar.cases.SharedSubscriptionContext;
 import org.apache.flink.tests.util.pulsar.common.FlinkContainerWithPulsarEnvironment;
+import org.apache.flink.tests.util.pulsar.common.PulsarContainerTestEnvironment;
 import org.apache.flink.tests.util.pulsar.common.UnorderedSourceTestSuiteBase;
 
-import static org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntime.container;
-
 /**
  * Pulsar E2E test based on connector testing framework. It's used for Shared & Key_Shared
  * subscription.
@@ -48,8 +46,7 @@ public class PulsarSourceUnorderedE2ECase extends UnorderedSourceTestSuiteBase<S
 
     // Defines ConnectorExternalSystem.
     @TestExternalSystem
-    PulsarTestEnvironment pulsar =
-            new PulsarTestEnvironment(container(flink.getFlinkContainers().getJobManager()));
+    PulsarContainerTestEnvironment pulsar = new PulsarContainerTestEnvironment(flink);
 
     // Defines a set of external context Factories for different test cases.
     @SuppressWarnings("unused")
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/ExclusiveSubscriptionContext.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/ExclusiveSubscriptionContext.java
index 1245e14..6fea0c9 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/ExclusiveSubscriptionContext.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/ExclusiveSubscriptionContext.java
@@ -27,12 +27,8 @@ import java.net.URL;
 import java.util.Collections;
 import java.util.List;
 
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_ADMIN_URL;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_SERVICE_URL;
-
 /** We would consume from test splits by using {@link SubscriptionType#Exclusive} subscription. */
 public class ExclusiveSubscriptionContext extends MultipleTopicTemplateContext {
-    private static final long serialVersionUID = 1L;
 
     public ExclusiveSubscriptionContext(PulsarTestEnvironment environment) {
         this(environment, Collections.emptyList());
@@ -57,14 +53,4 @@ public class ExclusiveSubscriptionContext extends MultipleTopicTemplateContext {
     protected SubscriptionType subscriptionType() {
         return SubscriptionType.Exclusive;
     }
-
-    @Override
-    protected String serviceUrl() {
-        return PULSAR_SERVICE_URL;
-    }
-
-    @Override
-    protected String adminUrl() {
-        return PULSAR_ADMIN_URL;
-    }
 }
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/FailoverSubscriptionContext.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/FailoverSubscriptionContext.java
index 8ec1685..c473488 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/FailoverSubscriptionContext.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/FailoverSubscriptionContext.java
@@ -27,12 +27,8 @@ import java.net.URL;
 import java.util.Collections;
 import java.util.List;
 
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_ADMIN_URL;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_SERVICE_URL;
-
 /** We would consume from test splits by using {@link SubscriptionType#Failover} subscription. */
 public class FailoverSubscriptionContext extends MultipleTopicTemplateContext {
-    private static final long serialVersionUID = 1L;
 
     public FailoverSubscriptionContext(PulsarTestEnvironment environment) {
         this(environment, Collections.emptyList());
@@ -57,14 +53,4 @@ public class FailoverSubscriptionContext extends MultipleTopicTemplateContext {
     protected SubscriptionType subscriptionType() {
         return SubscriptionType.Failover;
     }
-
-    @Override
-    protected String serviceUrl() {
-        return PULSAR_SERVICE_URL;
-    }
-
-    @Override
-    protected String adminUrl() {
-        return PULSAR_ADMIN_URL;
-    }
 }
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/KeySharedSubscriptionContext.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/KeySharedSubscriptionContext.java
index 303783a..5ad369b 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/KeySharedSubscriptionContext.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/KeySharedSubscriptionContext.java
@@ -46,13 +46,10 @@ import static java.util.Collections.singletonList;
 import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
 import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicRange.RANGE_SIZE;
 import static org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema.pulsarSchema;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_ADMIN_URL;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_SERVICE_URL;
 import static org.apache.pulsar.client.api.Schema.STRING;
 
 /** We would consume from test splits by using {@link SubscriptionType#Key_Shared} subscription. */
 public class KeySharedSubscriptionContext extends PulsarTestContext<String> {
-    private static final long serialVersionUID = 1L;
 
     private int index = 0;
 
@@ -92,8 +89,8 @@ public class KeySharedSubscriptionContext extends PulsarTestContext<String> {
         PulsarSourceBuilder<String> builder =
                 PulsarSource.builder()
                         .setDeserializationSchema(pulsarSchema(STRING))
-                        .setServiceUrl(PULSAR_SERVICE_URL)
-                        .setAdminUrl(PULSAR_ADMIN_URL)
+                        .setServiceUrl(operator.serviceUrl())
+                        .setAdminUrl(operator.adminUrl())
                         .setTopicPattern(
                                 "pulsar-[0-9]+-key-shared", RegexSubscriptionMode.AllTopics)
                         .setSubscriptionType(SubscriptionType.Key_Shared)
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/SharedSubscriptionContext.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/SharedSubscriptionContext.java
index de53595..1a2db66 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/SharedSubscriptionContext.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/cases/SharedSubscriptionContext.java
@@ -40,13 +40,10 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema.pulsarSchema;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_ADMIN_URL;
-import static org.apache.flink.connector.pulsar.testutils.runtime.container.PulsarContainerRuntime.PULSAR_SERVICE_URL;
 import static org.apache.pulsar.client.api.Schema.STRING;
 
 /** We would consume from test splits by using {@link SubscriptionType#Shared} subscription. */
 public class SharedSubscriptionContext extends PulsarTestContext<String> {
-    private static final long serialVersionUID = 1L;
 
     private int index = 0;
 
@@ -71,8 +68,8 @@ public class SharedSubscriptionContext extends PulsarTestContext<String> {
         PulsarSourceBuilder<String> builder =
                 PulsarSource.builder()
                         .setDeserializationSchema(pulsarSchema(STRING))
-                        .setServiceUrl(PULSAR_SERVICE_URL)
-                        .setAdminUrl(PULSAR_ADMIN_URL)
+                        .setServiceUrl(operator.serviceUrl())
+                        .setAdminUrl(operator.adminUrl())
                         .setTopicPattern("pulsar-[0-9]+-shared", RegexSubscriptionMode.AllTopics)
                         .setSubscriptionType(SubscriptionType.Shared)
                         .setSubscriptionName("pulsar-shared");
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/PulsarContainerTestEnvironment.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/PulsarContainerTestEnvironment.java
new file mode 100644
index 0000000..654347b
--- /dev/null
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/PulsarContainerTestEnvironment.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.tests.util.pulsar.common;
+
+import org.apache.flink.connector.pulsar.testutils.PulsarTestEnvironment;
+
+import static org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntime.container;
+
+/** This test environment is used to create a Pulsar standalone instance for e2e tests. */
+public class PulsarContainerTestEnvironment extends PulsarTestEnvironment {
+
+    public PulsarContainerTestEnvironment(FlinkContainerWithPulsarEnvironment flinkEnvironment) {
+        super(container(flinkEnvironment.getFlinkContainers().getJobManager()));
+    }
+}

[flink] 02/09: [FLINK-26020][connector/pulsar] Unified Pulsar Connector config model for Pulsar source and sink.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 36de46dc9cf3144a02a4f99c973bfb6b0db60c7e
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Wed Feb 9 14:44:38 2022 +0800

    [FLINK-26020][connector/pulsar] Unified Pulsar Connector config model for Pulsar source and sink.
    
    1. Define new PulsarConfiguration for common config class.
    2. Define new PulsarConfigValidator for common config validation.
    3. Merge SourceConfiguration and Configuration into one class.
    4. Change source config options' description and regenerate the docs.
    5. Fix the compile error in tests.
    6. Drop Configuration in constructor parameters for all the source classes.
---
 .../generated/pulsar_client_configuration.html     |   8 +-
 .../generated/pulsar_source_configuration.html     |   2 +-
 .../5b9eed8a-5fb6-4373-98ac-3be2a71941b8           |  11 --
 ...arConfigUtils.java => PulsarClientFactory.java} | 209 ++++++++-------------
 .../pulsar/common/config/PulsarConfigBuilder.java  | 143 ++++++++++++++
 .../common/config/PulsarConfigValidator.java       | 105 +++++++++++
 .../pulsar/common/config/PulsarConfiguration.java  | 104 ++++++++++
 .../pulsar/common/config/PulsarOptions.java        |  18 +-
 .../connector/pulsar/source/PulsarSource.java      |  27 ++-
 .../pulsar/source/PulsarSourceBuilder.java         | 118 ++++++------
 .../pulsar/source/PulsarSourceOptions.java         |  12 +-
 .../pulsar/source/config/CursorVerification.java   |  23 ++-
 .../source/config/PulsarSourceConfigUtils.java     | 138 +++++---------
 .../pulsar/source/config/SourceConfiguration.java  | 190 +++++++++++--------
 .../source/enumerator/PulsarSourceEnumerator.java  |  18 +-
 .../source/enumerator/SplitsAssignmentState.java   |   2 +-
 .../source/enumerator/cursor/StopCursor.java       |   2 +
 .../cursor/stop/LatestMessageStopCursor.java       |   1 +
 .../enumerator/topic/range/RangeGenerator.java     |   8 +
 .../source/reader/PulsarSourceReaderFactory.java   |  19 +-
 .../deserializer/PulsarDeserializationSchema.java  |   9 +
 .../PulsarDeserializationSchemaWrapper.java        |   4 +-
 .../reader/deserializer/PulsarSchemaWrapper.java   |  12 +-
 .../reader/source/PulsarOrderedSourceReader.java   |   5 +-
 .../reader/source/PulsarSourceReaderBase.java      |   4 +-
 .../reader/source/PulsarUnorderedSourceReader.java |   3 -
 .../split/PulsarOrderedPartitionSplitReader.java   |   4 +-
 .../split/PulsarPartitionSplitReaderBase.java      |   6 +-
 .../split/PulsarUnorderedPartitionSplitReader.java |   4 +-
 .../common/config/PulsarConfigBuilderTest.java     |  76 ++++++++
 .../common/config/PulsarConfigValidatorTest.java   |  57 ++++++
 .../common/config/PulsarConfigurationTest.java     |  65 +++++++
 .../pulsar/source/PulsarSourceBuilderTest.java     |  69 +++----
 .../enumerator/PulsarSourceEnumeratorTest.java     |   1 -
 .../PulsarDeserializationSchemaTest.java           |   8 +-
 .../reader/source/PulsarSourceReaderTestBase.java  |   6 +-
 .../split/PulsarPartitionSplitReaderTestBase.java  |  11 +-
 .../pyflink/datastream/tests/test_connectors.py    |   2 +-
 38 files changed, 980 insertions(+), 524 deletions(-)

diff --git a/docs/layouts/shortcodes/generated/pulsar_client_configuration.html b/docs/layouts/shortcodes/generated/pulsar_client_configuration.html
index d44a93c..02a6d96 100644
--- a/docs/layouts/shortcodes/generated/pulsar_client_configuration.html
+++ b/docs/layouts/shortcodes/generated/pulsar_client_configuration.html
@@ -90,7 +90,7 @@
             <td><h5>pulsar.client.maxLookupRequest</h5></td>
             <td style="word-wrap: break-word;">50000</td>
             <td>Integer</td>
-            <td>The maximum number of lookup requests allowed on each broker connection to prevent overload on the broker. It should be greater than <code class="highlighter-rouge">maxConcurrentLookupRequests</code>. Requests that inside <code class="highlighter-rouge">maxConcurrentLookupRequests</code> are already sent to broker, and requests beyond <code class="highlighter-rouge">maxConcurrentLookupRequests</code> and under <code class="highlighter-rouge">maxLookupRequests</code> will  [...]
+            <td>The maximum number of lookup requests allowed on each broker connection to prevent overload on the broker. It should be greater than <code class="highlighter-rouge">pulsar.client.concurrentLookupRequest</code>. Requests that inside <code class="highlighter-rouge">pulsar.client.concurrentLookupRequest</code> are already sent to broker, and requests beyond <code class="highlighter-rouge">pulsar.client.concurrentLookupRequest</code> and under <code class="highlighter-rouge"> [...]
         </tr>
         <tr>
             <td><h5>pulsar.client.maxNumberOfRejectedRequestPerConnection</h5></td>
@@ -138,13 +138,13 @@
             <td><h5>pulsar.client.requestTimeoutMs</h5></td>
             <td style="word-wrap: break-word;">60000</td>
             <td>Integer</td>
-            <td>Maximum duration (in ms) for completing a request.</td>
+            <td>Maximum duration (in ms) for completing a request. This config option is not supported before Pulsar 2.8.1</td>
         </tr>
         <tr>
             <td><h5>pulsar.client.serviceUrl</h5></td>
             <td style="word-wrap: break-word;">(none)</td>
             <td>String</td>
-            <td>Service URL provider for Pulsar service.<br />To connect to Pulsar using client libraries, you need to specify a Pulsar protocol URL.<br />You can assign Pulsar protocol URLs to specific clusters and use the <code class="highlighter-rouge">pulsar</code> scheme.<br /><ul><li>This is an example of <code class="highlighter-rouge">localhost</code>: <code class="highlighter-rouge">pulsar://localhost:6650</code>.</li><li>If you have multiple brokers, the URL is as: <code class= [...]
+            <td>Service URL provider for Pulsar service.<br />To connect to Pulsar using client libraries, you need to specify a Pulsar protocol URL.<br />You can assign Pulsar protocol URLs to specific clusters and use the Pulsar scheme.<br /><ul><li>This is an example of <code class="highlighter-rouge">localhost</code>: <code class="highlighter-rouge">pulsar://localhost:6650</code>.</li><li>If you have multiple brokers, the URL is as: <code class="highlighter-rouge">pulsar://localhost: [...]
         </tr>
         <tr>
             <td><h5>pulsar.client.sslProvider</h5></td>
@@ -216,7 +216,7 @@
             <td><h5>pulsar.client.useTcpNoDelay</h5></td>
             <td style="word-wrap: break-word;">true</td>
             <td>Boolean</td>
-            <td>Whether to use the TCP no-delay flag on the connection to disable Nagle algorithm.<br />No-delay features ensures that packets are sent out on the network as soon as possible, and it is critical to achieve low latency publishes. On the other hand, sending out a huge number of small packets might limit the overall throughput. Therefore, if latency is not a concern, it is recommended to set the <code class="highlighter-rouge">useTcpNoDelay</code> flag to <code class="highli [...]
+            <td>Whether to use the TCP no-delay flag on the connection to disable Nagle algorithm.<br />No-delay features ensures that packets are sent out on the network as soon as possible, and it is critical to achieve low latency publishes. On the other hand, sending out a huge number of small packets might limit the overall throughput. Therefore, if latency is not a concern, it is recommended to set this option to <code class="highlighter-rouge">false</code>.<br />By default, it is  [...]
         </tr>
     </tbody>
 </table>
diff --git a/docs/layouts/shortcodes/generated/pulsar_source_configuration.html b/docs/layouts/shortcodes/generated/pulsar_source_configuration.html
index 02e512b..3bcdad8 100644
--- a/docs/layouts/shortcodes/generated/pulsar_source_configuration.html
+++ b/docs/layouts/shortcodes/generated/pulsar_source_configuration.html
@@ -48,7 +48,7 @@
             <td><h5>pulsar.source.verifyInitialOffsets</h5></td>
             <td style="word-wrap: break-word;">WARN_ON_MISMATCH</td>
             <td><p>Enum</p></td>
-            <td>Upon (re)starting the source, check whether the expected message can be read. If failure is enabled, the application fails. Otherwise, it logs a warning. A possible solution is to adjust the retention settings in Pulsar or ignoring the check result.<br /><br />Possible values:<ul><li>"FAIL_ON_MISMATCH"</li><li>"WARN_ON_MISMATCH"</li></ul></td>
+            <td>Upon (re)starting the source, check whether the expected message can be read. If failure is enabled, the application fails. Otherwise, it logs a warning. A possible solution is to adjust the retention settings in Pulsar or ignoring the check result.<br /><br />Possible values:<ul><li>"FAIL_ON_MISMATCH": Fail the consuming from Pulsar when we don't find the related cursor.</li><li>"WARN_ON_MISMATCH": Print a warn message and start consuming from the valid offset.</li></ul></td>
         </tr>
     </tbody>
 </table>
diff --git a/flink-architecture-tests/flink-architecture-tests-production/archunit-violations/5b9eed8a-5fb6-4373-98ac-3be2a71941b8 b/flink-architecture-tests/flink-architecture-tests-production/archunit-violations/5b9eed8a-5fb6-4373-98ac-3be2a71941b8
index 620eabf..4777a2d 100644
--- a/flink-architecture-tests/flink-architecture-tests-production/archunit-violations/5b9eed8a-5fb6-4373-98ac-3be2a71941b8
+++ b/flink-architecture-tests/flink-architecture-tests-production/archunit-violations/5b9eed8a-5fb6-4373-98ac-3be2a71941b8
@@ -93,17 +93,6 @@ org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema.builder():
 org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema.open(org.apache.flink.api.common.serialization.SerializationSchema$InitializationContext, org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema$KafkaSinkContext): Argument leaf type org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema$KafkaSinkContext does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated wi [...]
 org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema.serialize(java.lang.Object, org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema$KafkaSinkContext, java.lang.Long): Argument leaf type org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema$KafkaSinkContext does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
 org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer.getPartitionOffsets(java.util.Collection, org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer$PartitionOffsetsRetriever): Argument leaf type org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer$PartitionOffsetsRetriever does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or ann [...]
-org.apache.flink.connector.pulsar.source.PulsarSource.createEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext): Argument leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.createEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext): Returned leaf type org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.createEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext): Returned leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.createReader(org.apache.flink.api.connector.source.SourceReaderContext): Returned leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.getEnumeratorCheckpointSerializer(): Returned leaf type org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.getSplitSerializer(): Returned leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.restoreEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext, org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState): Argument leaf type org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.restoreEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext, org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState): Argument leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.restoreEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext, org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState): Returned leaf type org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.PulsarSource.restoreEnumerator(org.apache.flink.api.connector.source.SplitEnumeratorContext, org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState): Returned leaf type org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
-org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor.open(org.apache.pulsar.client.admin.PulsarAdmin, org.apache.flink.connector.pulsar.source.enumerator.topic.TopicPartition): Argument leaf type org.apache.flink.connector.pulsar.source.enumerator.topic.TopicPartition does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
 org.apache.flink.core.fs.EntropyInjector.createEntropyAware(org.apache.flink.core.fs.FileSystem, org.apache.flink.core.fs.Path, org.apache.flink.core.fs.FileSystem$WriteMode): Argument leaf type org.apache.flink.core.fs.FileSystem$WriteMode does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
 org.apache.flink.core.fs.EntropyInjector.createEntropyAware(org.apache.flink.core.fs.FileSystem, org.apache.flink.core.fs.Path, org.apache.flink.core.fs.FileSystem$WriteMode): Returned leaf type org.apache.flink.core.fs.OutputStreamAndPath does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
 org.apache.flink.core.fs.RecoverableFsDataOutputStream.closeForCommit(): Returned leaf type org.apache.flink.core.fs.RecoverableFsDataOutputStream$Committer does not satisfy: reside outside of package 'org.apache.flink..' or annotated with @Public or annotated with @PublicEvolving or annotated with @Deprecated
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarClientFactory.java
similarity index 55%
rename from flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigUtils.java
rename to flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarClientFactory.java
index ae5d784..b1214b5 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigUtils.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarClientFactory.java
@@ -19,8 +19,6 @@
 package org.apache.flink.connector.pulsar.common.config;
 
 import org.apache.flink.annotation.Internal;
-import org.apache.flink.configuration.ConfigOption;
-import org.apache.flink.configuration.Configuration;
 
 import org.apache.pulsar.client.admin.PulsarAdmin;
 import org.apache.pulsar.client.admin.PulsarAdminBuilder;
@@ -33,13 +31,11 @@ import org.apache.pulsar.client.impl.auth.AuthenticationDisabled;
 
 import java.util.Map;
 import java.util.TreeSet;
-import java.util.function.Consumer;
-import java.util.function.Function;
 
+import static java.util.Collections.singletonMap;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
-import static java.util.function.Function.identity;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAMS;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAM_MAP;
@@ -66,6 +62,7 @@ import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULS
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_PROXY_SERVICE_URL;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_READ_TIMEOUT;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_REQUEST_TIMEOUT;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_REQUEST_TIMEOUT_MS;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SERVICE_URL;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SSL_PROVIDER;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_STATS_INTERVAL_SECONDS;
@@ -82,133 +79,108 @@ import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULS
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
 import static org.apache.pulsar.client.api.SizeUnit.BYTES;
 
-/** The util for creating pulsar configuration class from flink's {@link Configuration}. */
+/** The factory for creating pulsar client classes from {@link PulsarConfiguration}. */
 @Internal
-public final class PulsarConfigUtils {
+public final class PulsarClientFactory {
 
-    private PulsarConfigUtils() {
+    private PulsarClientFactory() {
         // No need to create instance.
     }
 
     /** Create a PulsarClient by using the flink Configuration and the config customizer. */
-    public static PulsarClient createClient(Configuration configuration) {
+    public static PulsarClient createClient(PulsarConfiguration configuration) {
         ClientBuilder builder = PulsarClient.builder();
 
-        setOptionValue(configuration, PULSAR_SERVICE_URL, builder::serviceUrl);
-        setOptionValue(configuration, PULSAR_LISTENER_NAME, builder::listenerName);
+        // requestTimeoutMs doesn't have a setter method on ClientBuilder. We have to use the
+        // low-level setter method instead. So we put this at the beginning of the builder.
+        Integer requestTimeoutMs = configuration.get(PULSAR_REQUEST_TIMEOUT_MS);
+        builder.loadConf(singletonMap("requestTimeoutMs", requestTimeoutMs));
+
+        // Create the authentication instance for the Pulsar client.
         builder.authentication(createAuthentication(configuration));
-        setOptionValue(
-                configuration,
+
+        configuration.useOption(PULSAR_SERVICE_URL, builder::serviceUrl);
+        configuration.useOption(PULSAR_LISTENER_NAME, builder::listenerName);
+        configuration.useOption(
                 PULSAR_OPERATION_TIMEOUT_MS,
                 timeout -> builder.operationTimeout(timeout, MILLISECONDS));
-        setOptionValue(configuration, PULSAR_NUM_IO_THREADS, builder::ioThreads);
-        setOptionValue(configuration, PULSAR_NUM_LISTENER_THREADS, builder::listenerThreads);
-        setOptionValue(configuration, PULSAR_CONNECTIONS_PER_BROKER, builder::connectionsPerBroker);
-        setOptionValue(configuration, PULSAR_USE_TCP_NO_DELAY, builder::enableTcpNoDelay);
-        setOptionValue(
-                configuration, PULSAR_TLS_TRUST_CERTS_FILE_PATH, builder::tlsTrustCertsFilePath);
-        setOptionValue(
-                configuration,
-                PULSAR_TLS_ALLOW_INSECURE_CONNECTION,
-                builder::allowTlsInsecureConnection);
-        setOptionValue(
-                configuration,
-                PULSAR_TLS_HOSTNAME_VERIFICATION_ENABLE,
-                builder::enableTlsHostnameVerification);
-        setOptionValue(configuration, PULSAR_USE_KEY_STORE_TLS, builder::useKeyStoreTls);
-        setOptionValue(configuration, PULSAR_SSL_PROVIDER, builder::sslProvider);
-        setOptionValue(configuration, PULSAR_TLS_TRUST_STORE_TYPE, builder::tlsTrustStoreType);
-        setOptionValue(configuration, PULSAR_TLS_TRUST_STORE_PATH, builder::tlsTrustStorePath);
-        setOptionValue(
-                configuration, PULSAR_TLS_TRUST_STORE_PASSWORD, builder::tlsTrustStorePassword);
-        setOptionValue(configuration, PULSAR_TLS_CIPHERS, TreeSet::new, builder::tlsCiphers);
-        setOptionValue(configuration, PULSAR_TLS_PROTOCOLS, TreeSet::new, builder::tlsProtocols);
-        setOptionValue(
-                configuration,
-                PULSAR_MEMORY_LIMIT_BYTES,
-                bytes -> builder.memoryLimit(bytes, BYTES));
-        setOptionValue(
-                configuration,
-                PULSAR_STATS_INTERVAL_SECONDS,
-                v -> builder.statsInterval(v, SECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_CONCURRENT_LOOKUP_REQUEST,
-                builder::maxConcurrentLookupRequests);
-        setOptionValue(configuration, PULSAR_MAX_LOOKUP_REQUEST, builder::maxLookupRequests);
-        setOptionValue(configuration, PULSAR_MAX_LOOKUP_REDIRECTS, builder::maxLookupRedirects);
-        setOptionValue(
-                configuration,
+        configuration.useOption(PULSAR_NUM_IO_THREADS, builder::ioThreads);
+        configuration.useOption(PULSAR_NUM_LISTENER_THREADS, builder::listenerThreads);
+        configuration.useOption(PULSAR_CONNECTIONS_PER_BROKER, builder::connectionsPerBroker);
+        configuration.useOption(PULSAR_USE_TCP_NO_DELAY, builder::enableTcpNoDelay);
+        configuration.useOption(PULSAR_TLS_TRUST_CERTS_FILE_PATH, builder::tlsTrustCertsFilePath);
+        configuration.useOption(
+                PULSAR_TLS_ALLOW_INSECURE_CONNECTION, builder::allowTlsInsecureConnection);
+        configuration.useOption(
+                PULSAR_TLS_HOSTNAME_VERIFICATION_ENABLE, builder::enableTlsHostnameVerification);
+        configuration.useOption(PULSAR_USE_KEY_STORE_TLS, builder::useKeyStoreTls);
+        configuration.useOption(PULSAR_SSL_PROVIDER, builder::sslProvider);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_TYPE, builder::tlsTrustStoreType);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_PATH, builder::tlsTrustStorePath);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_PASSWORD, builder::tlsTrustStorePassword);
+        configuration.useOption(PULSAR_TLS_CIPHERS, TreeSet::new, builder::tlsCiphers);
+        configuration.useOption(PULSAR_TLS_PROTOCOLS, TreeSet::new, builder::tlsProtocols);
+        configuration.useOption(
+                PULSAR_MEMORY_LIMIT_BYTES, bytes -> builder.memoryLimit(bytes, BYTES));
+        configuration.useOption(
+                PULSAR_STATS_INTERVAL_SECONDS, v -> builder.statsInterval(v, SECONDS));
+        configuration.useOption(
+                PULSAR_CONCURRENT_LOOKUP_REQUEST, builder::maxConcurrentLookupRequests);
+        configuration.useOption(PULSAR_MAX_LOOKUP_REQUEST, builder::maxLookupRequests);
+        configuration.useOption(PULSAR_MAX_LOOKUP_REDIRECTS, builder::maxLookupRedirects);
+        configuration.useOption(
                 PULSAR_MAX_NUMBER_OF_REJECTED_REQUEST_PER_CONNECTION,
                 builder::maxNumberOfRejectedRequestPerConnection);
-        setOptionValue(
-                configuration,
-                PULSAR_KEEP_ALIVE_INTERVAL_SECONDS,
-                v -> builder.keepAliveInterval(v, SECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_CONNECTION_TIMEOUT_MS,
-                v -> builder.connectionTimeout(v, MILLISECONDS));
-        setOptionValue(
-                configuration,
+        configuration.useOption(
+                PULSAR_KEEP_ALIVE_INTERVAL_SECONDS, v -> builder.keepAliveInterval(v, SECONDS));
+        configuration.useOption(
+                PULSAR_CONNECTION_TIMEOUT_MS, v -> builder.connectionTimeout(v, MILLISECONDS));
+        configuration.useOption(
                 PULSAR_INITIAL_BACKOFF_INTERVAL_NANOS,
                 v -> builder.startingBackoffInterval(v, NANOSECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_MAX_BACKOFF_INTERVAL_NANOS,
-                v -> builder.maxBackoffInterval(v, NANOSECONDS));
-        setOptionValue(configuration, PULSAR_ENABLE_BUSY_WAIT, builder::enableBusyWait);
+        configuration.useOption(
+                PULSAR_MAX_BACKOFF_INTERVAL_NANOS, v -> builder.maxBackoffInterval(v, NANOSECONDS));
+        configuration.useOption(PULSAR_ENABLE_BUSY_WAIT, builder::enableBusyWait);
         if (configuration.contains(PULSAR_PROXY_SERVICE_URL)) {
             String proxyServiceUrl = configuration.get(PULSAR_PROXY_SERVICE_URL);
             ProxyProtocol proxyProtocol = configuration.get(PULSAR_PROXY_PROTOCOL);
             builder.proxyServiceUrl(proxyServiceUrl, proxyProtocol);
         }
-        setOptionValue(configuration, PULSAR_ENABLE_TRANSACTION, builder::enableTransaction);
+        configuration.useOption(PULSAR_ENABLE_TRANSACTION, builder::enableTransaction);
 
         return sneakyClient(builder::build);
     }
 
     /**
      * PulsarAdmin shares almost the same configuration with PulsarClient, but we separate this
-     * create method for directly create it.
+     * create method for directly creating it.
      */
-    public static PulsarAdmin createAdmin(Configuration configuration) {
+    public static PulsarAdmin createAdmin(PulsarConfiguration configuration) {
         PulsarAdminBuilder builder = PulsarAdmin.builder();
 
-        setOptionValue(configuration, PULSAR_ADMIN_URL, builder::serviceHttpUrl);
+        // Create the authentication instance for the Pulsar client.
         builder.authentication(createAuthentication(configuration));
-        setOptionValue(
-                configuration, PULSAR_TLS_TRUST_CERTS_FILE_PATH, builder::tlsTrustCertsFilePath);
-        setOptionValue(
-                configuration,
-                PULSAR_TLS_ALLOW_INSECURE_CONNECTION,
-                builder::allowTlsInsecureConnection);
-        setOptionValue(
-                configuration,
-                PULSAR_TLS_HOSTNAME_VERIFICATION_ENABLE,
-                builder::enableTlsHostnameVerification);
-        setOptionValue(configuration, PULSAR_USE_KEY_STORE_TLS, builder::useKeyStoreTls);
-        setOptionValue(configuration, PULSAR_SSL_PROVIDER, builder::sslProvider);
-        setOptionValue(configuration, PULSAR_TLS_TRUST_STORE_TYPE, builder::tlsTrustStoreType);
-        setOptionValue(configuration, PULSAR_TLS_TRUST_STORE_PATH, builder::tlsTrustStorePath);
-        setOptionValue(
-                configuration, PULSAR_TLS_TRUST_STORE_PASSWORD, builder::tlsTrustStorePassword);
-        setOptionValue(configuration, PULSAR_TLS_CIPHERS, TreeSet::new, builder::tlsCiphers);
-        setOptionValue(configuration, PULSAR_TLS_PROTOCOLS, TreeSet::new, builder::tlsProtocols);
-        setOptionValue(
-                configuration,
-                PULSAR_CONNECT_TIMEOUT,
-                v -> builder.connectionTimeout(v, MILLISECONDS));
-        setOptionValue(
-                configuration, PULSAR_READ_TIMEOUT, v -> builder.readTimeout(v, MILLISECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_REQUEST_TIMEOUT,
-                v -> builder.requestTimeout(v, MILLISECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_AUTO_CERT_REFRESH_TIME,
-                v -> builder.autoCertRefreshTime(v, MILLISECONDS));
+
+        configuration.useOption(PULSAR_ADMIN_URL, builder::serviceHttpUrl);
+        configuration.useOption(PULSAR_TLS_TRUST_CERTS_FILE_PATH, builder::tlsTrustCertsFilePath);
+        configuration.useOption(
+                PULSAR_TLS_ALLOW_INSECURE_CONNECTION, builder::allowTlsInsecureConnection);
+        configuration.useOption(
+                PULSAR_TLS_HOSTNAME_VERIFICATION_ENABLE, builder::enableTlsHostnameVerification);
+        configuration.useOption(PULSAR_USE_KEY_STORE_TLS, builder::useKeyStoreTls);
+        configuration.useOption(PULSAR_SSL_PROVIDER, builder::sslProvider);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_TYPE, builder::tlsTrustStoreType);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_PATH, builder::tlsTrustStorePath);
+        configuration.useOption(PULSAR_TLS_TRUST_STORE_PASSWORD, builder::tlsTrustStorePassword);
+        configuration.useOption(PULSAR_TLS_CIPHERS, TreeSet::new, builder::tlsCiphers);
+        configuration.useOption(PULSAR_TLS_PROTOCOLS, TreeSet::new, builder::tlsProtocols);
+        configuration.useOption(
+                PULSAR_CONNECT_TIMEOUT, v -> builder.connectionTimeout(v, MILLISECONDS));
+        configuration.useOption(PULSAR_READ_TIMEOUT, v -> builder.readTimeout(v, MILLISECONDS));
+        configuration.useOption(
+                PULSAR_REQUEST_TIMEOUT, v -> builder.requestTimeout(v, MILLISECONDS));
+        configuration.useOption(
+                PULSAR_AUTO_CERT_REFRESH_TIME, v -> builder.autoCertRefreshTime(v, MILLISECONDS));
 
         return sneakyClient(builder::build);
     }
@@ -220,7 +192,7 @@ public final class PulsarConfigUtils {
      *
      * <p>This method behavior is the same as the pulsar command line tools.
      */
-    private static Authentication createAuthentication(Configuration configuration) {
+    private static Authentication createAuthentication(PulsarConfiguration configuration) {
         if (configuration.contains(PULSAR_AUTH_PLUGIN_CLASS_NAME)) {
             String authPluginClassName = configuration.get(PULSAR_AUTH_PLUGIN_CLASS_NAME);
 
@@ -242,35 +214,4 @@ public final class PulsarConfigUtils {
 
         return AuthenticationDisabled.INSTANCE;
     }
-
-    /** Get the option value str from given config, convert it into the real value instance. */
-    public static <F, T> T getOptionValue(
-            Configuration configuration, ConfigOption<F> option, Function<F, T> convertor) {
-        F value = configuration.get(option);
-        if (value != null) {
-            return convertor.apply(value);
-        } else {
-            return null;
-        }
-    }
-
-    /** Set the config option's value to a given builder. */
-    public static <T> void setOptionValue(
-            Configuration configuration, ConfigOption<T> option, Consumer<T> setter) {
-        setOptionValue(configuration, option, identity(), setter);
-    }
-
-    /**
-     * Query the config option's value, convert it into a required type, set it to a given builder.
-     */
-    public static <T, V> void setOptionValue(
-            Configuration configuration,
-            ConfigOption<T> option,
-            Function<T, V> convertor,
-            Consumer<V> setter) {
-        if (configuration.contains(option)) {
-            V value = getOptionValue(configuration, option, convertor);
-            setter.accept(value);
-        }
-    }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilder.java
new file mode 100644
index 0000000..b8ce86f
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilder.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.function.Function;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A builder for building the unmodifiable {@link Configuration} instance. Provides the common
+ * validation logic for the Pulsar source & sink.
+ */
+@Internal
+public final class PulsarConfigBuilder {
+
+    private final Configuration configuration = new Configuration();
+
+    /** Check whether the config contains the given option. */
+    public <T> boolean contains(ConfigOption<T> option) {
+        return configuration.contains(option);
+    }
+
+    /**
+     * Get an option-related config value. If no value is set, the default value defined in the
+     * option is returned instead.
+     *
+     * @param key Config option instance.
+     */
+    public <T> T get(ConfigOption<T> key) {
+        return configuration.get(key);
+    }
+
+    /**
+     * Add a config option with a non-null value. The config key shouldn't be duplicated.
+     *
+     * @param option Config option instance, contains key & type definition.
+     * @param value The config value which shouldn't be null.
+     */
+    public <T> void set(ConfigOption<T> option, T value) {
+        checkNotNull(option);
+        checkNotNull(value);
+
+        if (configuration.contains(option)) {
+            T oldValue = configuration.get(option);
+            checkArgument(
+                    Objects.equals(oldValue, value),
+                    "This option %s has been set to value %s.",
+                    option.key(),
+                    oldValue);
+        } else {
+            configuration.set(option, value);
+        }
+    }
+
+    /**
+     * Fill in a set of configs which shouldn't be duplicated.
+     *
+     * @param config A set of configs.
+     */
+    public void set(Configuration config) {
+        Map<String, String> existedConfigs = configuration.toMap();
+        List<String> duplicatedKeys = new ArrayList<>();
+        for (Map.Entry<String, String> entry : config.toMap().entrySet()) {
+            String key = entry.getKey();
+            if (existedConfigs.containsKey(key)) {
+                String value2 = existedConfigs.get(key);
+                if (!Objects.equals(value2, entry.getValue())) {
+                    duplicatedKeys.add(key);
+                }
+            }
+        }
+        checkArgument(
+                duplicatedKeys.isEmpty(),
+                "Invalid configuration, these keys %s are already exist with different config value.",
+                duplicatedKeys);
+        configuration.addAll(config);
+    }
+
+    /**
+     * Fill in a set of config properties which shouldn't be duplicated.
+     *
+     * @param properties A config which could be string type.
+     */
+    public void set(Properties properties) {
+        properties.keySet().stream()
+                .map(String::valueOf)
+                .forEach(
+                        key -> {
+                            ConfigOption<String> option =
+                                    ConfigOptions.key(key).stringType().noDefaultValue();
+                            Object value = properties.get(key);
+
+                            if (value != null) {
+                                set(option, value.toString());
+                            }
+                        });
+    }
+
+    /**
+     * Override the option with the given value. Unlike {@link #set(ConfigOption, Object)}, it
+     * does not check whether the option has already been set.
+     */
+    public <T> void override(ConfigOption<T> option, T value) {
+        checkNotNull(option);
+        checkNotNull(value);
+
+        configuration.set(option, value);
+    }
+
+    /** Validate the current config instance and return an unmodifiable configuration. */
+    public <T extends PulsarConfiguration> T build(
+            PulsarConfigValidator validator, Function<Configuration, T> constructor) {
+        validator.validate(configuration);
+        return constructor.apply(configuration);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidator.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidator.java
new file mode 100644
index 0000000..ac8f296
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidator.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.Configuration;
+
+import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList;
+import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableSet;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
+
+/**
+ * A config validator for building {@link PulsarConfiguration} in {@link PulsarConfigBuilder}. It's
+ * used by both the source & sink builders.
+ *
+ * <p>We would validate:
+ *
+ * <ul>
+ *   <li>If the user has provided the required config options.
+ *   <li>If the user has provided some conflict options.
+ * </ul>
+ */
+@Internal
+public class PulsarConfigValidator {
+
+    private final List<Set<ConfigOption<?>>> conflictOptions;
+    private final Set<ConfigOption<?>> requiredOptions;
+
+    private PulsarConfigValidator(
+            List<Set<ConfigOption<?>>> conflictOptions, Set<ConfigOption<?>> requiredOptions) {
+        this.conflictOptions = conflictOptions;
+        this.requiredOptions = requiredOptions;
+    }
+
+    /** Package-private validation method used in {@link PulsarConfigBuilder}. */
+    void validate(Configuration configuration) {
+        requiredOptions.forEach(
+                option ->
+                        checkArgument(
+                                configuration.contains(option),
+                                "Config option %s is not provided for pulsar client.",
+                                option));
+        conflictOptions.forEach(
+                options -> {
+                    long nums = options.stream().filter(configuration::contains).count();
+                    checkArgument(
+                            nums <= 1,
+                            "Conflict config options %s were provided, we only support one of them for creating pulsar client.",
+                            options);
+                });
+    }
+
+    /** Return the builder for building {@link PulsarConfigValidator}. */
+    public static PulsarConfigValidatorBuilder builder() {
+        return new PulsarConfigValidatorBuilder();
+    }
+
+    /** Builder pattern for building {@link PulsarConfigValidator}. */
+    public static class PulsarConfigValidatorBuilder {
+
+        private final List<Set<ConfigOption<?>>> conflictOptions = new ArrayList<>();
+        private final Set<ConfigOption<?>> requiredOptions = new HashSet<>();
+
+        public PulsarConfigValidatorBuilder conflictOptions(ConfigOption<?>... options) {
+            checkArgument(options.length > 1, "You should provide at least two conflict options.");
+            conflictOptions.add(ImmutableSet.copyOf(options));
+            return this;
+        }
+
+        public PulsarConfigValidatorBuilder requiredOption(ConfigOption<?> option) {
+            requiredOptions.add(option);
+            return this;
+        }
+
+        public PulsarConfigValidator build() {
+            ImmutableList<Set<ConfigOption<?>>> conflict = ImmutableList.copyOf(conflictOptions);
+            Set<ConfigOption<?>> required = ImmutableSet.copyOf(requiredOptions);
+
+            return new PulsarConfigValidator(conflict, required);
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfiguration.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfiguration.java
new file mode 100644
index 0000000..3e64c66
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarConfiguration.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.UnmodifiableConfiguration;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import static java.util.function.Function.identity;
+import static java.util.stream.Collectors.toList;
+
+/**
+ * An unmodifiable {@link Configuration} for Pulsar. We provide extra methods for building the
+ * different Pulsar client instances.
+ */
+public abstract class PulsarConfiguration extends UnmodifiableConfiguration {
+    private static final long serialVersionUID = 3050894147145572345L;
+
+    /**
+     * Creates a new PulsarConfiguration, which holds a copy of the given configuration that can't
+     * be altered.
+     *
+     * @param config The configuration with the original contents.
+     */
+    protected PulsarConfiguration(Configuration config) {
+        super(config);
+    }
+
+    /**
+     * Get the option value by a prefix. We would return an empty map if the option doesn't exist.
+     */
+    public Map<String, String> getProperties(ConfigOption<Map<String, String>> option) {
+        Map<String, String> properties = new HashMap<>();
+        if (contains(option)) {
+            Map<String, String> map = get(option);
+            properties.putAll(map);
+        }
+
+        // Filter the sub config option. These options could be provided by SQL.
+        String prefix = option.key() + ".";
+        List<String> keys =
+                keySet().stream()
+                        .filter(key -> key.startsWith(prefix) && key.length() > prefix.length())
+                        .collect(toList());
+
+        // Put these config options' value into return result.
+        for (String key : keys) {
+            ConfigOption<String> o = ConfigOptions.key(key).stringType().noDefaultValue();
+            String value = get(o);
+            properties.put(key.substring(prefix.length()), value);
+        }
+
+        return properties;
+    }
+
+    /** Get an option value from the given config, and convert it into a new value instance. */
+    public <F, T> T get(ConfigOption<F> option, Function<F, T> convertor) {
+        F value = get(option);
+        if (value != null) {
+            return convertor.apply(value);
+        } else {
+            return null;
+        }
+    }
+
+    /** Set the config option's value to a given builder. */
+    public <T> void useOption(ConfigOption<T> option, Consumer<T> setter) {
+        useOption(option, identity(), setter);
+    }
+
+    /**
+     * Query the config option's value, convert it into a required type, set it to a given builder.
+     */
+    public <T, V> void useOption(
+            ConfigOption<T> option, Function<T, V> convertor, Consumer<V> setter) {
+        if (contains(option) || option.hasDefaultValue()) {
+            V value = get(option, convertor);
+            setter.accept(value);
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarOptions.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarOptions.java
index db90bf1..b06d9ed 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarOptions.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/config/PulsarOptions.java
@@ -47,7 +47,6 @@ import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.CLIE
             @ConfigGroup(name = "PulsarClient", keyPrefix = CLIENT_CONFIG_PREFIX),
             @ConfigGroup(name = "PulsarAdmin", keyPrefix = ADMIN_CONFIG_PREFIX)
         })
-@SuppressWarnings("java:S1192")
 public final class PulsarOptions {
 
     // Pulsar client API config prefix.
@@ -78,8 +77,7 @@ public final class PulsarOptions {
                                             "To connect to Pulsar using client libraries, you need to specify a Pulsar protocol URL.")
                                     .linebreak()
                                     .text(
-                                            "You can assign Pulsar protocol URLs to specific clusters and use the %s scheme.",
-                                            code("pulsar"))
+                                            "You can assign Pulsar protocol URLs to specific clusters and use the Pulsar scheme.")
                                     .linebreak()
                                     .list(
                                             text(
@@ -208,8 +206,8 @@ public final class PulsarOptions {
                                     .text(
                                             " On the other hand, sending out a huge number of small packets might limit the overall throughput.")
                                     .text(
-                                            " Therefore, if latency is not a concern, it is recommended to set the %s flag to %s.",
-                                            code("useTcpNoDelay"), code("false"))
+                                            " Therefore, if latency is not a concern, it is recommended to set this option to %s.",
+                                            code("false"))
                                     .linebreak()
                                     .text("By default, it is set to %s.", code("true"))
                                     .build());
@@ -265,13 +263,13 @@ public final class PulsarOptions {
                                             "The maximum number of lookup requests allowed on each broker connection to prevent overload on the broker.")
                                     .text(
                                             " It should be greater than %s.",
-                                            code("maxConcurrentLookupRequests"))
+                                            code("pulsar.client.concurrentLookupRequest"))
                                     .text(
                                             " Requests that inside %s are already sent to broker,",
-                                            code("maxConcurrentLookupRequests"))
+                                            code("pulsar.client.concurrentLookupRequest"))
                                     .text(
                                             " and requests beyond %s and under %s will wait in each client cnx.",
-                                            code("maxConcurrentLookupRequests"),
+                                            code("pulsar.client.concurrentLookupRequest"),
                                             code("maxLookupRequests"))
                                     .build());
 
@@ -314,12 +312,12 @@ public final class PulsarOptions {
                                             "If the duration passes without a response from a broker, the connection attempt is dropped.")
                                     .build());
 
-    // TODO This option would be exposed by Pulsar's ClientBuilder in the next Pulsar release.
     public static final ConfigOption<Integer> PULSAR_REQUEST_TIMEOUT_MS =
             ConfigOptions.key(CLIENT_CONFIG_PREFIX + "requestTimeoutMs")
                     .intType()
                     .defaultValue(60000)
-                    .withDescription("Maximum duration (in ms) for completing a request.");
+                    .withDescription(
+                            "Maximum duration (in ms) for completing a request. This config option is not supported before Pulsar 2.8.1");
 
     public static final ConfigOption<Long> PULSAR_INITIAL_BACKOFF_INTERVAL_NANOS =
             ConfigOptions.key(CLIENT_CONFIG_PREFIX + "initialBackoffIntervalNanos")
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSource.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSource.java
index f2b2e39..a6c48d1 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSource.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSource.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.connector.pulsar.source;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.connector.source.Boundedness;
@@ -27,7 +28,6 @@ import org.apache.flink.api.connector.source.SourceReaderContext;
 import org.apache.flink.api.connector.source.SplitEnumerator;
 import org.apache.flink.api.connector.source.SplitEnumeratorContext;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumState;
 import org.apache.flink.connector.pulsar.source.enumerator.PulsarSourceEnumStateSerializer;
@@ -72,11 +72,9 @@ public final class PulsarSource<OUT>
     private static final long serialVersionUID = 7773108631275567433L;
 
     /**
-     * The common configuration for pulsar source, we don't support the pulsar's configuration class
+     * The configuration for pulsar source, we don't support the pulsar's configuration class
      * directly.
      */
-    private final Configuration configuration;
-
     private final SourceConfiguration sourceConfiguration;
 
     private final PulsarSubscriber subscriber;
@@ -97,16 +95,14 @@ public final class PulsarSource<OUT>
      * PulsarSourceBuilder}.
      */
     PulsarSource(
-            Configuration configuration,
+            SourceConfiguration sourceConfiguration,
             PulsarSubscriber subscriber,
             RangeGenerator rangeGenerator,
             StartCursor startCursor,
             StopCursor stopCursor,
             Boundedness boundedness,
             PulsarDeserializationSchema<OUT> deserializationSchema) {
-
-        this.configuration = configuration;
-        this.sourceConfiguration = new SourceConfiguration(configuration);
+        this.sourceConfiguration = sourceConfiguration;
         this.subscriber = subscriber;
         this.rangeGenerator = rangeGenerator;
         this.startCursor = startCursor;
@@ -120,7 +116,6 @@ public final class PulsarSource<OUT>
      *
      * @return a Pulsar source builder.
      */
-    @SuppressWarnings("java:S4977")
     public static <OUT> PulsarSourceBuilder<OUT> builder() {
         return new PulsarSourceBuilder<>();
     }
@@ -130,17 +125,20 @@ public final class PulsarSource<OUT>
         return boundedness;
     }
 
+    @Internal
     @Override
     public SourceReader<OUT, PulsarPartitionSplit> createReader(SourceReaderContext readerContext)
             throws Exception {
         // Initialize the deserialization schema before creating the pulsar reader.
-        deserializationSchema.open(
-                new PulsarDeserializationSchemaInitializationContext(readerContext));
+        PulsarDeserializationSchemaInitializationContext initializationContext =
+                new PulsarDeserializationSchemaInitializationContext(readerContext);
+        deserializationSchema.open(initializationContext, sourceConfiguration);
 
         return PulsarSourceReaderFactory.create(
-                readerContext, deserializationSchema, configuration, sourceConfiguration);
+                readerContext, deserializationSchema, sourceConfiguration);
     }
 
+    @Internal
     @Override
     public SplitEnumerator<PulsarPartitionSplit, PulsarSourceEnumState> createEnumerator(
             SplitEnumeratorContext<PulsarPartitionSplit> enumContext) {
@@ -150,12 +148,12 @@ public final class PulsarSource<OUT>
                 subscriber,
                 startCursor,
                 rangeGenerator,
-                configuration,
                 sourceConfiguration,
                 enumContext,
                 assignmentState);
     }
 
+    @Internal
     @Override
     public SplitEnumerator<PulsarPartitionSplit, PulsarSourceEnumState> restoreEnumerator(
             SplitEnumeratorContext<PulsarPartitionSplit> enumContext,
@@ -166,17 +164,18 @@ public final class PulsarSource<OUT>
                 subscriber,
                 startCursor,
                 rangeGenerator,
-                configuration,
                 sourceConfiguration,
                 enumContext,
                 assignmentState);
     }
 
+    @Internal
     @Override
     public SimpleVersionedSerializer<PulsarPartitionSplit> getSplitSerializer() {
         return PulsarPartitionSplitSerializer.INSTANCE;
     }
 
+    @Internal
     @Override
     public SimpleVersionedSerializer<PulsarSourceEnumState> getEnumeratorCheckpointSerializer() {
         return PulsarSourceEnumStateSerializer.INSTANCE;
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
index dd7f41e..0959b1b 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
@@ -23,8 +23,9 @@ import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.connector.source.Boundedness;
 import org.apache.flink.configuration.ConfigOption;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.configuration.UnmodifiableConfiguration;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfigBuilder;
 import org.apache.flink.connector.pulsar.common.config.PulsarOptions;
+import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.subscriber.PulsarSubscriber;
@@ -40,23 +41,22 @@ import org.apache.pulsar.client.api.SubscriptionType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Map;
-import java.util.Objects;
+import java.util.Properties;
 import java.util.regex.Pattern;
 
 import static java.lang.Boolean.FALSE;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ENABLE_TRANSACTION;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SERVICE_URL;
+import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_CONSUMER_NAME;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_PARTITION_DISCOVERY_INTERVAL_MS;
+import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_READ_TRANSACTION_TIMEOUT;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_SUBSCRIPTION_NAME;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_SUBSCRIPTION_TYPE;
-import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_TRANSACTION_TIMEOUT_MILLIS;
-import static org.apache.flink.connector.pulsar.source.config.PulsarSourceConfigUtils.checkConfigurations;
+import static org.apache.flink.connector.pulsar.source.config.PulsarSourceConfigUtils.SOURCE_CONFIG_VALIDATOR;
 import static org.apache.flink.util.InstantiationUtil.isSerializable;
 import static org.apache.flink.util.Preconditions.checkArgument;
 import static org.apache.flink.util.Preconditions.checkNotNull;
@@ -110,7 +110,8 @@ import static org.apache.flink.util.Preconditions.checkState;
 public final class PulsarSourceBuilder<OUT> {
     private static final Logger LOG = LoggerFactory.getLogger(PulsarSourceBuilder.class);
 
-    private final Configuration configuration;
+    private final PulsarConfigBuilder configBuilder;
+
     private PulsarSubscriber subscriber;
     private RangeGenerator rangeGenerator;
     private StartCursor startCursor;
@@ -120,9 +121,10 @@ public final class PulsarSourceBuilder<OUT> {
 
     // private builder constructor.
     PulsarSourceBuilder() {
-        this.configuration = new Configuration();
+        this.configBuilder = new PulsarConfigBuilder();
         this.startCursor = StartCursor.defaultStartCursor();
         this.stopCursor = StopCursor.defaultStopCursor();
+        this.boundedness = Boundedness.CONTINUOUS_UNBOUNDED;
     }
 
     /**
@@ -166,17 +168,7 @@ public final class PulsarSourceBuilder<OUT> {
      *     Subscriptions</a>
      */
     public PulsarSourceBuilder<OUT> setSubscriptionType(SubscriptionType subscriptionType) {
-        if (configuration.contains(PULSAR_SUBSCRIPTION_TYPE)) {
-            SubscriptionType existedType = configuration.get(PULSAR_SUBSCRIPTION_TYPE);
-            checkArgument(
-                    existedType == subscriptionType,
-                    "Can't override the subscription type %s with a new type %s",
-                    existedType,
-                    subscriptionType);
-        } else {
-            configuration.set(PULSAR_SUBSCRIPTION_TYPE, subscriptionType);
-        }
-        return this;
+        return setConfig(PULSAR_SUBSCRIPTION_TYPE, subscriptionType);
     }
 
     /**
@@ -266,6 +258,14 @@ public final class PulsarSourceBuilder<OUT> {
     }
 
     /**
+     * The consumer name is informative and it can be used to identify a particular consumer
+     * instance from the topic stats.
+     */
+    public PulsarSourceBuilder<OUT> setConsumerName(String consumerName) {
+        return setConfig(PULSAR_CONSUMER_NAME, consumerName);
+    }
+
+    /**
      * Set a topic range generator for Key_Shared subscription.
      *
      * @param rangeGenerator A generator which would generate a set of {@link TopicRange} for given
@@ -273,8 +273,8 @@ public final class PulsarSourceBuilder<OUT> {
      * @return this PulsarSourceBuilder.
      */
     public PulsarSourceBuilder<OUT> setRangeGenerator(RangeGenerator rangeGenerator) {
-        if (configuration.contains(PULSAR_SUBSCRIPTION_TYPE)) {
-            SubscriptionType subscriptionType = configuration.get(PULSAR_SUBSCRIPTION_TYPE);
+        if (configBuilder.contains(PULSAR_SUBSCRIPTION_TYPE)) {
+            SubscriptionType subscriptionType = configBuilder.get(PULSAR_SUBSCRIPTION_TYPE);
             checkArgument(
                     subscriptionType == SubscriptionType.Key_Shared,
                     "Key_Shared subscription should be used for custom rangeGenerator instead of %s",
@@ -358,7 +358,7 @@ public final class PulsarSourceBuilder<OUT> {
     }
 
     /**
-     * Set an arbitrary property for the PulsarSource and PulsarConsumer. The valid keys can be
+     * Set an arbitrary property for the PulsarSource and Pulsar Consumer. The valid keys can be
      * found in {@link PulsarSourceOptions} and {@link PulsarOptions}.
      *
      * <p>Make sure the option could be set only once or with same value.
@@ -368,43 +368,33 @@ public final class PulsarSourceBuilder<OUT> {
      * @return this PulsarSourceBuilder.
      */
     public <T> PulsarSourceBuilder<OUT> setConfig(ConfigOption<T> key, T value) {
-        checkNotNull(key);
-        checkNotNull(value);
-        if (configuration.contains(key)) {
-            T oldValue = configuration.get(key);
-            checkArgument(
-                    Objects.equals(oldValue, value),
-                    "This option %s has been already set to value %s.",
-                    key.key(),
-                    oldValue);
-        } else {
-            configuration.set(key, value);
-        }
+        configBuilder.set(key, value);
         return this;
     }
 
     /**
-     * Set arbitrary properties for the PulsarSource and PulsarConsumer. The valid keys can be found
-     * in {@link PulsarSourceOptions} and {@link PulsarOptions}.
+     * Set arbitrary properties for the PulsarSource and Pulsar Consumer. The valid keys can be
+     * found in {@link PulsarSourceOptions} and {@link PulsarOptions}.
      *
      * @param config the config to set for the PulsarSource.
      * @return this PulsarSourceBuilder.
      */
     public PulsarSourceBuilder<OUT> setConfig(Configuration config) {
-        Map<String, String> existedConfigs = configuration.toMap();
-        List<String> duplicatedKeys = new ArrayList<>();
-        for (Map.Entry<String, String> entry : config.toMap().entrySet()) {
-            String key = entry.getKey();
-            String value2 = existedConfigs.get(key);
-            if (value2 != null && !value2.equals(entry.getValue())) {
-                duplicatedKeys.add(key);
-            }
-        }
-        checkArgument(
-                duplicatedKeys.isEmpty(),
-                "Invalid configuration, these keys %s are already exist with different config value.",
-                duplicatedKeys);
-        configuration.addAll(config);
+        configBuilder.set(config);
+        return this;
+    }
+
+    /**
+     * Set arbitrary properties for the PulsarSource and Pulsar Consumer. The valid keys can be
+     * found in {@link PulsarSourceOptions} and {@link PulsarOptions}.
+     *
+     * <p>This method is mainly used for future flink SQL binding.
+     *
+     * @param properties the config properties to set for the PulsarSource.
+     * @return this PulsarSourceBuilder.
+     */
+    public PulsarSourceBuilder<OUT> setProperties(Properties properties) {
+        configBuilder.set(properties);
         return this;
     }
 
@@ -415,13 +405,11 @@ public final class PulsarSourceBuilder<OUT> {
      */
     @SuppressWarnings("java:S3776")
     public PulsarSource<OUT> build() {
-        // Check builder configuration.
-        checkConfigurations(configuration);
 
         // Ensure the topic subscriber for pulsar.
         checkNotNull(subscriber, "No topic names or topic pattern are provided.");
 
-        SubscriptionType subscriptionType = configuration.get(PULSAR_SUBSCRIPTION_TYPE);
+        SubscriptionType subscriptionType = configBuilder.get(PULSAR_SUBSCRIPTION_TYPE);
         if (subscriptionType == SubscriptionType.Key_Shared) {
             if (rangeGenerator == null) {
                 LOG.warn(
@@ -439,30 +427,30 @@ public final class PulsarSourceBuilder<OUT> {
             this.boundedness = Boundedness.CONTINUOUS_UNBOUNDED;
         }
         if (boundedness == Boundedness.BOUNDED
-                && configuration.get(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS) >= 0) {
+                && configBuilder.get(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS) >= 0) {
             LOG.warn(
                     "{} property is overridden to -1 because the source is bounded.",
                     PULSAR_PARTITION_DISCOVERY_INTERVAL_MS);
-            configuration.set(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS, -1L);
+            configBuilder.override(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS, -1L);
         }
 
         checkNotNull(deserializationSchema, "deserializationSchema should be set.");
 
         // Enable transaction if the cursor auto commit is disabled for Key_Shared & Shared.
-        if (FALSE.equals(configuration.get(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE))
+        if (FALSE.equals(configBuilder.get(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE))
                 && (subscriptionType == SubscriptionType.Key_Shared
                         || subscriptionType == SubscriptionType.Shared)) {
             LOG.info(
                     "Pulsar cursor auto commit is disabled, make sure checkpoint is enabled "
                             + "and your pulsar cluster is support the transaction.");
-            configuration.set(PULSAR_ENABLE_TRANSACTION, true);
+            configBuilder.override(PULSAR_ENABLE_TRANSACTION, true);
 
-            if (!configuration.contains(PULSAR_TRANSACTION_TIMEOUT_MILLIS)) {
+            if (!configBuilder.contains(PULSAR_READ_TRANSACTION_TIMEOUT)) {
                 LOG.warn(
                         "The default pulsar transaction timeout is 3 hours, "
                                 + "make sure it was greater than your checkpoint interval.");
             } else {
-                Long timeout = configuration.get(PULSAR_TRANSACTION_TIMEOUT_MILLIS);
+                Long timeout = configBuilder.get(PULSAR_READ_TRANSACTION_TIMEOUT);
                 LOG.warn(
                         "The configured transaction timeout is {} mille seconds, "
                                 + "make sure it was greater than your checkpoint interval.",
@@ -470,16 +458,22 @@ public final class PulsarSourceBuilder<OUT> {
             }
         }
 
+        if (!configBuilder.contains(PULSAR_CONSUMER_NAME)) {
+            LOG.warn(
+                    "We recommend set a readable consumer name through setConsumerName(String) in production mode.");
+        }
+
         // Since these implementation could be a lambda, make sure they are serializable.
         checkState(isSerializable(startCursor), "StartCursor isn't serializable");
         checkState(isSerializable(stopCursor), "StopCursor isn't serializable");
         checkState(isSerializable(rangeGenerator), "RangeGenerator isn't serializable");
 
-        // Make the configuration unmodifiable.
-        UnmodifiableConfiguration config = new UnmodifiableConfiguration(configuration);
+        // Check builder configuration.
+        SourceConfiguration sourceConfiguration =
+                configBuilder.build(SOURCE_CONFIG_VALIDATOR, SourceConfiguration::new);
 
         return new PulsarSource<>(
-                config,
+                sourceConfiguration,
                 subscriber,
                 rangeGenerator,
                 startCursor,
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceOptions.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceOptions.java
index c319915..39a7397 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceOptions.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceOptions.java
@@ -47,7 +47,7 @@ import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.SOURC
  * PulsarSourceBuilder#setConfig(ConfigOption, Object)}. The {@link PulsarOptions} is also required
  * for pulsar source.
  *
- * @see PulsarOptions
+ * @see PulsarOptions for shared configure options.
  */
 @PublicEvolving
 @ConfigGroups(
@@ -122,7 +122,7 @@ public final class PulsarSourceOptions {
                                             " We would automatically commit the cursor using the given period (in ms).")
                                     .build());
 
-    public static final ConfigOption<Long> PULSAR_TRANSACTION_TIMEOUT_MILLIS =
+    public static final ConfigOption<Long> PULSAR_READ_TRANSACTION_TIMEOUT =
             ConfigOptions.key(SOURCE_CONFIG_PREFIX + "transactionTimeoutMillis")
                     .longType()
                     .defaultValue(Duration.ofHours(3).toMillis())
@@ -139,6 +139,14 @@ public final class PulsarSourceOptions {
                                             "The value (in ms) should be greater than the checkpoint interval.")
                                     .build());
 
+    /**
+     * @deprecated Use {@link #PULSAR_READ_TRANSACTION_TIMEOUT} instead. This would be removed in
+     *     the next release.
+     */
+    @Deprecated
+    public static final ConfigOption<Long> PULSAR_TRANSACTION_TIMEOUT_MILLIS =
+            PULSAR_READ_TRANSACTION_TIMEOUT;
+
     public static final ConfigOption<Long> PULSAR_MAX_FETCH_TIME =
             ConfigOptions.key(SOURCE_CONFIG_PREFIX + "maxFetchTime")
                     .longType()
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/CursorVerification.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/CursorVerification.java
index 5b77293..fc70bdd 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/CursorVerification.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/CursorVerification.java
@@ -18,15 +18,32 @@
 
 package org.apache.flink.connector.pulsar.source.config;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.DescribedEnum;
+import org.apache.flink.configuration.description.InlineElement;
+
+import static org.apache.flink.configuration.description.TextElement.text;
 
 /** The enum class for defining the cursor verify behavior. */
 @PublicEvolving
-public enum CursorVerification {
+public enum CursorVerification implements DescribedEnum {
 
     /** We would just fail the consuming. */
-    FAIL_ON_MISMATCH,
+    FAIL_ON_MISMATCH(text("Fail the consuming from Pulsar when we don't find the related cursor.")),
 
     /** Print a warn message and start consuming from the valid offset. */
-    WARN_ON_MISMATCH,
+    WARN_ON_MISMATCH(text("Print a warn message and start consuming from the valid offset."));
+
+    private final InlineElement desc;
+
+    CursorVerification(InlineElement desc) {
+        this.desc = desc;
+    }
+
+    @Internal
+    @Override
+    public InlineElement getDescription() {
+        return desc;
+    }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/PulsarSourceConfigUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/PulsarSourceConfigUtils.java
index 7ac480a..adb8a03 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/PulsarSourceConfigUtils.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/PulsarSourceConfigUtils.java
@@ -19,12 +19,7 @@
 package org.apache.flink.connector.pulsar.source.config;
 
 import org.apache.flink.annotation.Internal;
-import org.apache.flink.configuration.ConfigOption;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.util.Preconditions;
-
-import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList;
-import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableSet;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfigValidator;
 
 import org.apache.pulsar.client.api.Consumer;
 import org.apache.pulsar.client.api.ConsumerBuilder;
@@ -32,14 +27,12 @@ import org.apache.pulsar.client.api.DeadLetterPolicy;
 import org.apache.pulsar.client.api.PulsarClient;
 import org.apache.pulsar.client.api.Schema;
 
-import java.util.List;
+import java.util.Map;
 import java.util.Optional;
-import java.util.Set;
 
 import static java.util.concurrent.TimeUnit.MICROSECONDS;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.setOptionValue;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAMS;
 import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAM_MAP;
@@ -79,117 +72,78 @@ public final class PulsarSourceConfigUtils {
         // No need to create instance.
     }
 
-    private static final List<Set<ConfigOption<?>>> CONFLICT_SOURCE_OPTIONS =
-            ImmutableList.<Set<ConfigOption<?>>>builder()
-                    .add(ImmutableSet.of(PULSAR_AUTH_PARAMS, PULSAR_AUTH_PARAM_MAP))
-                    .build();
-
-    private static final Set<ConfigOption<?>> REQUIRED_SOURCE_OPTIONS =
-            ImmutableSet.<ConfigOption<?>>builder()
-                    .add(PULSAR_SERVICE_URL)
-                    .add(PULSAR_ADMIN_URL)
-                    .add(PULSAR_SUBSCRIPTION_NAME)
+    public static final PulsarConfigValidator SOURCE_CONFIG_VALIDATOR =
+            PulsarConfigValidator.builder()
+                    .requiredOption(PULSAR_SERVICE_URL)
+                    .requiredOption(PULSAR_ADMIN_URL)
+                    .requiredOption(PULSAR_SUBSCRIPTION_NAME)
+                    .conflictOptions(PULSAR_AUTH_PARAMS, PULSAR_AUTH_PARAM_MAP)
                     .build();
 
-    /**
-     * Helper method for checking client related config options. We would validate:
-     *
-     * <ul>
-     *   <li>If user have provided the required client config options.
-     *   <li>If user have provided some conflict options.
-     * </ul>
-     */
-    public static void checkConfigurations(Configuration configuration) {
-        REQUIRED_SOURCE_OPTIONS.forEach(
-                option ->
-                        Preconditions.checkArgument(
-                                configuration.contains(option),
-                                "Config option %s is not provided for pulsar source.",
-                                option));
-
-        CONFLICT_SOURCE_OPTIONS.forEach(
-                options -> {
-                    long nums = options.stream().filter(configuration::contains).count();
-                    Preconditions.checkArgument(
-                            nums <= 1,
-                            "Conflict config options %s were provided, we only support one of them for creating pulsar source.",
-                            options);
-                });
-    }
-
     /** Create a pulsar consumer builder by using the given Configuration. */
     public static <T> ConsumerBuilder<T> createConsumerBuilder(
-            PulsarClient client, Schema<T> schema, Configuration configuration) {
+            PulsarClient client, Schema<T> schema, SourceConfiguration configuration) {
         ConsumerBuilder<T> builder = client.newConsumer(schema);
 
-        setOptionValue(configuration, PULSAR_SUBSCRIPTION_NAME, builder::subscriptionName);
-        setOptionValue(
-                configuration, PULSAR_ACK_TIMEOUT_MILLIS, v -> builder.ackTimeout(v, MILLISECONDS));
-        setOptionValue(configuration, PULSAR_ACK_RECEIPT_ENABLED, builder::isAckReceiptEnabled);
-        setOptionValue(
-                configuration,
-                PULSAR_TICK_DURATION_MILLIS,
-                v -> builder.ackTimeoutTickTime(v, MILLISECONDS));
-        setOptionValue(
-                configuration,
+        configuration.useOption(PULSAR_SUBSCRIPTION_NAME, builder::subscriptionName);
+        configuration.useOption(
+                PULSAR_ACK_TIMEOUT_MILLIS, v -> builder.ackTimeout(v, MILLISECONDS));
+        configuration.useOption(PULSAR_ACK_RECEIPT_ENABLED, builder::isAckReceiptEnabled);
+        configuration.useOption(
+                PULSAR_TICK_DURATION_MILLIS, v -> builder.ackTimeoutTickTime(v, MILLISECONDS));
+        configuration.useOption(
                 PULSAR_NEGATIVE_ACK_REDELIVERY_DELAY_MICROS,
                 v -> builder.negativeAckRedeliveryDelay(v, MICROSECONDS));
-        setOptionValue(configuration, PULSAR_SUBSCRIPTION_TYPE, builder::subscriptionType);
-        setOptionValue(configuration, PULSAR_SUBSCRIPTION_MODE, builder::subscriptionMode);
-        setOptionValue(configuration, PULSAR_CRYPTO_FAILURE_ACTION, builder::cryptoFailureAction);
-        setOptionValue(configuration, PULSAR_RECEIVER_QUEUE_SIZE, builder::receiverQueueSize);
-        setOptionValue(
-                configuration,
+        configuration.useOption(PULSAR_SUBSCRIPTION_TYPE, builder::subscriptionType);
+        configuration.useOption(PULSAR_SUBSCRIPTION_MODE, builder::subscriptionMode);
+        configuration.useOption(PULSAR_CRYPTO_FAILURE_ACTION, builder::cryptoFailureAction);
+        configuration.useOption(PULSAR_RECEIVER_QUEUE_SIZE, builder::receiverQueueSize);
+        configuration.useOption(
                 PULSAR_ACKNOWLEDGEMENTS_GROUP_TIME_MICROS,
                 v -> builder.acknowledgmentGroupTime(v, MICROSECONDS));
-        setOptionValue(
-                configuration,
-                PULSAR_REPLICATE_SUBSCRIPTION_STATE,
-                builder::replicateSubscriptionState);
-        setOptionValue(
-                configuration,
+        configuration.useOption(
+                PULSAR_REPLICATE_SUBSCRIPTION_STATE, builder::replicateSubscriptionState);
+        configuration.useOption(
                 PULSAR_MAX_TOTAL_RECEIVER_QUEUE_SIZE_ACROSS_PARTITIONS,
                 builder::maxTotalReceiverQueueSizeAcrossPartitions);
-        setOptionValue(configuration, PULSAR_CONSUMER_NAME, builder::consumerName);
-        setOptionValue(configuration, PULSAR_READ_COMPACTED, builder::readCompacted);
-        setOptionValue(configuration, PULSAR_PRIORITY_LEVEL, builder::priorityLevel);
-        setOptionValue(configuration, PULSAR_CONSUMER_PROPERTIES, builder::properties);
-        setOptionValue(
-                configuration,
-                PULSAR_SUBSCRIPTION_INITIAL_POSITION,
-                builder::subscriptionInitialPosition);
+        configuration.useOption(PULSAR_CONSUMER_NAME, builder::consumerName);
+        configuration.useOption(PULSAR_READ_COMPACTED, builder::readCompacted);
+        configuration.useOption(PULSAR_PRIORITY_LEVEL, builder::priorityLevel);
+        configuration.useOption(
+                PULSAR_SUBSCRIPTION_INITIAL_POSITION, builder::subscriptionInitialPosition);
         createDeadLetterPolicy(configuration).ifPresent(builder::deadLetterPolicy);
-        setOptionValue(
-                configuration,
+        configuration.useOption(
                 PULSAR_AUTO_UPDATE_PARTITIONS_INTERVAL_SECONDS,
                 v -> builder.autoUpdatePartitionsInterval(v, SECONDS));
-        setOptionValue(configuration, PULSAR_RETRY_ENABLE, builder::enableRetry);
-        setOptionValue(
-                configuration,
-                PULSAR_MAX_PENDING_CHUNKED_MESSAGE,
-                builder::maxPendingChunkedMessage);
-        setOptionValue(
-                configuration,
+        configuration.useOption(PULSAR_RETRY_ENABLE, builder::enableRetry);
+        configuration.useOption(
+                PULSAR_MAX_PENDING_CHUNKED_MESSAGE, builder::maxPendingChunkedMessage);
+        configuration.useOption(
                 PULSAR_AUTO_ACK_OLDEST_CHUNKED_MESSAGE_ON_QUEUE_FULL,
                 builder::autoAckOldestChunkedMessageOnQueueFull);
-        setOptionValue(
-                configuration,
+        configuration.useOption(
                 PULSAR_EXPIRE_TIME_OF_INCOMPLETE_CHUNKED_MESSAGE_MILLIS,
                 v -> builder.expireTimeOfIncompleteChunkedMessage(v, MILLISECONDS));
-        setOptionValue(configuration, PULSAR_POOL_MESSAGES, builder::poolMessages);
+        configuration.useOption(PULSAR_POOL_MESSAGES, builder::poolMessages);
+
+        Map<String, String> properties = configuration.getProperties(PULSAR_CONSUMER_PROPERTIES);
+        if (!properties.isEmpty()) {
+            builder.properties(properties);
+        }
 
         return builder;
     }
 
-    private static Optional<DeadLetterPolicy> createDeadLetterPolicy(Configuration configuration) {
+    private static Optional<DeadLetterPolicy> createDeadLetterPolicy(
+            SourceConfiguration configuration) {
         if (configuration.contains(PULSAR_MAX_REDELIVER_COUNT)
                 || configuration.contains(PULSAR_RETRY_LETTER_TOPIC)
                 || configuration.contains(PULSAR_DEAD_LETTER_TOPIC)) {
             DeadLetterPolicy.DeadLetterPolicyBuilder builder = DeadLetterPolicy.builder();
 
-            setOptionValue(configuration, PULSAR_MAX_REDELIVER_COUNT, builder::maxRedeliverCount);
-            setOptionValue(configuration, PULSAR_RETRY_LETTER_TOPIC, builder::retryLetterTopic);
-            setOptionValue(configuration, PULSAR_DEAD_LETTER_TOPIC, builder::deadLetterTopic);
+            configuration.useOption(PULSAR_MAX_REDELIVER_COUNT, builder::maxRedeliverCount);
+            configuration.useOption(PULSAR_RETRY_LETTER_TOPIC, builder::retryLetterTopic);
+            configuration.useOption(PULSAR_DEAD_LETTER_TOPIC, builder::deadLetterTopic);
 
             return Optional.of(builder.build());
         } else {
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/SourceConfiguration.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/SourceConfiguration.java
index f13fbf2..806fe4a 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/SourceConfiguration.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/config/SourceConfiguration.java
@@ -22,6 +22,7 @@ import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.api.connector.source.SourceReaderContext;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.CursorPosition;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor;
 
@@ -29,28 +30,67 @@ import org.apache.pulsar.client.api.ConsumerBuilder;
 import org.apache.pulsar.client.api.SubscriptionMode;
 import org.apache.pulsar.client.api.SubscriptionType;
 
-import java.io.Serializable;
 import java.time.Duration;
+import java.util.Objects;
 
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.getOptionValue;
+import static org.apache.flink.connector.base.source.reader.SourceReaderOptions.ELEMENT_QUEUE_CAPACITY;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_AUTO_COMMIT_CURSOR_INTERVAL;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_MAX_FETCH_RECORDS;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_MAX_FETCH_TIME;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_PARTITION_DISCOVERY_INTERVAL_MS;
+import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_READ_TRANSACTION_TIMEOUT;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_SUBSCRIPTION_MODE;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_SUBSCRIPTION_NAME;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_SUBSCRIPTION_TYPE;
-import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_TRANSACTION_TIMEOUT_MILLIS;
 import static org.apache.flink.connector.pulsar.source.PulsarSourceOptions.PULSAR_VERIFY_INITIAL_OFFSETS;
 
 /** The configure class for pulsar source. */
 @PublicEvolving
-public class SourceConfiguration implements Serializable {
+public class SourceConfiguration extends PulsarConfiguration {
     private static final long serialVersionUID = 8488507275800787580L;
 
-    /** The interval in millis for flink querying topic partition information. */
+    private final int messageQueueCapacity;
     private final long partitionDiscoveryIntervalMs;
+    private final boolean enableAutoAcknowledgeMessage;
+    private final long autoCommitCursorInterval;
+    private final long transactionTimeoutMillis;
+    private final Duration maxFetchTime;
+    private final int maxFetchRecords;
+    private final CursorVerification verifyInitialOffsets;
+    private final String subscriptionName;
+    private final SubscriptionType subscriptionType;
+    private final SubscriptionMode subscriptionMode;
+
+    public SourceConfiguration(Configuration configuration) {
+        super(configuration);
+
+        this.messageQueueCapacity = getInteger(ELEMENT_QUEUE_CAPACITY);
+        this.partitionDiscoveryIntervalMs = get(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS);
+        this.enableAutoAcknowledgeMessage = get(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE);
+        this.autoCommitCursorInterval = get(PULSAR_AUTO_COMMIT_CURSOR_INTERVAL);
+        this.transactionTimeoutMillis = get(PULSAR_READ_TRANSACTION_TIMEOUT);
+        this.maxFetchTime = get(PULSAR_MAX_FETCH_TIME, Duration::ofMillis);
+        this.maxFetchRecords = get(PULSAR_MAX_FETCH_RECORDS);
+        this.verifyInitialOffsets = get(PULSAR_VERIFY_INITIAL_OFFSETS);
+        this.subscriptionName = get(PULSAR_SUBSCRIPTION_NAME);
+        this.subscriptionType = get(PULSAR_SUBSCRIPTION_TYPE);
+        this.subscriptionMode = get(PULSAR_SUBSCRIPTION_MODE);
+    }
+
+    /** The capacity of the element queue in the source reader. */
+    public int getMessageQueueCapacity() {
+        return messageQueueCapacity;
+    }
+
+    public boolean isEnablePartitionDiscovery() {
+        return getPartitionDiscoveryIntervalMs() > 0;
+    }
+
+    /** The interval in millis for flink querying topic partition information. */
+    public long getPartitionDiscoveryIntervalMs() {
+        return partitionDiscoveryIntervalMs;
+    }
 
     /**
      * This is used for all subscription type. But the behavior may not be the same among them. If
@@ -60,42 +100,54 @@ public class SourceConfiguration implements Serializable {
      *   <li>{@link SubscriptionType#Shared} and {@link SubscriptionType#Key_Shared} would
      *       immediately acknowledge the message after consuming it.
      *   <li>{@link SubscriptionType#Failover} and {@link SubscriptionType#Exclusive} would perform
-     *       a incremental acknowledge in a fixed {@link #autoCommitCursorInterval}.
+     *       a incremental acknowledge in a fixed {@link #getAutoCommitCursorInterval}.
      * </ul>
      */
-    private final boolean enableAutoAcknowledgeMessage;
+    public boolean isEnableAutoAcknowledgeMessage() {
+        return enableAutoAcknowledgeMessage;
+    }
 
     /**
      * The interval in millis for acknowledge message when you enable {@link
-     * #enableAutoAcknowledgeMessage} and use {@link SubscriptionType#Failover} or {@link
+     * #isEnableAutoAcknowledgeMessage} and use {@link SubscriptionType#Failover} or {@link
      * SubscriptionType#Exclusive} as your consuming subscription type.
      */
-    private final long autoCommitCursorInterval;
+    public long getAutoCommitCursorInterval() {
+        return autoCommitCursorInterval;
+    }
 
     /**
      * Pulsar's transaction have a timeout mechanism for uncommitted transaction. We use transaction
      * for {@link SubscriptionType#Shared} and {@link SubscriptionType#Key_Shared} when user disable
-     * {@link #enableAutoAcknowledgeMessage} and enable flink checkpoint. Since the checkpoint
+     * {@link #isEnableAutoAcknowledgeMessage} and enable flink checkpoint. Since the checkpoint
      * interval couldn't be acquired from {@link SourceReaderContext#getConfiguration()}, we have to
      * expose this option. Make sure this value is greater than the checkpoint interval.
      */
-    private final long transactionTimeoutMillis;
+    public long getTransactionTimeoutMillis() {
+        return transactionTimeoutMillis;
+    }
 
     /**
      * The fetch time for flink split reader polling message. We would stop polling message and
      * return the message in {@link RecordsWithSplitIds} when timeout or exceed the {@link
-     * #maxFetchRecords}.
+     * #getMaxFetchRecords}.
      */
-    private final Duration maxFetchTime;
+    public Duration getMaxFetchTime() {
+        return maxFetchTime;
+    }
 
     /**
      * The fetch counts for a split reader. We would stop polling message and return the message in
-     * {@link RecordsWithSplitIds} when timeout {@link #maxFetchTime} or exceed this value.
+     * {@link RecordsWithSplitIds} when timeout {@link #getMaxFetchTime} or exceed this value.
      */
-    private final int maxFetchRecords;
+    public int getMaxFetchRecords() {
+        return maxFetchRecords;
+    }
 
     /** Validate the {@link CursorPosition} generated by {@link StartCursor}. */
-    private final CursorVerification verifyInitialOffsets;
+    public CursorVerification getVerifyInitialOffsets() {
+        return verifyInitialOffsets;
+    }
 
     /**
      * The pulsar's subscription name for this flink source. All the readers would share this
@@ -103,7 +155,9 @@ public class SourceConfiguration implements Serializable {
      *
      * @see ConsumerBuilder#subscriptionName
      */
-    private final String subscriptionName;
+    public String getSubscriptionName() {
+        return subscriptionName;
+    }
 
     /**
      * The pulsar's subscription type for this flink source. All the readers would share this
@@ -111,7 +165,9 @@ public class SourceConfiguration implements Serializable {
      *
      * @see SubscriptionType
      */
-    private final SubscriptionType subscriptionType;
+    public SubscriptionType getSubscriptionType() {
+        return subscriptionType;
+    }
 
     /**
      * The pulsar's subscription mode for this flink source. All the readers would share this
@@ -119,64 +175,6 @@ public class SourceConfiguration implements Serializable {
      *
      * @see SubscriptionMode
      */
-    private final SubscriptionMode subscriptionMode;
-
-    public SourceConfiguration(Configuration configuration) {
-        this.partitionDiscoveryIntervalMs =
-                configuration.get(PULSAR_PARTITION_DISCOVERY_INTERVAL_MS);
-        this.enableAutoAcknowledgeMessage =
-                configuration.get(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE);
-        this.autoCommitCursorInterval = configuration.get(PULSAR_AUTO_COMMIT_CURSOR_INTERVAL);
-        this.transactionTimeoutMillis = configuration.get(PULSAR_TRANSACTION_TIMEOUT_MILLIS);
-        this.maxFetchTime =
-                getOptionValue(configuration, PULSAR_MAX_FETCH_TIME, Duration::ofMillis);
-        this.maxFetchRecords = configuration.get(PULSAR_MAX_FETCH_RECORDS);
-        this.verifyInitialOffsets = configuration.get(PULSAR_VERIFY_INITIAL_OFFSETS);
-        this.subscriptionName = configuration.get(PULSAR_SUBSCRIPTION_NAME);
-        this.subscriptionType = configuration.get(PULSAR_SUBSCRIPTION_TYPE);
-        this.subscriptionMode = configuration.get(PULSAR_SUBSCRIPTION_MODE);
-    }
-
-    public boolean enablePartitionDiscovery() {
-        return partitionDiscoveryIntervalMs > 0;
-    }
-
-    public long getPartitionDiscoveryIntervalMs() {
-        return partitionDiscoveryIntervalMs;
-    }
-
-    public boolean isEnableAutoAcknowledgeMessage() {
-        return enableAutoAcknowledgeMessage;
-    }
-
-    public long getAutoCommitCursorInterval() {
-        return autoCommitCursorInterval;
-    }
-
-    public long getTransactionTimeoutMillis() {
-        return transactionTimeoutMillis;
-    }
-
-    public Duration getMaxFetchTime() {
-        return maxFetchTime;
-    }
-
-    public int getMaxFetchRecords() {
-        return maxFetchRecords;
-    }
-
-    public CursorVerification getVerifyInitialOffsets() {
-        return verifyInitialOffsets;
-    }
-
-    public String getSubscriptionName() {
-        return subscriptionName;
-    }
-
-    public SubscriptionType getSubscriptionType() {
-        return subscriptionType;
-    }
-
     public SubscriptionMode getSubscriptionMode() {
         return subscriptionMode;
     }
@@ -190,4 +188,44 @@ public class SourceConfiguration implements Serializable {
                 + getSubscriptionMode()
                 + ")";
     }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+        SourceConfiguration that = (SourceConfiguration) o;
+        return partitionDiscoveryIntervalMs == that.partitionDiscoveryIntervalMs
+                && enableAutoAcknowledgeMessage == that.enableAutoAcknowledgeMessage
+                && autoCommitCursorInterval == that.autoCommitCursorInterval
+                && transactionTimeoutMillis == that.transactionTimeoutMillis
+                && maxFetchRecords == that.maxFetchRecords
+                && Objects.equals(maxFetchTime, that.maxFetchTime)
+                && verifyInitialOffsets == that.verifyInitialOffsets
+                && Objects.equals(subscriptionName, that.subscriptionName)
+                && subscriptionType == that.subscriptionType
+                && subscriptionMode == that.subscriptionMode;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                super.hashCode(),
+                partitionDiscoveryIntervalMs,
+                enableAutoAcknowledgeMessage,
+                autoCommitCursorInterval,
+                transactionTimeoutMillis,
+                maxFetchTime,
+                maxFetchRecords,
+                verifyInitialOffsets,
+                subscriptionName,
+                subscriptionType,
+                subscriptionMode);
+    }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumerator.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumerator.java
index 67cc3c7..7890dcf 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumerator.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumerator.java
@@ -21,7 +21,6 @@ package org.apache.flink.connector.pulsar.source.enumerator;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.connector.source.SplitEnumerator;
 import org.apache.flink.api.connector.source.SplitEnumeratorContext;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.subscriber.PulsarSubscriber;
@@ -52,8 +51,8 @@ import java.util.List;
 import java.util.Set;
 
 import static java.util.Collections.singletonList;
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.createAdmin;
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.createClient;
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createAdmin;
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createClient;
 import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
 import static org.apache.flink.connector.pulsar.source.config.CursorVerification.FAIL_ON_MISMATCH;
 import static org.apache.flink.connector.pulsar.source.config.PulsarSourceConfigUtils.createConsumerBuilder;
@@ -70,7 +69,6 @@ public class PulsarSourceEnumerator
     private final PulsarSubscriber subscriber;
     private final StartCursor startCursor;
     private final RangeGenerator rangeGenerator;
-    private final Configuration configuration;
     private final SourceConfiguration sourceConfiguration;
     private final SplitEnumeratorContext<PulsarPartitionSplit> context;
     private final SplitsAssignmentState assignmentState;
@@ -79,16 +77,14 @@ public class PulsarSourceEnumerator
             PulsarSubscriber subscriber,
             StartCursor startCursor,
             RangeGenerator rangeGenerator,
-            Configuration configuration,
             SourceConfiguration sourceConfiguration,
             SplitEnumeratorContext<PulsarPartitionSplit> context,
             SplitsAssignmentState assignmentState) {
-        this.pulsarAdmin = createAdmin(configuration);
-        this.pulsarClient = createClient(configuration);
+        this.pulsarAdmin = createAdmin(sourceConfiguration);
+        this.pulsarClient = createClient(sourceConfiguration);
         this.subscriber = subscriber;
         this.startCursor = startCursor;
         this.rangeGenerator = rangeGenerator;
-        this.configuration = configuration;
         this.sourceConfiguration = sourceConfiguration;
         this.context = context;
         this.assignmentState = assignmentState;
@@ -96,10 +92,10 @@ public class PulsarSourceEnumerator
 
     @Override
     public void start() {
-        rangeGenerator.open(configuration, sourceConfiguration);
+        rangeGenerator.open(sourceConfiguration);
 
         // Check the pulsar topic information and convert it into source split.
-        if (sourceConfiguration.enablePartitionDiscovery()) {
+        if (sourceConfiguration.isEnablePartitionDiscovery()) {
             LOG.info(
                     "Starting the PulsarSourceEnumerator for subscription {} "
                             + "with partition discovery interval of {} ms.",
@@ -206,7 +202,7 @@ public class PulsarSourceEnumerator
 
     private ConsumerBuilder<byte[]> consumerBuilder() {
         ConsumerBuilder<byte[]> builder =
-                createConsumerBuilder(pulsarClient, Schema.BYTES, configuration);
+                createConsumerBuilder(pulsarClient, Schema.BYTES, sourceConfiguration);
         if (sourceConfiguration.getSubscriptionType() == SubscriptionType.Key_Shared) {
             Range range = TopicRange.createFullRange().toPulsarRange();
             KeySharedPolicySticky keySharedPolicy = KeySharedPolicy.stickyHashRange().ranges(range);
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/SplitsAssignmentState.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/SplitsAssignmentState.java
index a8460d4..cbc4826 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/SplitsAssignmentState.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/SplitsAssignmentState.java
@@ -162,7 +162,7 @@ public class SplitsAssignmentState {
      *     assignment.
      */
     public boolean noMoreNewPartitionSplits() {
-        return !sourceConfiguration.enablePartitionDiscovery()
+        return !sourceConfiguration.isEnablePartitionDiscovery()
                 && initialized
                 && pendingPartitionSplits.isEmpty();
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
index aaec143..b85944f 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/StopCursor.java
@@ -18,6 +18,7 @@
 
 package org.apache.flink.connector.pulsar.source.enumerator.cursor;
 
+import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.PublicEvolving;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.stop.LatestMessageStopCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.stop.MessageIdStopCursor;
@@ -41,6 +42,7 @@ import java.io.Serializable;
 public interface StopCursor extends Serializable {
 
     /** The open method for the cursor initializer. This method could be executed multiple times. */
+    @Internal
     default void open(PulsarAdmin admin, TopicPartition partition) {}
 
     /**
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/stop/LatestMessageStopCursor.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/stop/LatestMessageStopCursor.java
index e42064c..257081f 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/stop/LatestMessageStopCursor.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/cursor/stop/LatestMessageStopCursor.java
@@ -33,6 +33,7 @@ import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtil
  * PulsarSourceEnumerator}.
  */
 public class LatestMessageStopCursor implements StopCursor {
+    private static final long serialVersionUID = 1702059838323965723L;
 
     private MessageId messageId;
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/range/RangeGenerator.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/range/RangeGenerator.java
index 58fa12f..825ebdb 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/range/RangeGenerator.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/range/RangeGenerator.java
@@ -51,6 +51,14 @@ public interface RangeGenerator extends Serializable {
      */
     List<TopicRange> range(TopicMetadata metadata, int parallelism);
 
+    /** Initialize some extra resource when bootstrap the source. */
+    default void open(SourceConfiguration sourceConfiguration) {
+        // This method is used for user implementation.
+        open(sourceConfiguration, sourceConfiguration);
+    }
+
+    /** @deprecated Use {@link #open(SourceConfiguration)} instead. */
+    @Deprecated
     default void open(Configuration configuration, SourceConfiguration sourceConfiguration) {
         // This method is used for user implementation.
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/PulsarSourceReaderFactory.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/PulsarSourceReaderFactory.java
index e60ef99..6a5d515 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/PulsarSourceReaderFactory.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/PulsarSourceReaderFactory.java
@@ -21,7 +21,6 @@ package org.apache.flink.connector.pulsar.source.reader;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.connector.source.SourceReader;
 import org.apache.flink.api.connector.source.SourceReaderContext;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
 import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
@@ -41,9 +40,8 @@ import org.apache.pulsar.client.impl.PulsarClientImpl;
 
 import java.util.function.Supplier;
 
-import static org.apache.flink.connector.base.source.reader.SourceReaderOptions.ELEMENT_QUEUE_CAPACITY;
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.createAdmin;
-import static org.apache.flink.connector.pulsar.common.config.PulsarConfigUtils.createClient;
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createAdmin;
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createClient;
 
 /**
  * This factory class is used for creating different types of source reader for different
@@ -65,16 +63,15 @@ public final class PulsarSourceReaderFactory {
     public static <OUT> SourceReader<OUT, PulsarPartitionSplit> create(
             SourceReaderContext readerContext,
             PulsarDeserializationSchema<OUT> deserializationSchema,
-            Configuration configuration,
             SourceConfiguration sourceConfiguration) {
 
-        PulsarClient pulsarClient = createClient(configuration);
-        PulsarAdmin pulsarAdmin = createAdmin(configuration);
+        PulsarClient pulsarClient = createClient(sourceConfiguration);
+        PulsarAdmin pulsarAdmin = createAdmin(sourceConfiguration);
 
         // Create a message queue with the predefined source option.
-        int queueSize = configuration.getInteger(ELEMENT_QUEUE_CAPACITY);
+        int queueCapacity = sourceConfiguration.getMessageQueueCapacity();
         FutureCompletingBlockingQueue<RecordsWithSplitIds<PulsarMessage<OUT>>> elementsQueue =
-                new FutureCompletingBlockingQueue<>(queueSize);
+                new FutureCompletingBlockingQueue<>(queueCapacity);
 
         // Create different pulsar source reader by subscription type.
         SubscriptionType subscriptionType = sourceConfiguration.getSubscriptionType();
@@ -86,14 +83,12 @@ public final class PulsarSourceReaderFactory {
                             new PulsarOrderedPartitionSplitReader<>(
                                     pulsarClient,
                                     pulsarAdmin,
-                                    configuration,
                                     sourceConfiguration,
                                     deserializationSchema);
 
             return new PulsarOrderedSourceReader<>(
                     elementsQueue,
                     splitReaderSupplier,
-                    configuration,
                     readerContext,
                     sourceConfiguration,
                     pulsarClient,
@@ -112,7 +107,6 @@ public final class PulsarSourceReaderFactory {
                             new PulsarUnorderedPartitionSplitReader<>(
                                     pulsarClient,
                                     pulsarAdmin,
-                                    configuration,
                                     sourceConfiguration,
                                     deserializationSchema,
                                     coordinatorClient);
@@ -120,7 +114,6 @@ public final class PulsarSourceReaderFactory {
             return new PulsarUnorderedSourceReader<>(
                     elementsQueue,
                     splitReaderSupplier,
-                    configuration,
                     readerContext,
                     sourceConfiguration,
                     pulsarClient,
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchema.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchema.java
index 38ca456..4a116cd 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchema.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchema.java
@@ -25,6 +25,7 @@ import org.apache.flink.api.common.serialization.DeserializationSchema.Initializ
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
 import org.apache.flink.connector.pulsar.common.schema.PulsarSchema;
+import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.util.Collector;
 
 import org.apache.pulsar.client.api.Message;
@@ -50,7 +51,15 @@ public interface PulsarDeserializationSchema<T> extends Serializable, ResultType
      * as e.g. registering user metrics.
      *
      * @param context Contextual information that can be used during initialization.
+     * @param configuration The Pulsar related source configuration.
      */
+    default void open(InitializationContext context, SourceConfiguration configuration)
+            throws Exception {
+        open(context);
+    }
+
+    /** @deprecated Use {{@link #open(InitializationContext, SourceConfiguration)}} instead. */
+    @Deprecated
     default void open(InitializationContext context) throws Exception {
         // Nothing to do here for the default implementation.
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaWrapper.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaWrapper.java
index e642e51..e9b2779 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaWrapper.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaWrapper.java
@@ -22,6 +22,7 @@ import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.serialization.DeserializationSchema;
 import org.apache.flink.api.common.serialization.DeserializationSchema.InitializationContext;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.util.Collector;
 
 import org.apache.pulsar.client.api.Message;
@@ -44,7 +45,8 @@ class PulsarDeserializationSchemaWrapper<T> implements PulsarDeserializationSche
     }
 
     @Override
-    public void open(InitializationContext context) throws Exception {
+    public void open(InitializationContext context, SourceConfiguration configuration)
+            throws Exception {
         // Initialize it for some custom logic.
         deserializationSchema.open(context);
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarSchemaWrapper.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarSchemaWrapper.java
index 2c33ad7..7926d80 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarSchemaWrapper.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarSchemaWrapper.java
@@ -18,7 +18,6 @@
 package org.apache.flink.connector.pulsar.source.reader.deserializer;
 
 import org.apache.flink.annotation.Internal;
-import org.apache.flink.api.common.serialization.DeserializationSchema.InitializationContext;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.connector.pulsar.common.schema.PulsarSchema;
 import org.apache.flink.util.Collector;
@@ -42,22 +41,13 @@ class PulsarSchemaWrapper<T> implements PulsarDeserializationSchema<T> {
     /** The serializable pulsar schema, it wrap the schema with type class. */
     private final PulsarSchema<T> pulsarSchema;
 
-    @SuppressWarnings("java:S2065")
-    private transient Schema<T> schema;
-
     public PulsarSchemaWrapper(PulsarSchema<T> pulsarSchema) {
         this.pulsarSchema = pulsarSchema;
     }
 
     @Override
-    public void open(InitializationContext context) throws Exception {
-        if (schema == null) {
-            this.schema = pulsarSchema.getPulsarSchema();
-        }
-    }
-
-    @Override
     public void deserialize(Message<byte[]> message, Collector<T> out) throws Exception {
+        Schema<T> schema = this.pulsarSchema.getPulsarSchema();
         byte[] bytes = message.getData();
         T instance = schema.decode(bytes);
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarOrderedSourceReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarOrderedSourceReader.java
index db62eb3..8c197af 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarOrderedSourceReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarOrderedSourceReader.java
@@ -22,7 +22,6 @@ import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.connector.source.ReaderOutput;
 import org.apache.flink.api.connector.source.SourceReaderContext;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
 import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
@@ -70,7 +69,6 @@ public class PulsarOrderedSourceReader<OUT> extends PulsarSourceReaderBase<OUT>
     public PulsarOrderedSourceReader(
             FutureCompletingBlockingQueue<RecordsWithSplitIds<PulsarMessage<OUT>>> elementsQueue,
             Supplier<PulsarOrderedPartitionSplitReader<OUT>> splitReaderSupplier,
-            Configuration configuration,
             SourceReaderContext context,
             SourceConfiguration sourceConfiguration,
             PulsarClient pulsarClient,
@@ -78,7 +76,6 @@ public class PulsarOrderedSourceReader<OUT> extends PulsarSourceReaderBase<OUT>
         super(
                 elementsQueue,
                 new PulsarOrderedFetcherManager<>(elementsQueue, splitReaderSupplier::get),
-                configuration,
                 context,
                 sourceConfiguration,
                 pulsarClient,
@@ -146,7 +143,7 @@ public class PulsarOrderedSourceReader<OUT> extends PulsarSourceReaderBase<OUT>
     }
 
     @Override
-    public void notifyCheckpointComplete(long checkpointId) throws Exception {
+    public void notifyCheckpointComplete(long checkpointId) {
         LOG.debug("Committing cursors for checkpoint {}", checkpointId);
         Map<TopicPartition, MessageId> cursors = cursorsToCommit.get(checkpointId);
         try {
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderBase.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderBase.java
index 21f89be..0122021 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderBase.java
@@ -19,7 +19,6 @@
 package org.apache.flink.connector.pulsar.source.reader.source;
 
 import org.apache.flink.api.connector.source.SourceReaderContext;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
 import org.apache.flink.connector.base.source.reader.SourceReaderBase;
 import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;
@@ -49,7 +48,6 @@ abstract class PulsarSourceReaderBase<OUT>
     protected PulsarSourceReaderBase(
             FutureCompletingBlockingQueue<RecordsWithSplitIds<PulsarMessage<OUT>>> elementsQueue,
             PulsarFetcherManagerBase<OUT> splitFetcherManager,
-            Configuration configuration,
             SourceReaderContext context,
             SourceConfiguration sourceConfiguration,
             PulsarClient pulsarClient,
@@ -58,7 +56,7 @@ abstract class PulsarSourceReaderBase<OUT>
                 elementsQueue,
                 splitFetcherManager,
                 new PulsarRecordEmitter<>(),
-                configuration,
+                sourceConfiguration,
                 context);
 
         this.sourceConfiguration = sourceConfiguration;
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarUnorderedSourceReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarUnorderedSourceReader.java
index ce57a00..2af77d9 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarUnorderedSourceReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarUnorderedSourceReader.java
@@ -21,7 +21,6 @@ package org.apache.flink.connector.pulsar.source.reader.source;
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.connector.source.SourceReaderContext;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
 import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
@@ -63,7 +62,6 @@ public class PulsarUnorderedSourceReader<OUT> extends PulsarSourceReaderBase<OUT
     public PulsarUnorderedSourceReader(
             FutureCompletingBlockingQueue<RecordsWithSplitIds<PulsarMessage<OUT>>> elementsQueue,
             Supplier<PulsarUnorderedPartitionSplitReader<OUT>> splitReaderSupplier,
-            Configuration configuration,
             SourceReaderContext context,
             SourceConfiguration sourceConfiguration,
             PulsarClient pulsarClient,
@@ -72,7 +70,6 @@ public class PulsarUnorderedSourceReader<OUT> extends PulsarSourceReaderBase<OUT
         super(
                 elementsQueue,
                 new PulsarUnorderedFetcherManager<>(elementsQueue, splitReaderSupplier::get),
-                configuration,
                 context,
                 sourceConfiguration,
                 pulsarClient,
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarOrderedPartitionSplitReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarOrderedPartitionSplitReader.java
index 8643891..bb6d796 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarOrderedPartitionSplitReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarOrderedPartitionSplitReader.java
@@ -19,7 +19,6 @@
 package org.apache.flink.connector.pulsar.source.reader.split;
 
 import org.apache.flink.annotation.Internal;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicPartition;
@@ -56,10 +55,9 @@ public class PulsarOrderedPartitionSplitReader<OUT> extends PulsarPartitionSplit
     public PulsarOrderedPartitionSplitReader(
             PulsarClient pulsarClient,
             PulsarAdmin pulsarAdmin,
-            Configuration configuration,
             SourceConfiguration sourceConfiguration,
             PulsarDeserializationSchema<OUT> deserializationSchema) {
-        super(pulsarClient, pulsarAdmin, configuration, sourceConfiguration, deserializationSchema);
+        super(pulsarClient, pulsarAdmin, sourceConfiguration, deserializationSchema);
     }
 
     @Override
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderBase.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderBase.java
index 69c7b5e..37b5630 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderBase.java
@@ -19,7 +19,6 @@
 package org.apache.flink.connector.pulsar.source.reader.split;
 
 import org.apache.flink.api.common.time.Deadline;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.source.reader.RecordsBySplits;
 import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
 import org.apache.flink.connector.base.source.reader.splitreader.SplitReader;
@@ -69,7 +68,6 @@ abstract class PulsarPartitionSplitReaderBase<OUT>
 
     protected final PulsarClient pulsarClient;
     protected final PulsarAdmin pulsarAdmin;
-    protected final Configuration configuration;
     protected final SourceConfiguration sourceConfiguration;
     protected final PulsarDeserializationSchema<OUT> deserializationSchema;
     protected final AtomicBoolean wakeup;
@@ -80,12 +78,10 @@ abstract class PulsarPartitionSplitReaderBase<OUT>
     protected PulsarPartitionSplitReaderBase(
             PulsarClient pulsarClient,
             PulsarAdmin pulsarAdmin,
-            Configuration configuration,
             SourceConfiguration sourceConfiguration,
             PulsarDeserializationSchema<OUT> deserializationSchema) {
         this.pulsarClient = pulsarClient;
         this.pulsarAdmin = pulsarAdmin;
-        this.configuration = configuration;
         this.sourceConfiguration = sourceConfiguration;
         this.deserializationSchema = deserializationSchema;
         this.wakeup = new AtomicBoolean(false);
@@ -217,7 +213,7 @@ abstract class PulsarPartitionSplitReaderBase<OUT>
     /** Create a specified {@link Consumer} by the given topic partition. */
     protected Consumer<byte[]> createPulsarConsumer(TopicPartition partition) {
         ConsumerBuilder<byte[]> consumerBuilder =
-                createConsumerBuilder(pulsarClient, Schema.BYTES, configuration);
+                createConsumerBuilder(pulsarClient, Schema.BYTES, sourceConfiguration);
 
         consumerBuilder.topic(partition.getFullTopicName());
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
index 7262863..2027df4 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
@@ -19,7 +19,6 @@
 package org.apache.flink.connector.pulsar.source.reader.split;
 
 import org.apache.flink.annotation.Internal;
-import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema;
 import org.apache.flink.connector.pulsar.source.reader.source.PulsarUnorderedSourceReader;
@@ -67,11 +66,10 @@ public class PulsarUnorderedPartitionSplitReader<OUT> extends PulsarPartitionSpl
     public PulsarUnorderedPartitionSplitReader(
             PulsarClient pulsarClient,
             PulsarAdmin pulsarAdmin,
-            Configuration configuration,
             SourceConfiguration sourceConfiguration,
             PulsarDeserializationSchema<OUT> deserializationSchema,
             TransactionCoordinatorClient coordinatorClient) {
-        super(pulsarClient, pulsarAdmin, configuration, sourceConfiguration, deserializationSchema);
+        super(pulsarClient, pulsarAdmin, sourceConfiguration, deserializationSchema);
 
         this.coordinatorClient = coordinatorClient;
     }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilderTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilderTest.java
new file mode 100644
index 0000000..9b3528f
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigBuilderTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.Properties;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/** Unit tests for {@link PulsarConfigBuilder}. */
+class PulsarConfigBuilderTest {
+
+    @Test
+    void canNotSetSameOptionTwiceWithDifferentValue() {
+        ConfigOption<String> option = ConfigOptions.key("some.key").stringType().noDefaultValue();
+        PulsarConfigBuilder builder = new PulsarConfigBuilder();
+        builder.set(option, "value1");
+
+        assertDoesNotThrow(() -> builder.set(option, "value1"));
+        assertThrows(IllegalArgumentException.class, () -> builder.set(option, "value2"));
+    }
+
+    @Test
+    void setConfigurationCanNotOverrideExistedKeysWithNewValue() {
+        ConfigOption<String> option = ConfigOptions.key("string.k1").stringType().noDefaultValue();
+        PulsarConfigBuilder builder = new PulsarConfigBuilder();
+
+        Configuration configuration = new Configuration();
+        configuration.set(option, "value1");
+
+        builder.set(option, "value1");
+        assertDoesNotThrow(() -> builder.set(configuration));
+
+        configuration.set(option, "value2");
+        assertThrows(IllegalArgumentException.class, () -> builder.set(configuration));
+    }
+
+    @Test
+    void setPropertiesCanNotOverrideExistedKeysWithNewValueAndSupportTypeConversion() {
+        ConfigOption<Integer> option = ConfigOptions.key("int.type").intType().defaultValue(3);
+        PulsarConfigBuilder builder = new PulsarConfigBuilder();
+
+        Properties properties = new Properties();
+        properties.put("int.type", "6");
+        assertDoesNotThrow(() -> builder.set(properties));
+
+        properties.put("int.type", "1");
+        assertThrows(IllegalArgumentException.class, () -> builder.set(properties));
+
+        Integer value = builder.get(option);
+        assertEquals(value, 6);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidatorTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidatorTest.java
new file mode 100644
index 0000000..8f71286
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigValidatorTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/** Unit tests for {@link PulsarConfigValidator}. */
+class PulsarConfigValidatorTest {
+
+    @Test
+    void conflictKeysAndRequiredKeysValidation() {
+        ConfigOption<String> required = ConfigOptions.key("required").stringType().noDefaultValue();
+        ConfigOption<String> c1 = ConfigOptions.key("conflict1").stringType().noDefaultValue();
+        ConfigOption<String> c2 = ConfigOptions.key("conflict2").stringType().noDefaultValue();
+
+        PulsarConfigValidator validator =
+                PulsarConfigValidator.builder()
+                        .requiredOption(required)
+                        .conflictOptions(c1, c2)
+                        .build();
+        Configuration configuration = new Configuration();
+
+        // Required options
+        assertThrows(IllegalArgumentException.class, () -> validator.validate(configuration));
+        configuration.set(required, "required");
+        assertDoesNotThrow(() -> validator.validate(configuration));
+
+        // Conflict options
+        configuration.set(c1, "c1");
+        assertDoesNotThrow(() -> validator.validate(configuration));
+        configuration.set(c2, "c2");
+        assertThrows(IllegalArgumentException.class, () -> validator.validate(configuration));
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigurationTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigurationTest.java
new file mode 100644
index 0000000..cf6f757
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/common/config/PulsarConfigurationTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.config;
+
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.Configuration;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.Map;
+import java.util.Properties;
+
+import static java.util.Collections.emptyMap;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/** Unit tests for {@link PulsarConfiguration}. */
+class PulsarConfigurationTest {
+
+    private static final ConfigOption<Map<String, String>> PROP_OP =
+            ConfigOptions.key("some.config").mapType().defaultValue(emptyMap());
+
+    @Test
+    void pulsarConfigurationCanGetMapWithPrefix() {
+        Properties expectProp = new Properties();
+        for (int i = 0; i < 10; i++) {
+            expectProp.put(randomAlphabetic(10), randomAlphabetic(10));
+        }
+
+        Configuration configuration = new Configuration();
+
+        for (String name : expectProp.stringPropertyNames()) {
+            configuration.setString(PROP_OP.key() + "." + name, expectProp.getProperty(name));
+        }
+
+        TestConfiguration configuration1 = new TestConfiguration(configuration);
+        Map<String, String> properties = configuration1.getProperties(PROP_OP);
+        assertEquals(properties, expectProp);
+    }
+
+    private static final class TestConfiguration extends PulsarConfiguration {
+        private static final long serialVersionUID = 944689984000450917L;
+
+        private TestConfiguration(Configuration config) {
+            super(config);
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilderTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilderTest.java
index b96173d..5825e4e 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilderTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilderTest.java
@@ -26,32 +26,38 @@ import org.apache.pulsar.client.api.SubscriptionType;
 import org.junit.jupiter.api.Test;
 
 import static org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema.pulsarSchema;
-import static org.assertj.core.api.Assertions.assertThatCode;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 /** Unit tests for {@link PulsarSourceBuilder}. */
-@SuppressWarnings("java:S5778")
 class PulsarSourceBuilderTest {
 
     @Test
     void someSetterMethodCouldOnlyBeCalledOnce() {
-        PulsarSourceBuilder<String> builder = new PulsarSourceBuilder<>();
-        assertThatThrownBy(() -> builder.setAdminUrl("admin-url").setAdminUrl("admin-url2"))
-                .isInstanceOf(IllegalArgumentException.class);
-
-        assertThatThrownBy(() -> builder.setServiceUrl("service-url").setServiceUrl("service-url2"))
-                .isInstanceOf(IllegalArgumentException.class);
-
-        assertThatThrownBy(
-                        () ->
-                                builder.setSubscriptionName("set_subscription_name")
-                                        .setSubscriptionName("set_subscription_name2"))
-                .isInstanceOf(IllegalArgumentException.class);
-        assertThatThrownBy(
-                        () ->
-                                builder.setSubscriptionType(SubscriptionType.Exclusive)
-                                        .setSubscriptionType(SubscriptionType.Shared))
-                .isInstanceOf(IllegalArgumentException.class);
+        PulsarSourceBuilder<String> builder =
+                new PulsarSourceBuilder<String>()
+                        .setAdminUrl("admin-url")
+                        .setServiceUrl("service-url")
+                        .setSubscriptionName("set_subscription_name")
+                        .setSubscriptionType(SubscriptionType.Exclusive);
+        assertAll(
+                () ->
+                        assertThrows(
+                                IllegalArgumentException.class,
+                                () -> builder.setAdminUrl("admin-url2")),
+                () ->
+                        assertThrows(
+                                IllegalArgumentException.class,
+                                () -> builder.setServiceUrl("service-url2")),
+                () ->
+                        assertThrows(
+                                IllegalArgumentException.class,
+                                () -> builder.setSubscriptionName("set_subscription_name2")),
+                () ->
+                        assertThrows(
+                                IllegalArgumentException.class,
+                                () -> builder.setSubscriptionType(SubscriptionType.Shared)));
     }
 
     @Test
@@ -67,34 +73,13 @@ class PulsarSourceBuilderTest {
     void rangeGeneratorRequiresKeyShared() {
         PulsarSourceBuilder<String> builder = new PulsarSourceBuilder<>();
         builder.setSubscriptionType(SubscriptionType.Shared);
+        UniformRangeGenerator rangeGenerator = new UniformRangeGenerator();
 
-        assertThatThrownBy(() -> builder.setRangeGenerator(new UniformRangeGenerator()))
+        assertThatThrownBy(() -> builder.setRangeGenerator(rangeGenerator))
                 .isInstanceOf(IllegalArgumentException.class);
     }
 
     @Test
-    void missingRequiredField() {
-        PulsarSourceBuilder<String> builder = new PulsarSourceBuilder<>();
-        assertThatThrownBy(builder::build).isInstanceOf(IllegalArgumentException.class);
-        builder.setAdminUrl("admin-url");
-        assertThatThrownBy(builder::build).isInstanceOf(IllegalArgumentException.class);
-        builder.setServiceUrl("service-url");
-        assertThatThrownBy(builder::build).isInstanceOf(IllegalArgumentException.class);
-        builder.setSubscriptionName("subscription-name");
-        assertThatThrownBy(builder::build).isInstanceOf(NullPointerException.class);
-        builder.setTopics("topic");
-        assertThatThrownBy(builder::build).isInstanceOf(NullPointerException.class);
-        builder.setDeserializationSchema(pulsarSchema(Schema.STRING));
-        assertThatCode(builder::build).doesNotThrowAnyException();
-    }
-
-    @Test
-    void defaultBuilder() {
-        PulsarSourceBuilder<String> builder = new PulsarSourceBuilder<>();
-        assertThatThrownBy(builder::build).isInstanceOf(IllegalArgumentException.class);
-    }
-
-    @Test
     void subscriptionTypeShouldNotBeOverriddenBySetMethod() {
         PulsarSourceBuilder<String> builder = new PulsarSourceBuilder<>();
         fillRequiredFields(builder);
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumeratorTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumeratorTest.java
index c52c514..1dcbe84 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumeratorTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/PulsarSourceEnumeratorTest.java
@@ -381,7 +381,6 @@ class PulsarSourceEnumeratorTest extends PulsarTestSuiteBase {
                 subscriber,
                 StartCursor.earliest(),
                 new FullRangeGenerator(),
-                configuration,
                 sourceConfiguration,
                 enumContext,
                 assignmentState);
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
index 48e6e7a..18888df 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
@@ -21,6 +21,7 @@ package org.apache.flink.connector.pulsar.source.reader.deserializer;
 import org.apache.flink.api.common.serialization.SimpleStringSchema;
 import org.apache.flink.api.common.typeinfo.Types;
 import org.apache.flink.connector.pulsar.SampleMessage.TestMessage;
+import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.testutils.source.deserialization.TestingDeserializationContext;
 import org.apache.flink.core.memory.DataOutputSerializer;
 import org.apache.flink.types.StringValue;
@@ -46,6 +47,7 @@ import static org.apache.pulsar.client.api.Schema.PROTOBUF_NATIVE;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.Mockito.mock;
 
 /** Unit tests for {@link PulsarDeserializationSchema}. */
 class PulsarDeserializationSchemaTest {
@@ -53,7 +55,7 @@ class PulsarDeserializationSchemaTest {
     @Test
     void createFromFlinkDeserializationSchema() throws Exception {
         PulsarDeserializationSchema<String> schema = flinkSchema(new SimpleStringSchema());
-        schema.open(new TestingDeserializationContext());
+        schema.open(new TestingDeserializationContext(), mock(SourceConfiguration.class));
         assertDoesNotThrow(() -> InstantiationUtil.clone(schema));
 
         Message<byte[]> message = getMessage("some-sample-message", String::getBytes);
@@ -68,7 +70,7 @@ class PulsarDeserializationSchemaTest {
     void createFromPulsarSchema() throws Exception {
         Schema<TestMessage> schema1 = PROTOBUF_NATIVE(TestMessage.class);
         PulsarDeserializationSchema<TestMessage> schema2 = pulsarSchema(schema1, TestMessage.class);
-        schema2.open(new TestingDeserializationContext());
+        schema2.open(new TestingDeserializationContext(), mock(SourceConfiguration.class));
         assertDoesNotThrow(() -> InstantiationUtil.clone(schema2));
 
         TestMessage message1 =
@@ -88,7 +90,7 @@ class PulsarDeserializationSchemaTest {
     @Test
     void createFromFlinkTypeInformation() throws Exception {
         PulsarDeserializationSchema<String> schema = flinkTypeInfo(Types.STRING, null);
-        schema.open(new TestingDeserializationContext());
+        schema.open(new TestingDeserializationContext(), mock(SourceConfiguration.class));
         assertDoesNotThrow(() -> InstantiationUtil.clone(schema));
 
         Message<byte[]> message =
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
index 1300b3b..f7cb120 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/source/PulsarSourceReaderTestBase.java
@@ -74,6 +74,7 @@ import static org.apache.flink.connector.pulsar.testutils.extension.TestOrderlin
 import static org.apache.flink.util.Preconditions.checkNotNull;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.fail;
+import static org.mockito.Mockito.mock;
 
 @ExtendWith({
     TestOrderlinessExtension.class,
@@ -146,7 +147,8 @@ abstract class PulsarSourceReaderTestBase extends PulsarTestSuiteBase {
         SourceReaderContext context = new TestingReaderContext();
         try {
             deserializationSchema.open(
-                    new PulsarDeserializationSchemaInitializationContext(context));
+                    new PulsarDeserializationSchemaInitializationContext(context),
+                    mock(SourceConfiguration.class));
         } catch (Exception e) {
             fail("Error while opening deserializationSchema");
         }
@@ -154,7 +156,7 @@ abstract class PulsarSourceReaderTestBase extends PulsarTestSuiteBase {
         SourceConfiguration sourceConfiguration = new SourceConfiguration(configuration);
         return (PulsarSourceReaderBase<Integer>)
                 PulsarSourceReaderFactory.create(
-                        context, deserializationSchema, configuration, sourceConfiguration);
+                        context, deserializationSchema, sourceConfiguration);
     }
 
     public class PulsarSourceReaderInvocationContextProvider
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderTestBase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderTestBase.java
index 8ee1c68..538e458 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderTestBase.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarPartitionSplitReaderTestBase.java
@@ -92,18 +92,15 @@ public abstract class PulsarPartitionSplitReaderTestBase extends PulsarTestSuite
     PulsarSplitReaderInvocationContextProvider provider =
             new PulsarSplitReaderInvocationContextProvider();
 
-    /** Default reader config: max message 1, fetch timeout 1s. */
-    private Configuration readerConfig() {
+    /** Default source config: max message 1, fetch timeout 1s. */
+    private SourceConfiguration sourceConfig() {
         Configuration config = operator().config();
         config.set(PULSAR_MAX_FETCH_RECORDS, 1);
         config.set(PULSAR_MAX_FETCH_TIME, 1000L);
         config.set(PULSAR_SUBSCRIPTION_NAME, randomAlphabetic(10));
         config.set(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE, true);
-        return config;
-    }
 
-    private SourceConfiguration sourceConfig() {
-        return new SourceConfiguration(readerConfig());
+        return new SourceConfiguration(config);
     }
 
     protected void handleSplit(
@@ -337,14 +334,12 @@ public abstract class PulsarPartitionSplitReaderTestBase extends PulsarTestSuite
             return new PulsarOrderedPartitionSplitReader<>(
                     operator().client(),
                     operator().admin(),
-                    readerConfig(),
                     sourceConfig(),
                     flinkSchema(new SimpleStringSchema()));
         } else {
             return new PulsarUnorderedPartitionSplitReader<>(
                     operator().client(),
                     operator().admin(),
-                    readerConfig(),
                     sourceConfig(),
                     flinkSchema(new SimpleStringSchema()),
                     null);
diff --git a/flink-python/pyflink/datastream/tests/test_connectors.py b/flink-python/pyflink/datastream/tests/test_connectors.py
index c4d5bc8..407af8e 100644
--- a/flink-python/pyflink/datastream/tests/test_connectors.py
+++ b/flink-python/pyflink/datastream/tests/test_connectors.py
@@ -197,7 +197,7 @@ class FlinkPulsarTest(ConnectorTestBase):
         plan = eval(self.env.get_execution_plan())
         self.assertEqual('Source: pulsar source', plan['nodes'][0]['type'])
 
-        configuration = get_field_value(pulsar_source.get_java_function(), "configuration")
+        configuration = get_field_value(pulsar_source.get_java_function(), "sourceConfiguration")
         self.assertEqual(
             configuration.getString(
                 ConfigOptions.key('pulsar.client.serviceUrl')

[flink] 04/09: [FLINK-26023][connector/pulsar] Create a Pulsar sink config model for matching ProducerConfigurationData.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 9bc8b0f37bec419bcdc4b8cdee3abf5320df5399
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Wed Feb 9 14:56:54 2022 +0800

    [FLINK-26023][connector/pulsar] Create a Pulsar sink config model for matching ProducerConfigurationData.
---
 .../connector/pulsar/sink/PulsarSinkOptions.java   | 259 +++++++++++++++++++++
 .../pulsar/sink/config/PulsarSinkConfigUtils.java  | 112 +++++++++
 .../pulsar/sink/config/SinkConfiguration.java      | 147 ++++++++++++
 3 files changed, 518 insertions(+)

diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
new file mode 100644
index 0000000..0e16830
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.annotation.docs.ConfigGroup;
+import org.apache.flink.annotation.docs.ConfigGroups;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.description.Description;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.common.config.PulsarOptions;
+
+import org.apache.pulsar.client.api.CompressionType;
+
+import java.time.Duration;
+import java.util.Map;
+
+import static java.util.Collections.emptyMap;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.flink.configuration.description.LinkElement.link;
+import static org.apache.flink.configuration.description.TextElement.code;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PRODUCER_CONFIG_PREFIX;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.SINK_CONFIG_PREFIX;
+import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_BATCHING_MAX_MESSAGES;
+import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_MAX_PENDING_MESSAGES;
+import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS;
+
+/**
+ * Configurations for PulsarSink. All the options list here could be configured in {@code
+ * PulsarSinkBuilder#setConfig(ConfigOption, Object)}. The {@link PulsarOptions} is also required
+ * for pulsar source.
+ *
+ * @see PulsarOptions for shared configure options.
+ */
+@PublicEvolving
+@ConfigGroups(
+        groups = {
+            @ConfigGroup(name = "PulsarSink", keyPrefix = SINK_CONFIG_PREFIX),
+            @ConfigGroup(name = "PulsarProducer", keyPrefix = PRODUCER_CONFIG_PREFIX)
+        })
+public final class PulsarSinkOptions {
+
+    // Pulsar sink connector config prefix.
+    public static final String SINK_CONFIG_PREFIX = "pulsar.sink.";
+    // Pulsar producer API config prefix.
+    public static final String PRODUCER_CONFIG_PREFIX = "pulsar.producer.";
+
+    private PulsarSinkOptions() {
+        // This is a constant class
+    }
+
+    ///////////////////////////////////////////////////////////////////////////////
+    //
+    // The configuration for pulsar sink part.
+    // All the configuration listed below should have the pulsar.sink prefix.
+    //
+    ///////////////////////////////////////////////////////////////////////////////
+
+    public static final ConfigOption<DeliveryGuarantee> PULSAR_WRITE_DELIVERY_GUARANTEE =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "deliveryGuarantee")
+                    .enumType(DeliveryGuarantee.class)
+                    .defaultValue(DeliveryGuarantee.NONE)
+                    .withDescription("Optional delivery guarantee when committing.");
+
+    public static final ConfigOption<Long> PULSAR_WRITE_TRANSACTION_TIMEOUT =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "transactionTimeoutMillis")
+                    .longType()
+                    .defaultValue(Duration.ofHours(3).toMillis())
+                    .withDescription(
+                            Description.builder()
+                                    .text(
+                                            "This option is used when the user require the %s semantic.",
+                                            code("DeliveryGuarantee.EXACTLY_ONCE"))
+                                    .text(
+                                            "We would use transaction for making sure the message could be write only once.")
+                                    .build());
+
+    public static final ConfigOption<Long> PULSAR_TOPIC_METADATA_REFRESH_INTERVAL =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "topicMetadataRefreshInterval")
+                    .longType()
+                    .defaultValue(Duration.ofMinutes(30).toMillis())
+                    .withDescription(
+                            "Auto update the topic metadata in a fixed interval (in ms). The default value is 30 minutes.");
+
+    public static final ConfigOption<Boolean> PULSAR_WRITE_SCHEMA_EVOLUTION =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "enableSchemaEvolution")
+                    .booleanType()
+                    .defaultValue(false)
+                    .withDescription(
+                            Description.builder()
+                                    .text(
+                                            "If you enable this option, we would consume and deserialize the message by using Pulsar's %s.",
+                                            code("Schema"))
+                                    .build());
+
+    public static final ConfigOption<Integer> PULSAR_MAX_RECOMMIT_TIMES =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "maxRecommitTimes")
+                    .intType()
+                    .defaultValue(5)
+                    .withDescription(
+                            "The allowed transaction recommit times if we meet some retryable exception."
+                                    + " This is used in Pulsar Transaction.");
+
+    ///////////////////////////////////////////////////////////////////////////////
+    //
+    // The configuration for ProducerConfigurationData part.
+    // All the configuration listed below should have the pulsar.producer prefix.
+    //
+    ///////////////////////////////////////////////////////////////////////////////
+
+    public static final ConfigOption<String> PULSAR_PRODUCER_NAME =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "producerName")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription(
+                            "A producer name which would be displayed in the Pulsar's dashboard."
+                                    + " If no producer name was provided, we would use a Pulsar generated name instead.");
+
+    public static final ConfigOption<Long> PULSAR_SEND_TIMEOUT_MS =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "sendTimeoutMs")
+                    .longType()
+                    .defaultValue(30000L)
+                    .withDescription(
+                            Description.builder()
+                                    .text("Message send timeout in ms.")
+                                    .text(
+                                            "If a message is not acknowledged by a server before the %s expires, an error occurs.",
+                                            code("sendTimeout"))
+                                    .build());
+
+    public static final ConfigOption<Integer> PULSAR_MAX_PENDING_MESSAGES =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "maxPendingMessages")
+                    .intType()
+                    .defaultValue(DEFAULT_MAX_PENDING_MESSAGES)
+                    .withDescription(
+                            Description.builder()
+                                    .text("The maximum size of a queue holding pending messages.")
+                                    .linebreak()
+                                    .text(
+                                            "For example, a message waiting to receive an acknowledgment from a %s.",
+                                            link(
+                                                    "broker",
+                                                    "https://pulsar.apache.org/docs/en/reference-terminology#broker"))
+                                    .linebreak()
+                                    .text(
+                                            "By default, when the queue is full, all calls to the %s and %s methods fail unless you set %s to true.",
+                                            code("Send"),
+                                            code("SendAsync"),
+                                            code("BlockIfQueueFull"))
+                                    .build());
+
+    public static final ConfigOption<Integer> PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "maxPendingMessagesAcrossPartitions")
+                    .intType()
+                    .defaultValue(DEFAULT_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS)
+                    .withDescription(
+                            Description.builder()
+                                    .text(
+                                            "The maximum number of pending messages across partitions.")
+                                    .linebreak()
+                                    .text(
+                                            "Use the setting to lower the max pending messages for each partition (%s) if the total number exceeds the configured value.",
+                                            code("setMaxPendingMessages"))
+                                    .build());
+
+    public static final ConfigOption<Long> PULSAR_BATCHING_MAX_PUBLISH_DELAY_MICROS =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "batchingMaxPublishDelayMicros")
+                    .longType()
+                    .defaultValue(MILLISECONDS.toMicros(1))
+                    .withDescription("Batching time period of sending messages.");
+
+    public static final ConfigOption<Integer>
+            PULSAR_BATCHING_PARTITION_SWITCH_FREQUENCY_BY_PUBLISH_DELAY =
+                    ConfigOptions.key(
+                                    PRODUCER_CONFIG_PREFIX
+                                            + "batchingPartitionSwitchFrequencyByPublishDelay")
+                            .intType()
+                            .defaultValue(10)
+                            .withDescription(
+                                    "The maximum wait time for switching topic partitions.");
+
+    public static final ConfigOption<Integer> PULSAR_BATCHING_MAX_MESSAGES =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "batchingMaxMessages")
+                    .intType()
+                    .defaultValue(DEFAULT_BATCHING_MAX_MESSAGES)
+                    .withDescription("The maximum number of messages permitted in a batch.");
+
+    public static final ConfigOption<Integer> PULSAR_BATCHING_MAX_BYTES =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "batchingMaxBytes")
+                    .intType()
+                    .defaultValue(128 * 1024)
+                    .withDescription(
+                            "The maximum size of messages permitted in a batch. Keep the maximum consistent as previous versions.");
+
+    public static final ConfigOption<Boolean> PULSAR_BATCHING_ENABLED =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "batchingEnabled")
+                    .booleanType()
+                    .defaultValue(true)
+                    .withDescription("Enable batch send ability, it was enabled by default.");
+
+    public static final ConfigOption<Boolean> PULSAR_CHUNKING_ENABLED =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "chunkingEnabled")
+                    .booleanType()
+                    .defaultValue(false)
+                    .withDescription("");
+
+    public static final ConfigOption<CompressionType> PULSAR_COMPRESSION_TYPE =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "compressionType")
+                    .enumType(CompressionType.class)
+                    .defaultValue(CompressionType.NONE)
+                    .withDescription(
+                            Description.builder()
+                                    .text("Message data compression type used by a producer.")
+                                    .text("Available options:")
+                                    .list(
+                                            link("LZ4", "https://github.com/lz4/lz4"),
+                                            link("ZLIB", "https://zlib.net/"),
+                                            link("ZSTD", "https://facebook.github.io/zstd/"),
+                                            link("SNAPPY", "https://google.github.io/snappy/"))
+                                    .build());
+
+    public static final ConfigOption<Long> PULSAR_INITIAL_SEQUENCE_ID =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "initialSequenceId")
+                    .longType()
+                    .noDefaultValue()
+                    .withDescription(
+                            "The sequence id for avoiding the duplication, it's used when Pulsar doesn't have transaction.");
+
+    public static final ConfigOption<Map<String, String>> PULSAR_PRODUCER_PROPERTIES =
+            ConfigOptions.key(PRODUCER_CONFIG_PREFIX + "properties")
+                    .mapType()
+                    .defaultValue(emptyMap())
+                    .withDescription(
+                            Description.builder()
+                                    .text("A name or value property of this consumer.")
+                                    .text(
+                                            " %s is application defined metadata attached to a consumer.",
+                                            code("properties"))
+                                    .text(
+                                            " When getting a topic stats, associate this metadata with the consumer stats for easier identification.")
+                                    .build());
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/PulsarSinkConfigUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/PulsarSinkConfigUtils.java
new file mode 100644
index 0000000..13821fe
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/PulsarSinkConfigUtils.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.config;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfigValidator;
+
+import org.apache.pulsar.client.api.Producer;
+import org.apache.pulsar.client.api.ProducerBuilder;
+import org.apache.pulsar.client.api.PulsarClient;
+import org.apache.pulsar.client.api.Schema;
+
+import java.util.Map;
+
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAMS;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_AUTH_PARAM_MAP;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SERVICE_URL;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_ENABLED;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_BYTES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_MESSAGES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_PUBLISH_DELAY_MICROS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_PARTITION_SWITCH_FREQUENCY_BY_PUBLISH_DELAY;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_CHUNKING_ENABLED;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_COMPRESSION_TYPE;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_INITIAL_SEQUENCE_ID;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_PENDING_MESSAGES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_PRODUCER_NAME;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_PRODUCER_PROPERTIES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_SEND_TIMEOUT_MS;
+import static org.apache.pulsar.client.api.MessageRoutingMode.SinglePartition;
+import static org.apache.pulsar.client.api.ProducerAccessMode.Shared;
+
+/** Create the {@link Producer} to send message and a validator for building sink config. */
+@Internal
+public final class PulsarSinkConfigUtils {
+
+    private PulsarSinkConfigUtils() {
+        // No need to create instance.
+    }
+
+    public static final PulsarConfigValidator SINK_CONFIG_VALIDATOR =
+            PulsarConfigValidator.builder()
+                    .requiredOption(PULSAR_SERVICE_URL)
+                    .requiredOption(PULSAR_ADMIN_URL)
+                    .conflictOptions(PULSAR_AUTH_PARAMS, PULSAR_AUTH_PARAM_MAP)
+                    .build();
+
+    /** Create a pulsar producer builder by using the given Configuration. */
+    public static <T> ProducerBuilder<T> createProducerBuilder(
+            PulsarClient client, Schema<T> schema, SinkConfiguration configuration) {
+        ProducerBuilder<T> builder = client.newProducer(schema);
+
+        configuration.useOption(PULSAR_PRODUCER_NAME, builder::producerName);
+        configuration.useOption(
+                PULSAR_SEND_TIMEOUT_MS,
+                Math::toIntExact,
+                ms -> builder.sendTimeout(ms, MILLISECONDS));
+        configuration.useOption(PULSAR_MAX_PENDING_MESSAGES, builder::maxPendingMessages);
+        configuration.useOption(
+                PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS,
+                builder::maxPendingMessagesAcrossPartitions);
+        configuration.useOption(
+                PULSAR_BATCHING_MAX_PUBLISH_DELAY_MICROS,
+                s -> builder.batchingMaxPublishDelay(s, MICROSECONDS));
+        configuration.useOption(
+                PULSAR_BATCHING_PARTITION_SWITCH_FREQUENCY_BY_PUBLISH_DELAY,
+                builder::roundRobinRouterBatchingPartitionSwitchFrequency);
+        configuration.useOption(PULSAR_BATCHING_MAX_MESSAGES, builder::batchingMaxMessages);
+        configuration.useOption(PULSAR_BATCHING_MAX_BYTES, builder::batchingMaxBytes);
+        configuration.useOption(PULSAR_BATCHING_ENABLED, builder::enableBatching);
+        configuration.useOption(PULSAR_CHUNKING_ENABLED, builder::enableChunking);
+        configuration.useOption(PULSAR_COMPRESSION_TYPE, builder::compressionType);
+        configuration.useOption(PULSAR_INITIAL_SEQUENCE_ID, builder::initialSequenceId);
+
+        // Set producer properties
+        Map<String, String> properties = configuration.getProperties(PULSAR_PRODUCER_PROPERTIES);
+        if (!properties.isEmpty()) {
+            builder.properties(properties);
+        }
+
+        // Set the default value for current producer builder.
+        // We use non-partitioned producer by default. This wouldn't be changed in the future.
+        builder.blockIfQueueFull(true)
+                .messageRoutingMode(SinglePartition)
+                .enableMultiSchema(false)
+                .autoUpdatePartitions(false)
+                .accessMode(Shared)
+                .enableLazyStartPartitionedProducers(false);
+
+        return builder;
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
new file mode 100644
index 0000000..e0ef7ff
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.config;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.connector.sink.Sink.InitContext;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfiguration;
+
+import org.apache.pulsar.client.api.Schema;
+
+import java.util.Objects;
+
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_MESSAGES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_RECOMMIT_TIMES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_TOPIC_METADATA_REFRESH_INTERVAL;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_DELIVERY_GUARANTEE;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_TRANSACTION_TIMEOUT;
+
+/** The configured class for pulsar sink. */
+@PublicEvolving
+public class SinkConfiguration extends PulsarConfiguration {
+    private static final long serialVersionUID = 4941360605051251153L;
+
+    private final DeliveryGuarantee deliveryGuarantee;
+    private final long transactionTimeoutMillis;
+    private final long topicMetadataRefreshInterval;
+    private final int partitionSwitchSize;
+    private final boolean enableSchemaEvolution;
+    private final int maxPendingMessages;
+    private final int maxRecommitTimes;
+
+    public SinkConfiguration(Configuration configuration) {
+        super(configuration);
+
+        this.deliveryGuarantee = get(PULSAR_WRITE_DELIVERY_GUARANTEE);
+        this.transactionTimeoutMillis = getLong(PULSAR_WRITE_TRANSACTION_TIMEOUT);
+        this.topicMetadataRefreshInterval = getLong(PULSAR_TOPIC_METADATA_REFRESH_INTERVAL);
+        this.partitionSwitchSize = getInteger(PULSAR_BATCHING_MAX_MESSAGES);
+        this.enableSchemaEvolution = get(PULSAR_WRITE_SCHEMA_EVOLUTION);
+        this.maxPendingMessages = get(PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS);
+        this.maxRecommitTimes = get(PULSAR_MAX_RECOMMIT_TIMES);
+    }
+
+    /** The delivery guarantee changes the behavior of {@code PulsarWriter}. */
+    public DeliveryGuarantee getDeliveryGuarantee() {
+        return deliveryGuarantee;
+    }
+
+    /**
+     * Pulsar's transactions have a timeout mechanism for the uncommitted transaction. We use
+     * transactions for making sure the message could be written only once. Since the checkpoint
+     * interval couldn't be acquired from {@link InitContext}, we have to expose this option. Make
+     * sure this value is greater than the checkpoint interval.
+     */
+    public long getTransactionTimeoutMillis() {
+        return transactionTimeoutMillis;
+    }
+
+    /**
+     * Auto-update the topic metadata in a fixed interval (in ms). The default value is 30 minutes.
+     */
+    public long getTopicMetadataRefreshInterval() {
+        return topicMetadataRefreshInterval;
+    }
+
+    /**
+     * Switch the partition to write when we have written the given size of messages. It's used for
+     * a round-robin topic router.
+     */
+    public int getPartitionSwitchSize() {
+        return partitionSwitchSize;
+    }
+
+    /**
+     * If we should serialize and send the message with a specified Pulsar {@link Schema} instead
+     * the default {@link Schema#BYTES}. This switch is only used for {@code PulsarSchemaWrapper}.
+     */
+    public boolean isEnableSchemaEvolution() {
+        return enableSchemaEvolution;
+    }
+
+    /**
+     * Pulsar message is sent asynchronously. Set this option for limiting the pending messages in a
+     * Pulsar writer instance.
+     */
+    public int getMaxPendingMessages() {
+        return maxPendingMessages;
+    }
+
+    /** The maximum allowed recommitting time for a Pulsar transaction. */
+    public int getMaxRecommitTimes() {
+        return maxRecommitTimes;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+        SinkConfiguration that = (SinkConfiguration) o;
+        // Compare every derived field, including deliveryGuarantee; it was previously
+        // missing here and in hashCode(), which broke the equals/hashCode contract for
+        // configurations that differ only in their delivery guarantee.
+        return deliveryGuarantee == that.deliveryGuarantee
+                && transactionTimeoutMillis == that.transactionTimeoutMillis
+                && topicMetadataRefreshInterval == that.topicMetadataRefreshInterval
+                && partitionSwitchSize == that.partitionSwitchSize
+                && enableSchemaEvolution == that.enableSchemaEvolution
+                && maxPendingMessages == that.maxPendingMessages
+                && maxRecommitTimes == that.maxRecommitTimes;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                super.hashCode(),
+                deliveryGuarantee,
+                transactionTimeoutMillis,
+                topicMetadataRefreshInterval,
+                partitionSwitchSize,
+                enableSchemaEvolution,
+                maxPendingMessages,
+                maxRecommitTimes);
+    }
+}

[flink] 05/09: [FLINK-26024][connector/pulsar] Create a PulsarSerializationSchema for better records serialization.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 0e72bfede70a00146f466b3e7491fc0f83eb6c41
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Tue Feb 15 22:21:50 2022 +0800

    [FLINK-26024][connector/pulsar] Create a PulsarSerializationSchema for better records serialization.
---
 .../pulsar/sink/writer/message/PulsarMessage.java  | 111 ++++++++++++++++++
 .../sink/writer/message/PulsarMessageBuilder.java  | 127 ++++++++++++++++++++
 .../writer/serializer/PulsarSchemaWrapper.java     |  59 ++++++++++
 .../serializer/PulsarSerializationSchema.java      | 129 +++++++++++++++++++++
 .../PulsarSerializationSchemaWrapper.java          |  59 ++++++++++
 5 files changed, 485 insertions(+)

diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessage.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessage.java
new file mode 100644
index 0000000..0c45763
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessage.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.message;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+
+import javax.annotation.Nullable;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The message instance to be used with {@link TypedMessageBuilder}. We create this class because
+ * Pulsar lacks such a POJO class.
+ */
+@PublicEvolving
+public class PulsarMessage<T> {
+
+    // All fields are assigned once in the package-private constructor and never
+    // mutated afterwards, so instances are effectively immutable value objects.
+    @Nullable private final byte[] orderingKey;
+    @Nullable private final String key;
+    private final long eventTime;
+    private final Schema<T> schema;
+    @Nullable private final T value;
+    @Nullable private final Map<String, String> properties;
+    @Nullable private final Long sequenceId;
+    @Nullable private final List<String> replicationClusters;
+    private final boolean disableReplication;
+
+    /** Package private for building this class only in {@link PulsarMessageBuilder}. */
+    PulsarMessage(
+            @Nullable byte[] orderingKey,
+            @Nullable String key,
+            long eventTime,
+            Schema<T> schema,
+            @Nullable T value,
+            @Nullable Map<String, String> properties,
+            @Nullable Long sequenceId,
+            @Nullable List<String> replicationClusters,
+            boolean disableReplication) {
+        this.orderingKey = orderingKey;
+        this.key = key;
+        this.eventTime = eventTime;
+        this.schema = schema;
+        this.value = value;
+        this.properties = properties;
+        this.sequenceId = sequenceId;
+        this.replicationClusters = replicationClusters;
+        this.disableReplication = disableReplication;
+    }
+
+    /**
+     * The ordering key for message dispatch. Returns the internal array directly (no
+     * defensive copy), so callers must not mutate the returned array.
+     */
+    @Nullable
+    public byte[] getOrderingKey() {
+        return orderingKey;
+    }
+
+    /** The partitioning/compaction key of the message, or {@code null} if none was set. */
+    @Nullable
+    public String getKey() {
+        return key;
+    }
+
+    /** The application-defined event time; 0 when it was never set on the builder. */
+    public long getEventTime() {
+        return eventTime;
+    }
+
+    /** The Pulsar {@link Schema} used to serialize {@link #getValue()}. Never null. */
+    public Schema<T> getSchema() {
+        return schema;
+    }
+
+    /** The payload. A {@code null} value is a tombstone message in Pulsar. */
+    @Nullable
+    public T getValue() {
+        return value;
+    }
+
+    /** Application-defined message properties, or {@code null} when none were set. */
+    @Nullable
+    public Map<String, String> getProperties() {
+        return properties;
+    }
+
+    /** The explicit sequence id, or {@code null} to let the producer assign one. */
+    @Nullable
+    public Long getSequenceId() {
+        return sequenceId;
+    }
+
+    /** Clusters this message should be replicated to, or {@code null} for the default. */
+    @Nullable
+    public List<String> getReplicationClusters() {
+        return replicationClusters;
+    }
+
+    /** Whether geo-replication is disabled for this message. */
+    public boolean isDisableReplication() {
+        return disableReplication;
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessageBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessageBuilder.java
new file mode 100644
index 0000000..9330d09
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/message/PulsarMessageBuilder.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.message;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.pulsar.sink.writer.router.KeyHashTopicRouter;
+
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/** {@link TypedMessageBuilder} wrapper for providing the required method for end-users. */
+@PublicEvolving
+public class PulsarMessageBuilder<T> {
+
+    private byte[] orderingKey;
+    private String key;
+    private long eventTime;
+    // NOTE(review): package-private (not private) on purpose — presumably read by
+    // other classes in this package; confirm before tightening the visibility.
+    Schema<T> schema;
+    private T value;
+    private Map<String, String> properties = new HashMap<>();
+    private Long sequenceId;
+    private List<String> replicationClusters;
+    private boolean disableReplication = false;
+
+    /** Method wrapper of {@link TypedMessageBuilder#orderingKey(byte[])}. */
+    public PulsarMessageBuilder<T> orderingKey(byte[] orderingKey) {
+        this.orderingKey = checkNotNull(orderingKey);
+        return this;
+    }
+
+    /**
+     * Property {@link TypedMessageBuilder#key(String)}. This property would also be used in {@link
+     * KeyHashTopicRouter}.
+     */
+    public PulsarMessageBuilder<T> key(String key) {
+        this.key = checkNotNull(key);
+        // Bug fix: previously returned null, which broke fluent chaining such as
+        // builder.key(k).value(...) with a NullPointerException.
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#eventTime(long)}. */
+    public PulsarMessageBuilder<T> eventTime(long eventTime) {
+        this.eventTime = eventTime;
+        return this;
+    }
+
+    /**
+     * Method wrapper of {@link TypedMessageBuilder#value(Object)}. You can pass any schema for
+     * validating it on Pulsar. This is called schema evolution. But the topic on Pulsar should bind
+     * to a fixed {@link Schema}. You can't have multiple schemas on the same topic unless it's
+     * compatible with each other.
+     *
+     * @param value The value could be null, which is called tombstones message in Pulsar. (It will
+     *     be skipped and considered deleted.)
+     */
+    public PulsarMessageBuilder<T> value(Schema<T> schema, T value) {
+        this.schema = checkNotNull(schema);
+        this.value = value;
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#property(String, String)}. */
+    public PulsarMessageBuilder<T> property(String key, String value) {
+        this.properties.put(checkNotNull(key), checkNotNull(value));
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#properties(Map)}. */
+    public PulsarMessageBuilder<T> properties(Map<String, String> properties) {
+        this.properties.putAll(checkNotNull(properties));
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#sequenceId(long)}. */
+    public PulsarMessageBuilder<T> sequenceId(long sequenceId) {
+        this.sequenceId = sequenceId;
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#replicationClusters(List)}. */
+    public PulsarMessageBuilder<T> replicationClusters(List<String> replicationClusters) {
+        this.replicationClusters = checkNotNull(replicationClusters);
+        return this;
+    }
+
+    /** Method wrapper of {@link TypedMessageBuilder#disableReplication()}. */
+    public PulsarMessageBuilder<T> disableReplication() {
+        this.disableReplication = true;
+        return this;
+    }
+
+    /**
+     * Assembles the immutable {@link PulsarMessage}. A schema must have been supplied via
+     * {@link #value(Schema, Object)} beforehand; every other field is optional.
+     */
+    public PulsarMessage<T> build() {
+        checkNotNull(schema, "Schema should be provided.");
+
+        return new PulsarMessage<>(
+                orderingKey,
+                key,
+                eventTime,
+                schema,
+                value,
+                properties,
+                sequenceId,
+                replicationClusters,
+                disableReplication);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSchemaWrapper.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSchemaWrapper.java
new file mode 100644
index 0000000..0d5aaf0
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSchemaWrapper.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.serializer;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.pulsar.common.schema.PulsarSchema;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessage;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessageBuilder;
+
+import org.apache.pulsar.client.api.Schema;
+
+/**
+ * Wraps a Pulsar {@link Schema} into a PulsarSerializationSchema. Schema evolution is supported
+ * out of the box by this implementation.
+ */
+@Internal
+public class PulsarSchemaWrapper<IN> implements PulsarSerializationSchema<IN> {
+    private static final long serialVersionUID = -2567052498398184194L;
+
+    /** The serializable wrapper around the Pulsar {@link Schema} used for encoding. */
+    private final PulsarSchema<IN> pulsarSchema;
+
+    public PulsarSchemaWrapper(PulsarSchema<IN> pulsarSchema) {
+        this.pulsarSchema = pulsarSchema;
+    }
+
+    @Override
+    public PulsarMessage<?> serialize(IN element, PulsarSinkContext sinkContext) {
+        Schema<IN> schema = pulsarSchema.getPulsarSchema();
+
+        // With schema evolution enabled the element travels as-is, letting Pulsar
+        // validate it against the topic's registered schema.
+        if (sinkContext.isEnableSchemaEvolution()) {
+            return new PulsarMessageBuilder<IN>().value(schema, element).build();
+        }
+
+        // Otherwise we pre-encode locally and ship opaque bytes with Schema.BYTES.
+        byte[] encoded = schema.encode(element);
+        return new PulsarMessageBuilder<byte[]>().value(Schema.BYTES, encoded).build();
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchema.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchema.java
new file mode 100644
index 0000000..da7f706
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchema.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.serializer;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema.InitializationContext;
+import org.apache.flink.connector.pulsar.common.schema.PulsarSchema;
+import org.apache.flink.connector.pulsar.sink.PulsarSinkBuilder;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessage;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessageBuilder;
+
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+import org.apache.pulsar.common.schema.KeyValue;
+
+import java.io.Serializable;
+
+/**
+ * The serialization schema for how to serialize records into Pulsar.
+ *
+ * @param <IN> The message type send to Pulsar.
+ */
+@PublicEvolving
+public interface PulsarSerializationSchema<IN> extends Serializable {
+
+    /**
+     * One-time setup hook, invoked before any call to {@link #serialize(Object,
+     * PulsarSinkContext)}.
+     *
+     * <p>The supplied {@link InitializationContext} gives access to additional features such as
+     * registering user metrics.
+     *
+     * @param initializationContext Contextual information that can be used during initialization.
+     * @param sinkContext Runtime information i.e. partitions, subtaskId.
+     * @param sinkConfiguration All the configure options for the Pulsar sink. You can add custom
+     *     options.
+     */
+    default void open(
+            InitializationContext initializationContext,
+            PulsarSinkContext sinkContext,
+            SinkConfiguration sinkConfiguration)
+            throws Exception {
+        // Nothing to do by default.
+    }
+
+    /**
+     * Serializes the given element into bytes with {@link Schema#BYTES}, or converts it to a new
+     * type of instance with a {@link Schema}. The returned {@link PulsarMessage} is built via
+     * {@link PulsarMessageBuilder}, whose methods mirror the corresponding methods in {@link
+     * TypedMessageBuilder}.
+     *
+     * @param element Element to be serialized.
+     * @param sinkContext Context to provide extra information.
+     */
+    PulsarMessage<?> serialize(IN element, PulsarSinkContext sinkContext);
+
+    /**
+     * Creates a PulsarSerializationSchema from Flink's own {@link SerializationSchema}. Each
+     * record is serialized to a byte array and sent to Pulsar with {@link Schema#BYTES}.
+     */
+    static <T> PulsarSerializationSchema<T> flinkSchema(
+            SerializationSchema<T> serializationSchema) {
+        return new PulsarSerializationSchemaWrapper<>(serializationSchema);
+    }
+
+    /**
+     * Creates a PulsarSerializationSchema from a Pulsar {@link Schema} instance. Sending records
+     * with the given schema can be enabled via {@link PulsarSinkBuilder#enableSchemaEvolution()};
+     * by default records are serialized to bytes and sent as {@link Schema#BYTES}.
+     *
+     * <p>Only <a
+     * href="https://pulsar.apache.org/docs/en/schema-understand/#primitive-type">primitive
+     * types</a> are supported here.
+     */
+    static <T> PulsarSerializationSchema<T> pulsarSchema(Schema<T> schema) {
+        return new PulsarSchemaWrapper<>(new PulsarSchema<>(schema));
+    }
+
+    /**
+     * Creates a PulsarSerializationSchema from a Pulsar {@link Schema} instance. Sending records
+     * with the given schema can be enabled via {@link PulsarSinkBuilder#enableSchemaEvolution()};
+     * by default records are serialized to bytes and sent as {@link Schema#BYTES}.
+     *
+     * <p>Only <a href="https://pulsar.apache.org/docs/en/schema-understand/#struct">struct
+     * types</a> are supported here.
+     */
+    static <T> PulsarSerializationSchema<T> pulsarSchema(Schema<T> schema, Class<T> typeClass) {
+        return new PulsarSchemaWrapper<>(new PulsarSchema<>(schema, typeClass));
+    }
+
+    /**
+     * Creates a PulsarSerializationSchema from a Pulsar {@link Schema} instance. Sending records
+     * with the given schema can be enabled via {@link PulsarSinkBuilder#enableSchemaEvolution()};
+     * by default records are serialized to bytes and sent as {@link Schema#BYTES}.
+     *
+     * <p>Only <a href="https://pulsar.apache.org/docs/en/schema-understand/#keyvalue">keyvalue
+     * types</a> are supported here.
+     */
+    static <K, V> PulsarSerializationSchema<KeyValue<K, V>> pulsarSchema(
+            Schema<KeyValue<K, V>> schema, Class<K> keyClass, Class<V> valueClass) {
+        return new PulsarSchemaWrapper<>(new PulsarSchema<>(schema, keyClass, valueClass));
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchemaWrapper.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchemaWrapper.java
new file mode 100644
index 0000000..716d2db
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/serializer/PulsarSerializationSchemaWrapper.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.serializer;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.serialization.SerializationSchema.InitializationContext;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessage;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessageBuilder;
+
+import org.apache.pulsar.client.api.Schema;
+
+/** Wrap the Flink's SerializationSchema into PulsarSerializationSchema. */
+@Internal
+public class PulsarSerializationSchemaWrapper<IN> implements PulsarSerializationSchema<IN> {
+    private static final long serialVersionUID = 4948155843623161119L;
+
+    /** The Flink schema that produces the raw byte payload for every record. */
+    private final SerializationSchema<IN> serializationSchema;
+
+    public PulsarSerializationSchemaWrapper(SerializationSchema<IN> serializationSchema) {
+        this.serializationSchema = serializationSchema;
+    }
+
+    @Override
+    public void open(
+            InitializationContext initializationContext,
+            PulsarSinkContext sinkContext,
+            SinkConfiguration sinkConfiguration)
+            throws Exception {
+        // Forward the lifecycle call so the wrapped schema can register metrics etc.
+        serializationSchema.open(initializationContext);
+    }
+
+    @Override
+    public PulsarMessage<?> serialize(IN element, PulsarSinkContext sinkContext) {
+        // The wrapped Flink schema always yields raw bytes, so ship them as Schema.BYTES.
+        byte[] payload = serializationSchema.serialize(element);
+        return new PulsarMessageBuilder<byte[]>().value(Schema.BYTES, payload).build();
+    }
+}

[flink] 01/09: [FLINK-24246][connector/pulsar] Bump PulsarClient version to latest 2.9.1

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 1602e4b7d26cf52cea993c410769b7b15a672aff
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Wed Feb 9 13:00:53 2022 +0800

    [FLINK-24246][connector/pulsar] Bump PulsarClient version to latest 2.9.1
    
    1. Bump the pulsar-client-all version in pom file.
    2. Exclude useless dependencies for pulsar-client-all.
    3. Bump the Pulsar docker version.
    4. Change the dependencies to pass the tests.
    5. Drop PulsarTransactionUtils and fix compile issues in tests.
    6. Add bouncycastle to Pulsar e2e tests.
---
 flink-connectors/flink-connector-pulsar/pom.xml    |  74 +++++++++++--
 .../common/utils/PulsarTransactionUtils.java       | 118 ---------------------
 .../split/PulsarUnorderedPartitionSplitReader.java |   3 +-
 .../PulsarDeserializationSchemaTest.java           |   2 +-
 .../src/main/resources/META-INF/NOTICE             |  16 +--
 .../flink-end-to-end-tests-pulsar/pom.xml          |  43 +++++++-
 .../FlinkContainerWithPulsarEnvironment.java       |   5 +
 .../org/apache/flink/util/DockerImageVersions.java |   2 +-
 8 files changed, 124 insertions(+), 139 deletions(-)

diff --git a/flink-connectors/flink-connector-pulsar/pom.xml b/flink-connectors/flink-connector-pulsar/pom.xml
index 87b6ba0..45047eb 100644
--- a/flink-connectors/flink-connector-pulsar/pom.xml
+++ b/flink-connectors/flink-connector-pulsar/pom.xml
@@ -36,12 +36,14 @@ under the License.
 	<packaging>jar</packaging>
 
 	<properties>
-		<pulsar.version>2.8.0</pulsar.version>
+		<pulsar.version>2.9.1</pulsar.version>
 
 		<!-- Test Libraries -->
 		<protobuf-maven-plugin.version>0.6.1</protobuf-maven-plugin.version>
-		<commons-lang3.version>3.11</commons-lang3.version>
-		<grpc.version>1.33.0</grpc.version>
+		<pulsar-commons-lang3.version>3.11</pulsar-commons-lang3.version>
+		<pulsar-zookeeper.version>3.6.3</pulsar-zookeeper.version>
+		<pulsar-netty.version>4.1.72.Final</pulsar-netty.version>
+		<pulsar-grpc.version>1.33.0</pulsar-grpc.version>
 	</properties>
 
 	<dependencies>
@@ -138,12 +140,22 @@ under the License.
 			<version>${pulsar.version}</version>
 			<scope>test</scope>
 		</dependency>
+
 		<!-- Pulsar use a newer commons-lang3 in broker. -->
 		<!-- Bump the version only for testing. -->
 		<dependency>
 			<groupId>org.apache.commons</groupId>
 			<artifactId>commons-lang3</artifactId>
-			<version>${commons-lang3.version}</version>
+			<version>${pulsar-commons-lang3.version}</version>
+			<scope>test</scope>
+		</dependency>
+
+		<!-- Pulsar use a newer zookeeper in broker. -->
+		<!-- Bump the version only for testing. -->
+		<dependency>
+			<groupId>org.apache.zookeeper</groupId>
+			<artifactId>zookeeper</artifactId>
+			<version>${pulsar-zookeeper.version}</version>
 			<scope>test</scope>
 		</dependency>
 
@@ -156,9 +168,41 @@ under the License.
 			<version>${pulsar.version}</version>
 			<exclusions>
 				<exclusion>
+					<groupId>com.sun.activation</groupId>
+					<artifactId>javax.activation</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>jakarta.activation</groupId>
+					<artifactId>jakarta.activation-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>jakarta.ws.rs</groupId>
+					<artifactId>jakarta.ws.rs-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>jakarta.xml.bind</groupId>
+					<artifactId>jakarta.xml.bind-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>javax.validation</groupId>
+					<artifactId>validation-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>javax.xml.bind</groupId>
+					<artifactId>jaxb-api</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>net.jcip</groupId>
+					<artifactId>jcip-annotations</artifactId>
+				</exclusion>
+				<exclusion>
 					<groupId>org.apache.pulsar</groupId>
 					<artifactId>pulsar-package-core</artifactId>
 				</exclusion>
+				<exclusion>
+					<groupId>com.beust</groupId>
+					<artifactId>jcommander</artifactId>
+				</exclusion>
 			</exclusions>
 		</dependency>
 
@@ -171,13 +215,23 @@ under the License.
 		</dependency>
 	</dependencies>
 
-	<!-- gRPC use version range which don't support by flink ci. -->
+
 	<dependencyManagement>
 		<dependencies>
+			<!-- Pulsar use higher gRPC version. -->
 			<dependency>
 				<groupId>io.grpc</groupId>
 				<artifactId>grpc-bom</artifactId>
-				<version>${grpc.version}</version>
+				<version>${pulsar-grpc.version}</version>
+				<type>pom</type>
+				<scope>import</scope>
+			</dependency>
+
+			<!-- Pulsar use higher netty version. -->
+			<dependency>
+				<groupId>io.netty</groupId>
+				<artifactId>netty-bom</artifactId>
+				<version>${pulsar-netty.version}</version>
 				<type>pom</type>
 				<scope>import</scope>
 			</dependency>
@@ -200,7 +254,9 @@ under the License.
 				<configuration>
 					<!-- Enforce single fork execution due to heavy mini cluster use in the tests -->
 					<forkCount>1</forkCount>
-					<argLine>-Xms256m -Xmx2048m -Dmvn.forkNumber=${surefire.forkNumber} -XX:-UseGCOverheadLimit -Duser.country=US -Duser.language=en</argLine>
+					<argLine>-Xms256m -Xmx2048m -Dmvn.forkNumber=${surefire.forkNumber}
+						-XX:-UseGCOverheadLimit -Duser.country=US -Duser.language=en
+					</argLine>
 				</configuration>
 			</plugin>
 			<plugin>
@@ -222,7 +278,9 @@ under the License.
 					<outputDirectory>
 						${project.build.directory}/generated-test-sources/protobuf/java
 					</outputDirectory>
-					<protocArtifact>com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier}</protocArtifact>
+					<protocArtifact>
+						com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier}
+					</protocArtifact>
 				</configuration>
 				<executions>
 					<execution>
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java
deleted file mode 100644
index ef54779..0000000
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.pulsar.common.utils;
-
-import org.apache.flink.annotation.Internal;
-
-import org.apache.pulsar.client.api.transaction.Transaction;
-import org.apache.pulsar.client.api.transaction.TxnID;
-import org.apache.pulsar.client.impl.transaction.TransactionImpl;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
-
-/**
- * Transaction was introduced into pulsar since 2.7.0, but the interface {@link Transaction} didn't
- * provide a id method until 2.8.1. We have to add this util for acquiring the {@link TxnID} for
- * compatible consideration.
- *
- * <p>TODO Remove this hack after pulsar 2.8.1 release.
- */
-@Internal
-@SuppressWarnings("java:S3011")
-public final class PulsarTransactionUtils {
-
-    private static volatile Field mostBitsField;
-    private static volatile Field leastBitsField;
-
-    private PulsarTransactionUtils() {
-        // No public constructor
-    }
-
-    public static TxnID getId(Transaction transaction) {
-        // 2.8.1 and after.
-        try {
-            Method getId = Transaction.class.getDeclaredMethod("getTxnID");
-            return (TxnID) getId.invoke(transaction);
-        } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
-            // 2.8.0 and before.
-            TransactionImpl impl = (TransactionImpl) transaction;
-            Long txnIdMostBits = getTxnIdMostBits(impl);
-            Long txnIdLeastBits = getTxnIdLeastBits(impl);
-
-            checkNotNull(txnIdMostBits, "Failed to get txnIdMostBits");
-            checkNotNull(txnIdLeastBits, "Failed to get txnIdLeastBits");
-
-            return new TxnID(txnIdMostBits, txnIdLeastBits);
-        }
-    }
-
-    private static Long getTxnIdMostBits(TransactionImpl transaction) {
-        if (mostBitsField == null) {
-            synchronized (PulsarTransactionUtils.class) {
-                if (mostBitsField == null) {
-                    try {
-                        mostBitsField = TransactionImpl.class.getDeclaredField("txnIdMostBits");
-                        mostBitsField.setAccessible(true);
-                    } catch (NoSuchFieldException e) {
-                        // Nothing to do for this exception.
-                    }
-                }
-            }
-        }
-
-        if (mostBitsField != null) {
-            try {
-                return (Long) mostBitsField.get(transaction);
-            } catch (IllegalAccessException e) {
-                // Nothing to do for this exception.
-            }
-        }
-
-        return null;
-    }
-
-    private static Long getTxnIdLeastBits(TransactionImpl transaction) {
-        if (leastBitsField == null) {
-            synchronized (PulsarTransactionUtils.class) {
-                if (leastBitsField == null) {
-                    try {
-                        leastBitsField = TransactionImpl.class.getDeclaredField("txnIdLeastBits");
-                        leastBitsField.setAccessible(true);
-                    } catch (NoSuchFieldException e) {
-                        // Nothing to do for this exception.
-                    }
-                }
-            }
-        }
-
-        if (leastBitsField != null) {
-            try {
-                return (Long) leastBitsField.get(transaction);
-            } catch (IllegalAccessException e) {
-                // Nothing to do for this exception.
-            }
-        }
-
-        return null;
-    }
-}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
index 846101d..7262863 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/reader/split/PulsarUnorderedPartitionSplitReader.java
@@ -20,7 +20,6 @@ package org.apache.flink.connector.pulsar.source.reader.split;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.connector.pulsar.common.utils.PulsarTransactionUtils;
 import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.reader.deserializer.PulsarDeserializationSchema;
 import org.apache.flink.connector.pulsar.source.reader.source.PulsarUnorderedSourceReader;
@@ -155,7 +154,7 @@ public class PulsarUnorderedPartitionSplitReader<OUT> extends PulsarPartitionSpl
 
         // Avoiding NP problem when Pulsar don't get the message before Flink checkpoint.
         if (uncommittedTransaction != null) {
-            TxnID txnID = PulsarTransactionUtils.getId(uncommittedTransaction);
+            TxnID txnID = uncommittedTransaction.getTxnID();
             this.uncommittedTransaction = newTransaction();
             state.setUncommittedTransactionId(txnID);
         }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
index aa4bcee..48e6e7a 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/reader/deserializer/PulsarDeserializationSchemaTest.java
@@ -113,7 +113,7 @@ class PulsarDeserializationSchemaTest {
         MessageMetadata metadata = new MessageMetadata();
         ByteBuffer payload = ByteBuffer.wrap(bytes);
 
-        return MessageImpl.create(metadata, payload, Schema.BYTES);
+        return MessageImpl.create(metadata, payload, Schema.BYTES, "");
     }
 
     /** This collector is used for collecting only one message. Used for test purpose. */
diff --git a/flink-connectors/flink-sql-connector-pulsar/src/main/resources/META-INF/NOTICE b/flink-connectors/flink-sql-connector-pulsar/src/main/resources/META-INF/NOTICE
index 79ebbfc..56ad187 100644
--- a/flink-connectors/flink-sql-connector-pulsar/src/main/resources/META-INF/NOTICE
+++ b/flink-connectors/flink-sql-connector-pulsar/src/main/resources/META-INF/NOTICE
@@ -6,12 +6,12 @@ The Apache Software Foundation (http://www.apache.org/).
 
 This project bundles the following dependencies under the Apache Software License 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
 
-- org.apache.pulsar:bouncy-castle-bc:pkg:2.8.0
-- org.apache.pulsar:pulsar-client-admin-api:2.8.0
-- org.apache.pulsar:pulsar-client-all:2.8.0
-- org.apache.pulsar:pulsar-client-api:2.8.0
-- org.bouncycastle:bcpkix-jdk15on:1.68
-- org.bouncycastle:bcprov-ext-jdk15on:1.68
-- org.bouncycastle:bcprov-jdk15on:1.68
-- org.bouncycastle:bcutil-jdk15on:1.68
+- org.apache.pulsar:bouncy-castle-bc:pkg:2.9.1
+- org.apache.pulsar:pulsar-client-admin-api:2.9.1
+- org.apache.pulsar:pulsar-client-all:2.9.1
+- org.apache.pulsar:pulsar-client-api:2.9.1
+- org.bouncycastle:bcpkix-jdk15on:1.69
+- org.bouncycastle:bcprov-ext-jdk15on:1.69
+- org.bouncycastle:bcprov-jdk15on:1.69
+- org.bouncycastle:bcutil-jdk15on:1.69
 - org.slf4j:jul-to-slf4j:1.7.25
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/pom.xml b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/pom.xml
index e7caf8b..7c87ec7 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/pom.xml
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/pom.xml
@@ -31,7 +31,8 @@ under the License.
 	<name>Flink : E2E Tests : Pulsar</name>
 
 	<properties>
-		<pulsar.version>2.8.0</pulsar.version>
+		<pulsar.version>2.9.1</pulsar.version>
+		<bouncycastle.version>1.69</bouncycastle.version>
 	</properties>
 
 	<dependencies>
@@ -105,6 +106,46 @@ under the License.
 							<type>jar</type>
 							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
 						</artifactItem>
+						<dependency>
+							<groupId>org.apache.pulsar</groupId>
+							<artifactId>bouncy-castle-bc</artifactId>
+							<version>${pulsar.version}</version>
+							<destFileName>bouncy-castle-bc.jar</destFileName>
+							<type>jar</type>
+							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+						</dependency>
+						<dependency>
+							<groupId>org.bouncycastle</groupId>
+							<artifactId>bcpkix-jdk15on</artifactId>
+							<version>${bouncycastle.version}</version>
+							<destFileName>bcpkix-jdk15on.jar</destFileName>
+							<type>jar</type>
+							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+						</dependency>
+						<dependency>
+							<groupId>org.bouncycastle</groupId>
+							<artifactId>bcprov-jdk15on</artifactId>
+							<version>${bouncycastle.version}</version>
+							<destFileName>bcprov-jdk15on.jar</destFileName>
+							<type>jar</type>
+							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+						</dependency>
+						<dependency>
+							<groupId>org.bouncycastle</groupId>
+							<artifactId>bcutil-jdk15on</artifactId>
+							<version>${bouncycastle.version}</version>
+							<destFileName>bcutil-jdk15on.jar</destFileName>
+							<type>jar</type>
+							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+						</dependency>
+						<dependency>
+							<groupId>org.bouncycastle</groupId>
+							<artifactId>bcprov-ext-jdk15on</artifactId>
+							<version>${bouncycastle.version}</version>
+							<destFileName>bcprov-ext-jdk15on.jar</destFileName>
+							<type>jar</type>
+							<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+						</dependency>
 						<artifactItem>
 							<groupId>org.slf4j</groupId>
 							<artifactId>jul-to-slf4j</artifactId>
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/FlinkContainerWithPulsarEnvironment.java b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/FlinkContainerWithPulsarEnvironment.java
index 52957fc..ccfe277 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/FlinkContainerWithPulsarEnvironment.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-pulsar/src/test/java/org/apache/flink/tests/util/pulsar/common/FlinkContainerWithPulsarEnvironment.java
@@ -37,6 +37,11 @@ public class FlinkContainerWithPulsarEnvironment extends FlinkContainerTestEnvir
                 resourcePath("pulsar-client-all.jar"),
                 resourcePath("pulsar-client-api.jar"),
                 resourcePath("pulsar-admin-api.jar"),
+                resourcePath("bouncy-castle-bc.jar"),
+                resourcePath("bcpkix-jdk15on.jar"),
+                resourcePath("bcprov-jdk15on.jar"),
+                resourcePath("bcutil-jdk15on.jar"),
+                resourcePath("bcprov-ext-jdk15on.jar"),
                 resourcePath("jul-to-slf4j.jar"));
     }
 
diff --git a/flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/util/DockerImageVersions.java b/flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/util/DockerImageVersions.java
index 04298b4..273cee8 100644
--- a/flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/util/DockerImageVersions.java
+++ b/flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/util/DockerImageVersions.java
@@ -42,7 +42,7 @@ public class DockerImageVersions {
 
     public static final String LOCALSTACK = "localstack/localstack:0.13.3";
 
-    public static final String PULSAR = "apachepulsar/pulsar:2.8.0";
+    public static final String PULSAR = "apachepulsar/pulsar:2.9.1";
 
     public static final String CASSANDRA_3 = "cassandra:3.0";
 

[flink] 08/09: [FLINK-26026][connector/pulsar] Create unit tests for Pulsar sink connector.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 714dd80df2ed00b370af762e8684d500645e2b3c
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Thu Feb 10 19:18:56 2022 +0800

    [FLINK-26026][connector/pulsar] Create unit tests for Pulsar sink connector.
---
 .../pulsar/sink/PulsarSinkBuilderTest.java         | 108 ++++++++++
 .../connector/pulsar/sink/PulsarSinkITCase.java    |  99 +++++++++
 .../committer/PulsarCommittableSerializerTest.java |  53 +++++
 .../pulsar/sink/writer/PulsarWriterTest.java       | 199 ++++++++++++++++++
 .../sink/writer/router/KeyHashTopicRouterTest.java | 111 ++++++++++
 .../writer/router/RoundRobinTopicRouterTest.java   |  88 ++++++++
 .../writer/topic/TopicMetadataListenerTest.java    | 140 +++++++++++++
 .../writer/topic/TopicProducerRegisterTest.java    |  91 ++++++++
 .../pulsar/testutils/function/ControlSource.java   | 228 +++++++++++++++++++++
 9 files changed, 1117 insertions(+)

diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
new file mode 100644
index 0000000..188e718
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.configuration.Configuration;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.Properties;
+
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_SEND_TIMEOUT_MS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
+import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.CUSTOM;
+import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.MESSAGE_KEY_HASH;
+import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.ROUND_ROBIN;
+import static org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema.flinkSchema;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/** Unit tests for {@link PulsarSinkBuilder}. */
+class PulsarSinkBuilderTest {
+
+    @Test
+    void topicNameCouldBeSetOnlyOnce() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+        builder.setTopics("a", "b");
+
+        assertThrows(IllegalStateException.class, () -> builder.setTopics("c"));
+    }
+
+    @Test
+    void topicRoutingModeCouldNotBeCustom() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+
+        assertDoesNotThrow(() -> builder.setTopicRoutingMode(ROUND_ROBIN));
+        assertDoesNotThrow(() -> builder.setTopicRoutingMode(MESSAGE_KEY_HASH));
+        assertThrows(IllegalArgumentException.class, () -> builder.setTopicRoutingMode(CUSTOM));
+    }
+
+    @Test
+    void setConfigCouldNotOverrideExistedConfigs() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+        builder.setConfig(PULSAR_SEND_TIMEOUT_MS, 1L);
+
+        assertDoesNotThrow(() -> builder.setConfig(PULSAR_SEND_TIMEOUT_MS, 1L));
+
+        assertThrows(
+                IllegalArgumentException.class,
+                () -> builder.setConfig(PULSAR_SEND_TIMEOUT_MS, 2L));
+
+        Configuration configuration = new Configuration();
+        configuration.set(PULSAR_SEND_TIMEOUT_MS, 3L);
+        assertThrows(IllegalArgumentException.class, () -> builder.setConfig(configuration));
+
+        Properties properties = new Properties();
+        properties.put(PULSAR_SEND_TIMEOUT_MS.key(), 4L);
+        assertThrows(IllegalArgumentException.class, () -> builder.setProperties(properties));
+    }
+
+    @Test
+    void serializationSchemaIsRequired() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+        NullPointerException exception = assertThrows(NullPointerException.class, builder::build);
+
+        assertThat(exception).hasMessage("serializationSchema must be set.");
+    }
+
+    @Test
+    void emptyTopicShouldHaveCustomTopicRouter() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+        builder.setSerializationSchema(flinkSchema(new SimpleStringSchema()));
+
+        NullPointerException exception = assertThrows(NullPointerException.class, builder::build);
+        assertThat(exception).hasMessage("No topic names or custom topic router are provided.");
+    }
+
+    @Test
+    void serviceUrlAndAdminUrlMustBeProvided() {
+        PulsarSinkBuilder<String> builder = PulsarSink.builder();
+        builder.setSerializationSchema(flinkSchema(new SimpleStringSchema()));
+        builder.setTopics("a", "b");
+        assertThrows(IllegalArgumentException.class, builder::build);
+
+        builder.setServiceUrl("pulsar://127.0.0.1:8888");
+        assertThrows(IllegalArgumentException.class, builder::build);
+
+        builder.setAdminUrl("http://127.0.0.1:9999");
+        assertDoesNotThrow(builder::build);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkITCase.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkITCase.java
new file mode 100644
index 0000000..ef67997
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkITCase.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.testutils.PulsarTestSuiteBase;
+import org.apache.flink.connector.pulsar.testutils.function.ControlSource;
+import org.apache.flink.core.testutils.AllCallbackWrapper;
+import org.apache.flink.runtime.minicluster.RpcServiceSharing;
+import org.apache.flink.runtime.testutils.MiniClusterExtension;
+import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.testutils.junit.SharedObjectsExtension;
+
+import org.junit.jupiter.api.extension.RegisterExtension;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema.flinkSchema;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/** Tests for using PulsarSink writing to a Pulsar cluster. */
+class PulsarSinkITCase extends PulsarTestSuiteBase {
+
+    private static final int PARALLELISM = 1;
+
+    public static final MiniClusterExtension MINI_CLUSTER_RESOURCE =
+            new MiniClusterExtension(
+                    new MiniClusterResourceConfiguration.Builder()
+                            .setNumberTaskManagers(1)
+                            .setNumberSlotsPerTaskManager(PARALLELISM)
+                            .setRpcServiceSharing(RpcServiceSharing.DEDICATED)
+                            .withHaLeadershipControl()
+                            .build());
+
+    @SuppressWarnings("unused")
+    @RegisterExtension
+    public static final AllCallbackWrapper<MiniClusterExtension> CALLBACK_WRAPPER =
+            new AllCallbackWrapper<>(MINI_CLUSTER_RESOURCE);
+
+    // Using this extension for creating shared reference which would be used in source function.
+    @RegisterExtension final SharedObjectsExtension sharedObjects = SharedObjectsExtension.create();
+
+    @ParameterizedTest
+    @EnumSource(DeliveryGuarantee.class)
+    void writeRecordsToPulsar(DeliveryGuarantee guarantee) throws Exception {
+        // A random topic with partition 1.
+        String topic = randomAlphabetic(8);
+        operator().createTopic(topic, 4);
+        int counts = ThreadLocalRandom.current().nextInt(100, 200);
+
+        ControlSource source =
+                new ControlSource(
+                        sharedObjects, operator(), topic, guarantee, counts, Duration.ofMinutes(5));
+        PulsarSink<String> sink =
+                PulsarSink.builder()
+                        .setServiceUrl(operator().serviceUrl())
+                        .setAdminUrl(operator().adminUrl())
+                        .setDeliveryGuarantee(guarantee)
+                        .setTopics(topic)
+                        .setSerializationSchema(flinkSchema(new SimpleStringSchema()))
+                        .build();
+
+        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+        env.setParallelism(PARALLELISM);
+        env.enableCheckpointing(100L);
+        env.addSource(source).sinkTo(sink);
+        env.execute();
+
+        List<String> expectedRecords = source.getExpectedRecords();
+        List<String> consumedRecords = source.getConsumedRecords();
+
+        assertThat(consumedRecords)
+                .hasSameSizeAs(expectedRecords)
+                .containsExactlyInAnyOrderElementsOf(expectedRecords);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializerTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializerTest.java
new file mode 100644
index 0000000..393485b
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializerTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.committer;
+
+import org.apache.pulsar.client.api.transaction.TxnID;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests for serializing and deserializing {@link PulsarCommittable} with {@link
+ * PulsarCommittableSerializer}.
+ */
+class PulsarCommittableSerializerTest {
+
+    private static final PulsarCommittableSerializer INSTANCE = new PulsarCommittableSerializer();
+
+    @Test
+    void committableSerDe() throws IOException {
+        String topic = randomAlphabetic(10);
+        TxnID txnID =
+                new TxnID(
+                        ThreadLocalRandom.current().nextLong(),
+                        ThreadLocalRandom.current().nextLong());
+
+        PulsarCommittable committable = new PulsarCommittable(txnID, topic);
+
+        byte[] bytes = INSTANCE.serialize(committable);
+        PulsarCommittable committable1 = INSTANCE.deserialize(INSTANCE.getVersion(), bytes);
+
+        assertEquals(committable1, committable);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
new file mode 100644
index 0000000..1534fb5
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer;
+
+import org.apache.flink.api.common.operators.MailboxExecutor;
+import org.apache.flink.api.common.operators.ProcessingTimeService;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.connector.sink2.Sink.InitContext;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.router.RoundRobinTopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
+import org.apache.flink.connector.pulsar.sink.writer.topic.TopicMetadataListener;
+import org.apache.flink.connector.pulsar.testutils.PulsarTestSuiteBase;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.metrics.groups.OperatorIOMetricGroup;
+import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
+import org.apache.flink.metrics.testutils.MetricListener;
+import org.apache.flink.runtime.mailbox.SyncMailboxExecutor;
+import org.apache.flink.runtime.metrics.groups.InternalSinkWriterMetricGroup;
+import org.apache.flink.runtime.metrics.groups.UnregisteredMetricGroups;
+import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
+import org.apache.flink.util.UserCodeClassLoader;
+
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+import java.util.Collection;
+import java.util.OptionalLong;
+
+import static java.util.Collections.singletonList;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.flink.connector.base.DeliveryGuarantee.EXACTLY_ONCE;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
+import static org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema.pulsarSchema;
+import static org.apache.pulsar.client.api.Schema.STRING;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/** Unit tests for {@link PulsarWriter}. */
+class PulsarWriterTest extends PulsarTestSuiteBase {
+
+    private static final SinkWriter.Context CONTEXT = new MockSinkWriterContext();
+
+    @Test
+    void writeMessageWithGuarantee() throws Exception {
+        writeMessageWithoutGuarantee(EXACTLY_ONCE);
+    }
+
+    @ParameterizedTest
+    @EnumSource(
+            value = DeliveryGuarantee.class,
+            names = {"AT_LEAST_ONCE", "NONE"})
+    void writeMessageWithoutGuarantee(DeliveryGuarantee guarantee) throws Exception {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 8);
+
+        SinkConfiguration configuration = sinkConfiguration(guarantee);
+        PulsarSerializationSchema<String> schema = pulsarSchema(STRING);
+        TopicMetadataListener listener = new TopicMetadataListener(singletonList(topic));
+        RoundRobinTopicRouter<String> router = new RoundRobinTopicRouter<>(configuration);
+        MockInitContext initContext = new MockInitContext();
+
+        PulsarWriter<String> writer =
+                new PulsarWriter<>(configuration, schema, listener, router, initContext);
+
+        writer.flush(false);
+        writer.prepareCommit();
+        writer.flush(false);
+        writer.prepareCommit();
+
+        String message = randomAlphabetic(10);
+        writer.write(message, CONTEXT);
+        writer.flush(false);
+
+        Collection<PulsarCommittable> committables = writer.prepareCommit();
+        if (guarantee != EXACTLY_ONCE) {
+            assertThat(committables).isEmpty();
+        } else {
+            assertThat(committables).hasSize(1);
+            PulsarCommittable committable =
+                    committables.stream().findFirst().orElseThrow(IllegalArgumentException::new);
+            TransactionCoordinatorClient coordinatorClient = operator().coordinatorClient();
+            coordinatorClient.commit(committable.getTxnID());
+        }
+
+        String consumedMessage = operator().receiveMessage(topic, STRING).getValue();
+        assertEquals(message, consumedMessage);
+    }
+
+    private SinkConfiguration sinkConfiguration(DeliveryGuarantee deliveryGuarantee) {
+        Configuration configuration = operator().sinkConfig(deliveryGuarantee);
+        configuration.set(PULSAR_WRITE_SCHEMA_EVOLUTION, true);
+
+        return new SinkConfiguration(configuration);
+    }
+
+    private static class MockInitContext implements InitContext {
+
+        private final MetricListener metricListener;
+        private final OperatorIOMetricGroup ioMetricGroup;
+        private final SinkWriterMetricGroup metricGroup;
+        private final ProcessingTimeService timeService;
+
+        private MockInitContext() {
+            this.metricListener = new MetricListener();
+            this.ioMetricGroup =
+                    UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup()
+                            .getIOMetricGroup();
+            MetricGroup metricGroup = metricListener.getMetricGroup();
+            this.metricGroup = InternalSinkWriterMetricGroup.mock(metricGroup, ioMetricGroup);
+            this.timeService = new TestProcessingTimeService();
+        }
+
+        @Override
+        public UserCodeClassLoader getUserCodeClassLoader() {
+            throw new UnsupportedOperationException("Not implemented.");
+        }
+
+        @Override
+        public MailboxExecutor getMailboxExecutor() {
+            return new SyncMailboxExecutor();
+        }
+
+        @Override
+        public ProcessingTimeService getProcessingTimeService() {
+            return timeService;
+        }
+
+        @Override
+        public int getSubtaskId() {
+            return 0;
+        }
+
+        @Override
+        public int getNumberOfParallelSubtasks() {
+            return 1;
+        }
+
+        @Override
+        public SinkWriterMetricGroup metricGroup() {
+            return metricGroup;
+        }
+
+        @Override
+        public OptionalLong getRestoredCheckpointId() {
+            return OptionalLong.empty();
+        }
+
+        @Override
+        public SerializationSchema.InitializationContext
+                asSerializationSchemaInitializationContext() {
+            return new SerializationSchema.InitializationContext() {
+                @Override
+                public MetricGroup getMetricGroup() {
+                    return metricGroup;
+                }
+
+                @Override
+                public UserCodeClassLoader getUserCodeClassLoader() {
+                    return null;
+                }
+            };
+        }
+    }
+
+    private static class MockSinkWriterContext implements SinkWriter.Context {
+        @Override
+        public long currentWatermark() {
+            return 0;
+        }
+
+        @Override
+        public Long timestamp() {
+            return null;
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouterTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouterTest.java
new file mode 100644
index 0000000..a136aa5
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouterTest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+import org.apache.pulsar.client.impl.Hash;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.toList;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MESSAGE_KEY_HASH;
+import static org.apache.pulsar.client.util.MathUtils.signSafeMod;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.mock;
+
+/** Unit tests for {@link KeyHashTopicRouter}. */
+class KeyHashTopicRouterTest {
+
+    @ParameterizedTest
+    @EnumSource(MessageKeyHash.class)
+    void routeWithEmptyPartition(MessageKeyHash keyHash) {
+        SinkConfiguration configuration = sinkConfiguration(keyHash);
+        KeyHashTopicRouter<String> router = new KeyHashTopicRouter<>(configuration);
+
+        String message = randomAlphanumeric(10);
+        String key = randomAlphanumeric(10);
+        List<String> emptyTopics = emptyList();
+        PulsarSinkContext sinkContext = mock(PulsarSinkContext.class);
+
+        assertThrows(
+                IllegalArgumentException.class,
+                () -> router.route(message, key, emptyTopics, sinkContext));
+    }
+
+    @ParameterizedTest
+    @EnumSource(MessageKeyHash.class)
+    void routeOnlyOnePartition(MessageKeyHash keyHash) {
+        SinkConfiguration configuration = sinkConfiguration(keyHash);
+        List<String> topics = singletonList(randomAlphanumeric(10));
+
+        KeyHashTopicRouter<String> router1 = new KeyHashTopicRouter<>(configuration);
+        String topic1 =
+                router1.route(
+                        randomAlphanumeric(10),
+                        randomAlphanumeric(10),
+                        topics,
+                        mock(PulsarSinkContext.class));
+        assertEquals(topics.get(0), topic1);
+
+        KeyHashTopicRouter<String> router2 = new KeyHashTopicRouter<>(configuration);
+        String topic2 =
+                router2.route(randomAlphanumeric(10), null, topics, mock(PulsarSinkContext.class));
+        assertEquals(topics.get(0), topic2);
+    }
+
+    @ParameterizedTest
+    @EnumSource(MessageKeyHash.class)
+    void routeMessageByMessageKey(MessageKeyHash keyHash) {
+        SinkConfiguration configuration = sinkConfiguration(keyHash);
+        String messageKey = randomAlphanumeric(10);
+        KeyHashTopicRouter<String> router = new KeyHashTopicRouter<>(configuration);
+
+        List<String> topics =
+                Stream.generate(() -> randomAlphanumeric(10))
+                        .distinct()
+                        .limit(10)
+                        .collect(toList());
+
+        Hash hash = keyHash.getHash();
+        int index = signSafeMod(hash.makeHash(messageKey), topics.size());
+        String desiredTopic = topics.get(index);
+        String message = randomAlphanumeric(10);
+
+        String topic = router.route(message, messageKey, topics, mock(PulsarSinkContext.class));
+
+        assertEquals(desiredTopic, topic);
+    }
+
+    private SinkConfiguration sinkConfiguration(MessageKeyHash hash) {
+        Configuration configuration = new Configuration();
+        configuration.set(PULSAR_MESSAGE_KEY_HASH, hash);
+
+        return new SinkConfiguration(configuration);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouterTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouterTest.java
new file mode 100644
index 0000000..8c7890b
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouterTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static java.util.Collections.emptyList;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_MESSAGES;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.mock;
+
+/** Unit tests for {@link RoundRobinTopicRouter}. */
+class RoundRobinTopicRouterTest {
+
+    @Test
+    void routeMessageByEmptyTopics() {
+        SinkConfiguration configuration = sinkConfiguration(10);
+        RoundRobinTopicRouter<String> router = new RoundRobinTopicRouter<>(configuration);
+
+        String message = randomAlphabetic(10);
+        List<String> partitions = emptyList();
+        PulsarSinkContext context = mock(PulsarSinkContext.class);
+
+        assertThrows(
+                IllegalArgumentException.class,
+                () -> router.route(message, null, partitions, context));
+    }
+
+    @Test
+    void routeMessagesInRoundRobin() {
+        int batchSize = ThreadLocalRandom.current().nextInt(1, 20);
+        SinkConfiguration configuration = sinkConfiguration(batchSize);
+        RoundRobinTopicRouter<String> router = new RoundRobinTopicRouter<>(configuration);
+
+        List<String> topics = ImmutableList.of("topic1", "topic2");
+        PulsarSinkContext context = mock(PulsarSinkContext.class);
+
+        for (int i = 0; i < batchSize; i++) {
+            String message = randomAlphabetic(10);
+            String topic = router.route(message, null, topics, context);
+            assertEquals(topics.get(0), topic);
+        }
+
+        for (int i = 0; i < batchSize; i++) {
+            String message = randomAlphabetic(10);
+            String topic = router.route(message, null, topics, context);
+            assertEquals(topics.get(1), topic);
+        }
+
+        String message = randomAlphabetic(10);
+        String topic = router.route(message, null, topics, context);
+        assertEquals(topics.get(0), topic);
+    }
+
+    private SinkConfiguration sinkConfiguration(int switchSize) {
+        Configuration configuration = new Configuration();
+        configuration.set(PULSAR_BATCHING_MAX_MESSAGES, switchSize);
+
+        return new SinkConfiguration(configuration);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListenerTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListenerTest.java
new file mode 100644
index 0000000..f65b238
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListenerTest.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.topic;
+
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.testutils.PulsarTestSuiteBase;
+import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService;
+
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.IntStream;
+
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.toList;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_TOPIC_METADATA_REFRESH_INTERVAL;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.topicNameWithPartition;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/** Unit tests for {@link TopicMetadataListener}. */
+class TopicMetadataListenerTest extends PulsarTestSuiteBase {
+
+    @Test
+    void listenEmptyTopics() {
+        TopicMetadataListener listener = new TopicMetadataListener();
+        SinkConfiguration configuration = sinkConfiguration(Duration.ofMinutes(5).toMillis());
+        TestProcessingTimeService timeService = new TestProcessingTimeService();
+
+        List<String> topics = listener.availableTopics();
+        assertThat(topics).isEmpty();
+
+        listener.open(configuration, timeService);
+        topics = listener.availableTopics();
+        assertThat(topics).isEmpty();
+    }
+
+    @Test
+    void listenOnPartitions() throws Exception {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 6);
+        List<String> partitions = topicPartitions(topic, 6);
+
+        TopicMetadataListener listener = new TopicMetadataListener(partitions);
+        long interval = Duration.ofMinutes(15).toMillis();
+        SinkConfiguration configuration = sinkConfiguration(interval);
+        TestProcessingTimeService timeService = new TestProcessingTimeService();
+
+        List<String> topics = listener.availableTopics();
+        assertEquals(partitions, topics);
+
+        listener.open(configuration, timeService);
+        topics = listener.availableTopics();
+        assertEquals(partitions, topics);
+
+        operator().increaseTopicPartitions(topic, 12);
+        timeService.advance(interval);
+        topics = listener.availableTopics();
+        assertEquals(partitions, topics);
+    }
+
+    @Test
+    void fetchTopicPartitionInformation() {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 8);
+
+        TopicMetadataListener listener = new TopicMetadataListener(singletonList(topic));
+        SinkConfiguration configuration = sinkConfiguration(Duration.ofMinutes(10).toMillis());
+        TestProcessingTimeService timeService = new TestProcessingTimeService();
+
+        List<String> topics = listener.availableTopics();
+        assertThat(topics).isEmpty();
+
+        listener.open(configuration, timeService);
+        topics = listener.availableTopics();
+        List<String> desiredTopics = topicPartitions(topic, 8);
+
+        assertThat(topics).hasSize(8).isEqualTo(desiredTopics);
+    }
+
+    @Test
+    void fetchTopicPartitionUpdate() throws Exception {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 8);
+
+        long interval = Duration.ofMinutes(20).toMillis();
+
+        TopicMetadataListener listener = new TopicMetadataListener(singletonList(topic));
+        SinkConfiguration configuration = sinkConfiguration(interval);
+        TestProcessingTimeService timeService = new TestProcessingTimeService();
+        timeService.setCurrentTime(System.currentTimeMillis());
+
+        listener.open(configuration, timeService);
+        List<String> topics = listener.availableTopics();
+        List<String> desiredTopics = topicPartitions(topic, 8);
+
+        assertThat(topics).isEqualTo(desiredTopics);
+
+        // Increase topic partitions and trigger the metadata update logic.
+        operator().increaseTopicPartitions(topic, 16);
+        timeService.advance(interval);
+
+        topics = listener.availableTopics();
+        desiredTopics = topicPartitions(topic, 16);
+        assertThat(topics).isEqualTo(desiredTopics);
+    }
+
+    private List<String> topicPartitions(String topic, int partitionSize) {
+        return IntStream.range(0, partitionSize)
+                .boxed()
+                .map(i -> topicNameWithPartition(topic, i))
+                .collect(toList());
+    }
+
+    private SinkConfiguration sinkConfiguration(long interval) {
+        Configuration configuration = operator().config();
+        configuration.set(PULSAR_TOPIC_METADATA_REFRESH_INTERVAL, interval);
+
+        return new SinkConfiguration(configuration);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegisterTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegisterTest.java
new file mode 100644
index 0000000..2e36bfb
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegisterTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.topic;
+
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.testutils.PulsarTestSuiteBase;
+
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient;
+import org.apache.pulsar.client.api.transaction.TxnID;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.flink.connector.base.DeliveryGuarantee.EXACTLY_ONCE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/** Unit tests for {@link TopicProducerRegister}. */
+class TopicProducerRegisterTest extends PulsarTestSuiteBase {
+
+    @ParameterizedTest
+    @EnumSource(DeliveryGuarantee.class)
+    void createMessageBuilderForSendingMessage(DeliveryGuarantee deliveryGuarantee)
+            throws IOException {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 8);
+
+        SinkConfiguration configuration = sinkConfiguration(deliveryGuarantee);
+        TopicProducerRegister register = new TopicProducerRegister(configuration);
+
+        String message = randomAlphabetic(10);
+        register.createMessageBuilder(topic, Schema.STRING).value(message).send();
+
+        if (deliveryGuarantee == EXACTLY_ONCE) {
+            List<PulsarCommittable> committables = register.prepareCommit();
+            for (PulsarCommittable committable : committables) {
+                TxnID txnID = committable.getTxnID();
+                TransactionCoordinatorClient coordinatorClient = operator().coordinatorClient();
+                coordinatorClient.commit(txnID);
+            }
+        }
+
+        Message<String> receiveMessage = operator().receiveMessage(topic, Schema.STRING);
+        assertEquals(message, receiveMessage.getValue());
+    }
+
+    @ParameterizedTest
+    @EnumSource(
+            value = DeliveryGuarantee.class,
+            names = {"AT_LEAST_ONCE", "NONE"})
+    void noneAndAtLeastOnceWouldNotCreateTransaction(DeliveryGuarantee deliveryGuarantee) {
+        String topic = randomAlphabetic(10);
+        operator().createTopic(topic, 8);
+
+        SinkConfiguration configuration = sinkConfiguration(deliveryGuarantee);
+        TopicProducerRegister register = new TopicProducerRegister(configuration);
+
+        String message = randomAlphabetic(10);
+        register.createMessageBuilder(topic, Schema.STRING).value(message).sendAsync();
+
+        List<PulsarCommittable> committables = register.prepareCommit();
+        assertThat(committables).isEmpty();
+    }
+
+    private SinkConfiguration sinkConfiguration(DeliveryGuarantee deliveryGuarantee) {
+        return new SinkConfiguration(operator().sinkConfig(deliveryGuarantee));
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/function/ControlSource.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/function/ControlSource.java
new file mode 100644
index 0000000..6e35027
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/testutils/function/ControlSource.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.testutils.function;
+
+import org.apache.flink.api.common.functions.AbstractRichFunction;
+import org.apache.flink.api.common.state.CheckpointListener;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.testutils.runtime.PulsarRuntimeOperator;
+import org.apache.flink.runtime.state.FunctionInitializationContext;
+import org.apache.flink.runtime.state.FunctionSnapshotContext;
+import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.testutils.junit.SharedObjectsExtension;
+import org.apache.flink.testutils.junit.SharedReference;
+
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.Schema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
+
+/**
+ * This source is used for testing the Pulsar sink. It generates a fixed number of records derived
+ * from the topic name and the message index.
+ */
+public class ControlSource extends AbstractRichFunction
+        implements SourceFunction<String>, CheckpointListener, CheckpointedFunction {
+    private static final long serialVersionUID = -3124248855144675017L;
+
+    // Log under the owning class; this was StopSignal.class by copy-paste.
+    private static final Logger LOG = LoggerFactory.getLogger(ControlSource.class);
+
+    private final SharedReference<MessageGenerator> sharedGenerator;
+    private final SharedReference<StopSignal> sharedSignal;
+    // The checkpoint lock, assigned in run(); not Serializable, so keep it transient.
+    private transient Object lock;
+
+    public ControlSource(
+            SharedObjectsExtension sharedObjects,
+            PulsarRuntimeOperator operator,
+            String topic,
+            DeliveryGuarantee guarantee,
+            int messageCounts,
+            Duration timeout) {
+        MessageGenerator generator = new MessageGenerator(topic, guarantee, messageCounts);
+        StopSignal signal = new StopSignal(operator, topic, messageCounts, timeout);
+
+        // Shared objects let the test thread observe state inside the running task.
+        this.sharedGenerator = sharedObjects.add(generator);
+        this.sharedSignal = sharedObjects.add(signal);
+    }
+
+    @Override
+    public void run(SourceContext<String> ctx) {
+        MessageGenerator generator = sharedGenerator.get();
+        StopSignal signal = sharedSignal.get();
+        this.lock = ctx.getCheckpointLock();
+
+        // Emit records until the consumer side has seen everything or the deadline passed.
+        // NOTE(review): this loop spins without sleeping once the generator is drained.
+        while (!signal.canStop()) {
+            synchronized (lock) {
+                if (generator.hasNext()) {
+                    String message = generator.next();
+                    ctx.collect(message);
+                }
+            }
+        }
+    }
+
+    /** All records this source has emitted (in emit order). */
+    public List<String> getExpectedRecords() {
+        MessageGenerator generator = sharedGenerator.get();
+        return generator.getExpectedRecords();
+    }
+
+    /** All records the background consumer has read back from Pulsar so far. */
+    public List<String> getConsumedRecords() {
+        StopSignal signal = sharedSignal.get();
+        return signal.getConsumedRecords();
+    }
+
+    @Override
+    public void cancel() {
+        LOG.warn("Triggering cancel action. Set the stop timeout to zero.");
+        StopSignal signal = sharedSignal.get();
+        // Moving the deadline to "now" makes canStop() return true immediately.
+        signal.deadline.set(System.currentTimeMillis());
+    }
+
+    @Override
+    public void close() throws Exception {
+        StopSignal signal = sharedSignal.get();
+        signal.close();
+    }
+
+    @Override
+    public void notifyCheckpointComplete(long checkpointId) {
+        // Nothing to do.
+    }
+
+    @Override
+    public void snapshotState(FunctionSnapshotContext context) {
+        // Nothing to do.
+    }
+
+    @Override
+    public void initializeState(FunctionInitializationContext context) {
+        // Nothing to do.
+    }
+
+    /** Generates a fixed number of unique records tagged with guarantee, topic, and index. */
+    private static class MessageGenerator implements Iterator<String> {
+
+        private final String topic;
+        private final DeliveryGuarantee guarantee;
+        private final int messageCounts;
+        private final List<String> expectedRecords;
+
+        public MessageGenerator(String topic, DeliveryGuarantee guarantee, int messageCounts) {
+            this.topic = topic;
+            this.guarantee = guarantee;
+            this.messageCounts = messageCounts;
+            this.expectedRecords = new ArrayList<>(messageCounts);
+        }
+
+        @Override
+        public boolean hasNext() {
+            return messageCounts > expectedRecords.size();
+        }
+
+        @Override
+        public String next() {
+            // The random suffix makes every record unique even across identical indices.
+            String content =
+                    guarantee.name()
+                            + "-"
+                            + topic
+                            + "-"
+                            + expectedRecords.size()
+                            + "-"
+                            + randomAlphanumeric(10);
+            expectedRecords.add(content);
+            return content;
+        }
+
+        public List<String> getExpectedRecords() {
+            return expectedRecords;
+        }
+    }
+
+    /**
+     * Used in {@link ControlSource} to decide when the source can stop. Make sure you wrap this
+     * instance into a {@link SharedReference}.
+     */
+    private static class StopSignal implements Closeable {
+        private static final Logger LOG = LoggerFactory.getLogger(StopSignal.class);
+
+        private final String topic;
+        private final int desiredCounts;
+        // This is a thread-safe list.
+        private final List<String> consumedRecords;
+        private final AtomicLong deadline;
+        private final ExecutorService executor;
+
+        public StopSignal(
+                PulsarRuntimeOperator operator, String topic, int messageCounts, Duration timeout) {
+            this.topic = topic;
+            this.desiredCounts = messageCounts;
+            this.consumedRecords = Collections.synchronizedList(new ArrayList<>(messageCounts));
+            this.deadline = new AtomicLong(timeout.toMillis() + System.currentTimeMillis());
+            this.executor = Executors.newSingleThreadExecutor();
+
+            // Start consuming.
+            executor.execute(
+                    () -> {
+                        while (consumedRecords.size() < desiredCounts) {
+                            // This method would block until we consumed a message.
+                            int counts = desiredCounts - consumedRecords.size();
+                            List<Message<String>> messages =
+                                    operator.receiveMessages(this.topic, Schema.STRING, counts);
+                            for (Message<String> message : messages) {
+                                consumedRecords.add(message.getValue());
+                            }
+                        }
+                    });
+        }
+
+        /** True once all records were consumed or the deadline expired (logged as a warning). */
+        public boolean canStop() {
+            if (deadline.get() < System.currentTimeMillis()) {
+                String errorMsg =
+                        String.format(
+                                "Timeout for waiting the records from Pulsar. We have consumed %d messages, expect %d messages.",
+                                consumedRecords.size(), desiredCounts);
+                LOG.warn(errorMsg);
+                return true;
+            }
+
+            return consumedRecords.size() >= desiredCounts;
+        }
+
+        public List<String> getConsumedRecords() {
+            return consumedRecords;
+        }
+
+        @Override
+        public void close() {
+            // NOTE(review): shutdown() does not interrupt the blocked consumer thread;
+            // it exits once the deadline/record count condition is met.
+            executor.shutdown();
+        }
+    }
+}

[flink] 03/09: [FLINK-26021][connector/pulsar] Add the ability to merge the partitioned Pulsar topics.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit a195f729ba4d1ffe657d82888b26ab9db9121ab8
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Wed Feb 9 14:47:28 2022 +0800

    [FLINK-26021][connector/pulsar] Add the ability to merge the partitioned Pulsar topics.
---
 .../pulsar/source/PulsarSourceBuilder.java         |  4 +-
 .../source/enumerator/topic/TopicNameUtils.java    | 45 ++++++++++++++++++++++
 .../enumerator/topic/TopicNameUtilsTest.java       | 16 ++++++++
 3 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
index 0959b1b..b1f6250 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/PulsarSourceBuilder.java
@@ -29,6 +29,7 @@ import org.apache.flink.connector.pulsar.source.config.SourceConfiguration;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor;
 import org.apache.flink.connector.pulsar.source.enumerator.subscriber.PulsarSubscriber;
+import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicRange;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.range.FullRangeGenerator;
 import org.apache.flink.connector.pulsar.source.enumerator.topic.range.RangeGenerator;
@@ -195,7 +196,8 @@ public final class PulsarSourceBuilder<OUT> {
      */
     public PulsarSourceBuilder<OUT> setTopics(List<String> topics) {
         ensureSubscriberIsNull("topics");
-        this.subscriber = PulsarSubscriber.getTopicListSubscriber(topics);
+        List<String> distinctTopics = TopicNameUtils.distinctTopics(topics);
+        this.subscriber = PulsarSubscriber.getTopicListSubscriber(distinctTopics);
         return this;
     }
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtils.java
index 446622c..b5d814a 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtils.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtils.java
@@ -20,8 +20,17 @@ package org.apache.flink.connector.pulsar.source.enumerator.topic;
 
 import org.apache.flink.annotation.Internal;
 
+import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList;
+
 import org.apache.pulsar.common.naming.TopicName;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import static org.apache.flink.util.Preconditions.checkArgument;
 
 /** util for topic name. */
@@ -42,4 +51,40 @@ public final class TopicNameUtils {
         checkArgument(partitionId >= 0, "Illegal partition id %s", partitionId);
         return TopicName.get(topic).getPartition(partitionId).toString();
     }
+
+    /** Returns whether the topic name refers to a single partition of a partitioned topic. */
+    public static boolean isPartitioned(String topic) {
+        return TopicName.get(topic).isPartitioned();
+    }
+
+    /** Merge the same topics into one topics. */
+    public static List<String> distinctTopics(List<String> topics) {
+        Set<String> fullTopics = new HashSet<>();
+        Map<String, List<Integer>> partitionedTopics = new HashMap<>();
+
+        for (String topic : topics) {
+            TopicName topicName = TopicName.get(topic);
+            String partitionedTopicName = topicName.getPartitionedTopicName();
+
+            if (!topicName.isPartitioned()) {
+                fullTopics.add(partitionedTopicName);
+                partitionedTopics.remove(partitionedTopicName);
+            } else if (!fullTopics.contains(partitionedTopicName)) {
+                List<Integer> partitionIds =
+                        partitionedTopics.computeIfAbsent(
+                                partitionedTopicName, k -> new ArrayList<>());
+                partitionIds.add(topicName.getPartitionIndex());
+            }
+        }
+
+        ImmutableList.Builder<String> builder = ImmutableList.<String>builder().addAll(fullTopics);
+
+        for (Map.Entry<String, List<Integer>> topicSet : partitionedTopics.entrySet()) {
+            String topicName = topicSet.getKey();
+            for (Integer partitionId : topicSet.getValue()) {
+                builder.add(topicNameWithPartition(topicName, partitionId));
+            }
+        }
+
+        return builder.build();
+    }
 }
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtilsTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtilsTest.java
index 54e5e4e..0abacc4 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtilsTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/source/enumerator/topic/TopicNameUtilsTest.java
@@ -20,6 +20,10 @@ package org.apache.flink.connector.pulsar.source.enumerator.topic;
 
 import org.junit.jupiter.api.Test;
 
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
@@ -68,4 +72,16 @@ class TopicNameUtilsTest {
         String name4 = TopicNameUtils.topicNameWithPartition(topicNameWithoutCluster, 8);
         assertEquals(name4, topicNameWithoutCluster + "-partition-8");
     }
+
+    /** Partitions covered by their base topic must be dropped; other partitions are kept. */
+    @Test
+    void mergeTheTopicNamesIntoOneSet() {
+        List<String> topics =
+                Arrays.asList("short-topic-partition-8", "short-topic", "long-topic-partition-1");
+        List<String> results = TopicNameUtils.distinctTopics(topics);
+
+        // Short names are normalized to the fully qualified persistent://tenant/ns/topic form.
+        assertThat(results)
+                .containsExactlyInAnyOrder(
+                        "persistent://public/default/short-topic",
+                        "persistent://public/default/long-topic-partition-1");
+    }
 }

[flink] 06/09: [FLINK-26022][connector/pulsar] Implement at-least-once and exactly-once Pulsar Sink.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 136add5d0c9c5b9b2869a9ee194f78449065b18e
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Tue Feb 15 22:22:19 2022 +0800

    [FLINK-26022][connector/pulsar] Implement at-least-once and exactly-once Pulsar Sink.
---
 .../common/utils/PulsarTransactionUtils.java       |  68 ++++
 .../flink/connector/pulsar/sink/PulsarSink.java    | 136 ++++++++
 .../connector/pulsar/sink/PulsarSinkBuilder.java   | 354 +++++++++++++++++++++
 .../connector/pulsar/sink/PulsarSinkOptions.java   |  14 +-
 .../pulsar/sink/committer/PulsarCommittable.java   |  71 +++++
 .../committer/PulsarCommittableSerializer.java     |  65 ++++
 .../pulsar/sink/committer/PulsarCommitter.java     | 174 ++++++++++
 .../pulsar/sink/config/SinkConfiguration.java      |  17 +-
 .../connector/pulsar/sink/writer/PulsarWriter.java | 264 +++++++++++++++
 .../sink/writer/context/PulsarSinkContext.java     |  46 +++
 .../sink/writer/context/PulsarSinkContextImpl.java |  61 ++++
 .../sink/writer/router/KeyHashTopicRouter.java     |  71 +++++
 .../pulsar/sink/writer/router/MessageKeyHash.java  |  85 +++++
 .../sink/writer/router/RoundRobinTopicRouter.java  |  63 ++++
 .../pulsar/sink/writer/router/TopicRouter.java     |  64 ++++
 .../sink/writer/router/TopicRoutingMode.java       |  87 +++++
 .../sink/writer/topic/TopicMetadataListener.java   | 173 ++++++++++
 .../sink/writer/topic/TopicProducerRegister.java   | 202 ++++++++++++
 18 files changed, 2011 insertions(+), 4 deletions(-)

diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java
new file mode 100644
index 0000000..a48b4d4
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/common/utils/PulsarTransactionUtils.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.common.utils;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import org.apache.pulsar.client.api.PulsarClient;
+import org.apache.pulsar.client.api.transaction.Transaction;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
+import static org.apache.flink.util.ExceptionUtils.findThrowable;
+
+/** A suite of workarounds for the Pulsar Transaction. */
+@Internal
+public final class PulsarTransactionUtils {
+
+    private PulsarTransactionUtils() {
+        // No public constructor
+    }
+
+    /**
+     * Creates a transaction with the given timeout in milliseconds, blocking until the
+     * transaction is opened.
+     */
+    public static Transaction createTransaction(PulsarClient pulsarClient, long timeoutMs) {
+        try {
+            CompletableFuture<Transaction> future =
+                    sneakyClient(pulsarClient::newTransaction)
+                            .withTransactionTimeout(timeoutMs, TimeUnit.MILLISECONDS)
+                            .build();
+
+            return future.get();
+        } catch (InterruptedException e) {
+            // Restore the interrupt flag before converting to an unchecked exception.
+            Thread.currentThread().interrupt();
+            throw new IllegalStateException(e);
+        } catch (ExecutionException e) {
+            throw new FlinkRuntimeException(e);
+        }
+    }
+
+    /**
+     * This is a bug in original {@link TransactionCoordinatorClientException#unwrap(Throwable)}
+     * method. Pulsar wraps the {@link ExecutionException} which hides the real execution exception.
+     */
+    public static TransactionCoordinatorClientException unwrap(
+            TransactionCoordinatorClientException e) {
+        return findThrowable(e.getCause(), TransactionCoordinatorClientException.class).orElse(e);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
new file mode 100644
index 0000000..811d5b5
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.TwoPhaseCommittingSink;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittableSerializer;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommitter;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.PulsarWriter;
+import org.apache.flink.connector.pulsar.sink.writer.router.KeyHashTopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.router.RoundRobinTopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
+import org.apache.flink.connector.pulsar.sink.writer.topic.TopicMetadataListener;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * The Sink implementation of Pulsar. Please use a {@link PulsarSinkBuilder} to construct a {@link
+ * PulsarSink}. The following example shows how to create a PulsarSink receiving records of {@code
+ * String} type.
+ *
+ * <pre>{@code
+ * PulsarSink<String> sink = PulsarSink.builder()
+ *      .setServiceUrl(operator().serviceUrl())
+ *      .setAdminUrl(operator().adminUrl())
+ *      .setTopic(topic)
+ *      .setSerializationSchema(PulsarSerializationSchema.pulsarSchema(Schema.STRING))
+ *      .build();
+ * }</pre>
+ *
+ * <p>The sink supports all delivery guarantees described by {@link DeliveryGuarantee}.
+ *
+ * <ul>
+ *   <li>{@link DeliveryGuarantee#NONE} does not provide any guarantees: messages may be lost in
+ *       case of issues on the Pulsar broker and messages may be duplicated in case of a Flink
+ *       failure.
+ *   <li>{@link DeliveryGuarantee#AT_LEAST_ONCE} the sink will wait for all outstanding records in
+ *       the Pulsar buffers to be acknowledged by the Pulsar producer on a checkpoint. No messages
+ *       will be lost in case of any issue with the Pulsar brokers but messages may be duplicated
+ *       when Flink restarts.
+ *   <li>{@link DeliveryGuarantee#EXACTLY_ONCE}: In this mode the PulsarSink will write all messages
+ *       in a Pulsar transaction that will be committed to Pulsar on a checkpoint. Thus, no
+ *       duplicates will be seen in case of a Flink restart. However, this delays record writing
+ *       effectively until a checkpoint is written, so adjust the checkpoint duration accordingly.
+ *       Additionally, it is highly recommended to tweak Pulsar transaction timeout (link) >>
+ *       maximum checkpoint duration + maximum restart duration or data loss may happen when Pulsar
+ *       expires an uncommitted transaction.
+ * </ul>
+ *
+ * <p>See {@link PulsarSinkBuilder} for more details.
+ *
+ * @param <IN> The input type of the sink.
+ */
+@PublicEvolving
+public class PulsarSink<IN> implements TwoPhaseCommittingSink<IN, PulsarCommittable> {
+    private static final long serialVersionUID = 4416714587951282119L;
+
+    private final SinkConfiguration sinkConfiguration;
+    private final PulsarSerializationSchema<IN> serializationSchema;
+    private final TopicMetadataListener metadataListener;
+    private final TopicRouter<IN> topicRouter;
+
+    PulsarSink(
+            SinkConfiguration sinkConfiguration,
+            PulsarSerializationSchema<IN> serializationSchema,
+            TopicMetadataListener metadataListener,
+            TopicRoutingMode topicRoutingMode,
+            TopicRouter<IN> topicRouter) {
+        this.sinkConfiguration = checkNotNull(sinkConfiguration);
+        this.serializationSchema = checkNotNull(serializationSchema);
+        this.metadataListener = checkNotNull(metadataListener);
+        checkNotNull(topicRoutingMode);
+
+        // Create topic router supplier.
+        if (topicRoutingMode == TopicRoutingMode.CUSTOM) {
+            // A user-provided router is mandatory only in CUSTOM mode.
+            this.topicRouter = checkNotNull(topicRouter);
+        } else if (topicRoutingMode == TopicRoutingMode.ROUND_ROBIN) {
+            this.topicRouter = new RoundRobinTopicRouter<>(sinkConfiguration);
+        } else {
+            // Any remaining mode falls back to routing by the hash of the message key.
+            this.topicRouter = new KeyHashTopicRouter<>(sinkConfiguration);
+        }
+    }
+
+    /**
+     * Create a {@link PulsarSinkBuilder} to construct a new {@link PulsarSink}.
+     *
+     * @param <IN> Type of incoming records.
+     * @return A Pulsar sink builder.
+     */
+    public static <IN> PulsarSinkBuilder<IN> builder() {
+        return new PulsarSinkBuilder<>();
+    }
+
+    @Internal
+    @Override
+    public PrecommittingSinkWriter<IN, PulsarCommittable> createWriter(InitContext initContext) {
+        return new PulsarWriter<>(
+                sinkConfiguration, serializationSchema, metadataListener, topicRouter, initContext);
+    }
+
+    @Internal
+    @Override
+    public Committer<PulsarCommittable> createCommitter() {
+        return new PulsarCommitter(sinkConfiguration);
+    }
+
+    @Internal
+    @Override
+    public SimpleVersionedSerializer<PulsarCommittable> getCommittableSerializer() {
+        return new PulsarCommittableSerializer();
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
new file mode 100644
index 0000000..a0352f5
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
@@ -0,0 +1,354 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.common.config.PulsarConfigBuilder;
+import org.apache.flink.connector.pulsar.common.config.PulsarOptions;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSchemaWrapper;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
+import org.apache.flink.connector.pulsar.sink.writer.topic.TopicMetadataListener;
+
+import org.apache.pulsar.client.api.Schema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ADMIN_URL;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_ENABLE_TRANSACTION;
+import static org.apache.flink.connector.pulsar.common.config.PulsarOptions.PULSAR_SERVICE_URL;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_PRODUCER_NAME;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_SEND_TIMEOUT_MS;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_DELIVERY_GUARANTEE;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_TRANSACTION_TIMEOUT;
+import static org.apache.flink.connector.pulsar.sink.config.PulsarSinkConfigUtils.SINK_CONFIG_VALIDATOR;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.distinctTopics;
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.flink.util.Preconditions.checkState;
+
+/**
+ * The builder class for {@link PulsarSink} to make it easier for the users to construct a {@link
+ * PulsarSink}.
+ *
+ * <p>The following example shows the minimum setup to create a PulsarSink that writes String
+ * values to a Pulsar topic.
+ *
+ * <pre>{@code
+ * PulsarSink<String> sink = PulsarSink.builder()
+ *     .setServiceUrl(operator().serviceUrl())
+ *     .setAdminUrl(operator().adminUrl())
+ *     .setTopics(topic)
+ *     .setSerializationSchema(PulsarSerializationSchema.pulsarSchema(Schema.STRING))
+ *     .build();
+ * }</pre>
+ *
+ * <p>The service url, admin url, and the record serializer are required fields that must be set. If
+ * you don't set the topics, make sure you have provided a custom {@link TopicRouter}. Otherwise,
+ * you must provide the topics to produce.
+ *
+ * <p>To specify the delivery guarantees of PulsarSink, one can call {@link
+ * #setDeliveryGuarantee(DeliveryGuarantee)}. The default value of the delivery guarantee is {@link
+ * DeliveryGuarantee#NONE}, which does not guarantee any consistency when writing messages into
+ * Pulsar.
+ *
+ * <pre>{@code
+ * PulsarSink<String> sink = PulsarSink.builder()
+ *     .setServiceUrl(operator().serviceUrl())
+ *     .setAdminUrl(operator().adminUrl())
+ *     .setTopics(topic)
+ *     .setSerializationSchema(PulsarSerializationSchema.pulsarSchema(Schema.STRING))
+ *     .setDeliveryGuarantee(deliveryGuarantee)
+ *     .build();
+ * }</pre>
+ *
+ * @see PulsarSink for a more detailed explanation of the different guarantees.
+ * @param <IN> The input type of the sink.
+ */
+@PublicEvolving
+public class PulsarSinkBuilder<IN> {
+    private static final Logger LOG = LoggerFactory.getLogger(PulsarSinkBuilder.class);
+
+    // Collects all client/producer/sink options before they are validated in build().
+    private final PulsarConfigBuilder configBuilder;
+
+    // Required: the serialization logic applied to every incoming record.
+    private PulsarSerializationSchema<IN> serializationSchema;
+    // Created from the configured topics; left null until setTopics() or build() is called.
+    private TopicMetadataListener metadataListener;
+    private TopicRoutingMode topicRoutingMode;
+    // Only consulted when topicRoutingMode is CUSTOM.
+    private TopicRouter<IN> topicRouter;
+
+    // private builder constructor.
+    PulsarSinkBuilder() {
+        this.configBuilder = new PulsarConfigBuilder();
+    }
+
+    /**
+     * Sets the admin endpoint for the PulsarAdmin of the PulsarSink.
+     *
+     * @param adminUrl The url for the PulsarAdmin.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setAdminUrl(String adminUrl) {
+        return setConfig(PULSAR_ADMIN_URL, adminUrl);
+    }
+
+    /**
+     * Sets the server's link for the PulsarProducer of the PulsarSink.
+     *
+     * @param serviceUrl The server url of the Pulsar cluster.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setServiceUrl(String serviceUrl) {
+        return setConfig(PULSAR_SERVICE_URL, serviceUrl);
+    }
+
+    /**
+     * The producer name is informative, and it can be used to identify a particular producer
+     * instance from the topic stats.
+     *
+     * @param producerName The name of the producer used in Pulsar sink.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setProducerName(String producerName) {
+        return setConfig(PULSAR_PRODUCER_NAME, producerName);
+    }
+
+    /**
+     * Set a pulsar topic list for flink sink. Some topics may not exist yet; writing to such a
+     * non-existent topic wouldn't throw any exception.
+     *
+     * @param topics The topic list you would like to produce messages to.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setTopics(String... topics) {
+        return setTopics(Arrays.asList(topics));
+    }
+
+    /**
+     * Set a pulsar topic list for flink sink. Some topics may not exist yet; writing to such a
+     * non-existent topic wouldn't throw any exception.
+     *
+     * @param topics The topic list you would like to produce messages to.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setTopics(List<String> topics) {
+        checkState(metadataListener == null, "setTopics couldn't be set twice.");
+        // Make sure the topics are distinct before creating the metadata listener.
+        List<String> topicSet = distinctTopics(topics);
+        this.metadataListener = new TopicMetadataListener(topicSet);
+        return this;
+    }
+
+    /**
+     * Sets the wanted the {@link DeliveryGuarantee}. The default delivery guarantee is {@link
+     * DeliveryGuarantee#NONE}.
+     *
+     * @param deliveryGuarantee The delivery guarantee for writing messages.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setDeliveryGuarantee(DeliveryGuarantee deliveryGuarantee) {
+        checkNotNull(deliveryGuarantee, "deliveryGuarantee");
+        configBuilder.override(PULSAR_WRITE_DELIVERY_GUARANTEE, deliveryGuarantee);
+        return this;
+    }
+
+    /**
+     * Set a routing mode for choosing the right topic partition to send messages.
+     *
+     * @param topicRoutingMode Routing policy for choosing the desired topic.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setTopicRoutingMode(TopicRoutingMode topicRoutingMode) {
+        checkArgument(
+                topicRoutingMode != TopicRoutingMode.CUSTOM,
+                "CUSTOM mode should be set by using setTopicRouter method.");
+        this.topicRoutingMode = checkNotNull(topicRoutingMode, "topicRoutingMode");
+        return this;
+    }
+
+    /**
+     * Use a custom topic router instead of the predefined topic routing modes.
+     *
+     * @param topicRouter The router for choosing topic to send message.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setTopicRouter(TopicRouter<IN> topicRouter) {
+        if (topicRoutingMode != null && topicRoutingMode != TopicRoutingMode.CUSTOM) {
+            LOG.warn("We would override topicRoutingMode to CUSTOM if you provide TopicRouter.");
+        }
+        this.topicRoutingMode = TopicRoutingMode.CUSTOM;
+        this.topicRouter = checkNotNull(topicRouter, "topicRouter");
+        return this;
+    }
+
+    /**
+     * Sets the {@link PulsarSerializationSchema} that transforms incoming records to bytes.
+     *
+     * <p>Note: this method narrows the builder's generic type to the schema's record type.
+     *
+     * @param serializationSchema Pulsar specified serialize logic.
+     * @return this PulsarSinkBuilder.
+     */
+    public <T extends IN> PulsarSinkBuilder<T> setSerializationSchema(
+            PulsarSerializationSchema<T> serializationSchema) {
+        PulsarSinkBuilder<T> self = specialized();
+        self.serializationSchema = serializationSchema;
+        return self;
+    }
+
+    /**
+     * If you enable this option, the messages would be written by using the Pulsar {@link Schema}
+     * instead of the default byte array schema.
+     *
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> enableSchemaEvolution() {
+        configBuilder.override(PULSAR_WRITE_SCHEMA_EVOLUTION, true);
+        return this;
+    }
+
+    /**
+     * Set an arbitrary property for the PulsarSink and Pulsar Producer. The valid keys can be found
+     * in {@link PulsarSinkOptions} and {@link PulsarOptions}.
+     *
+     * <p>Make sure the option could be set only once or with same value.
+     *
+     * @param key The key of the property.
+     * @param value The value of the property.
+     * @return this PulsarSinkBuilder.
+     */
+    public <T> PulsarSinkBuilder<IN> setConfig(ConfigOption<T> key, T value) {
+        configBuilder.set(key, value);
+        return this;
+    }
+
+    /**
+     * Set arbitrary properties for the PulsarSink and Pulsar Producer. The valid keys can be found
+     * in {@link PulsarSinkOptions} and {@link PulsarOptions}.
+     *
+     * @param config The config to set for the PulsarSink.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setConfig(Configuration config) {
+        configBuilder.set(config);
+        return this;
+    }
+
+    /**
+     * Set arbitrary properties for the PulsarSink and Pulsar Producer. The valid keys can be found
+     * in {@link PulsarSinkOptions} and {@link PulsarOptions}.
+     *
+     * <p>This method is mainly used for future flink SQL binding.
+     *
+     * @param properties The config properties to set for the PulsarSink.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> setProperties(Properties properties) {
+        configBuilder.set(properties);
+        return this;
+    }
+
+    /**
+     * Build the {@link PulsarSink}. Validates the delivery guarantee, the serialization schema,
+     * the topic metadata listener and the routing mode, applying defaults where possible.
+     *
+     * @return a PulsarSink with the settings made for this builder.
+     */
+    public PulsarSink<IN> build() {
+        // Validate the delivery guarantee and adjust the related transaction options.
+        DeliveryGuarantee deliveryGuarantee = configBuilder.get(PULSAR_WRITE_DELIVERY_GUARANTEE);
+        if (deliveryGuarantee == DeliveryGuarantee.NONE) {
+            LOG.warn(
+                    "You haven't set delivery guarantee or set it to NONE, this would cause data loss. Make sure you have known this shortcoming.");
+        } else if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
+            LOG.info(
+                    "Exactly once require flink checkpoint and your pulsar cluster should support the transaction.");
+            // Exactly-once relies on Pulsar transactions; disable the producer send timeout
+            // (0 means no timeout) so pending transactional messages are never timed out.
+            configBuilder.override(PULSAR_ENABLE_TRANSACTION, true);
+            configBuilder.override(PULSAR_SEND_TIMEOUT_MS, 0L);
+
+            if (!configBuilder.contains(PULSAR_WRITE_TRANSACTION_TIMEOUT)) {
+                LOG.warn(
+                        "The default pulsar transaction timeout is 3 hours, make sure it was greater than your checkpoint interval.");
+            } else {
+                Long timeout = configBuilder.get(PULSAR_WRITE_TRANSACTION_TIMEOUT);
+                LOG.warn(
+                        "The configured transaction timeout is {} mille seconds, make sure it was greater than your checkpoint interval.",
+                        timeout);
+            }
+        }
+
+        if (!configBuilder.contains(PULSAR_PRODUCER_NAME)) {
+            LOG.warn(
+                    "We recommend set a readable producer name through setProducerName(String) in production mode.");
+        }
+
+        checkNotNull(serializationSchema, "serializationSchema must be set.");
+        // Hint that Pulsar schema based serialization only takes effect with schema evolution.
+        if (serializationSchema instanceof PulsarSchemaWrapper
+                && !Boolean.TRUE.equals(configBuilder.get(PULSAR_WRITE_SCHEMA_EVOLUTION))) {
+            LOG.info(
+                    "It seems like you want to send message in Pulsar Schema."
+                            + " You can enableSchemaEvolution for using this feature."
+                            + " We would use Schema.BYTES as the default schema if you don't enable this option.");
+        }
+
+        // Topic metadata listener validation.
+        if (metadataListener == null) {
+            if (topicRouter == null) {
+                throw new NullPointerException(
+                        "No topic names or custom topic router are provided.");
+            } else {
+                LOG.warn(
+                        "No topic set has been provided, make sure your custom topic router support empty topic set.");
+                this.metadataListener = new TopicMetadataListener();
+            }
+        }
+
+        // Topic routing mode validate.
+        if (topicRoutingMode == null) {
+            LOG.info("No topic routing mode has been chosen. We use round-robin mode as default.");
+            this.topicRoutingMode = TopicRoutingMode.ROUND_ROBIN;
+        }
+
+        // This is an unmodifiable configuration for Pulsar.
+        // We don't use Pulsar's built-in configure classes for compatible requirement.
+        SinkConfiguration sinkConfiguration =
+                configBuilder.build(SINK_CONFIG_VALIDATOR, SinkConfiguration::new);
+
+        return new PulsarSink<>(
+                sinkConfiguration,
+                serializationSchema,
+                metadataListener,
+                topicRoutingMode,
+                topicRouter);
+    }
+
+    // ------------- private helpers  --------------
+
+    /** Helper method for java compiler recognize the generic type. */
+    @SuppressWarnings("unchecked")
+    private <T extends IN> PulsarSinkBuilder<T> specialized() {
+        return (PulsarSinkBuilder<T>) this;
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
index 0e16830..3a7c5bc 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkOptions.java
@@ -26,6 +26,7 @@ import org.apache.flink.configuration.ConfigOptions;
 import org.apache.flink.configuration.description.Description;
 import org.apache.flink.connector.base.DeliveryGuarantee;
 import org.apache.flink.connector.pulsar.common.config.PulsarOptions;
+import org.apache.flink.connector.pulsar.sink.writer.router.MessageKeyHash;
 
 import org.apache.pulsar.client.api.CompressionType;
 
@@ -38,12 +39,13 @@ import static org.apache.flink.configuration.description.LinkElement.link;
 import static org.apache.flink.configuration.description.TextElement.code;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PRODUCER_CONFIG_PREFIX;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.SINK_CONFIG_PREFIX;
+import static org.apache.flink.connector.pulsar.sink.writer.router.MessageKeyHash.MURMUR3_32_HASH;
 import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_BATCHING_MAX_MESSAGES;
 import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_MAX_PENDING_MESSAGES;
 import static org.apache.pulsar.client.impl.conf.ProducerConfigurationData.DEFAULT_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS;
 
 /**
- * Configurations for PulsarSink. All the options list here could be configured in {@code
+ * Configurations for PulsarSink. All the options list here could be configured in {@link
  * PulsarSinkBuilder#setConfig(ConfigOption, Object)}. The {@link PulsarOptions} is also required
  * for pulsar source.
  *
@@ -99,6 +101,13 @@ public final class PulsarSinkOptions {
                     .withDescription(
                             "Auto update the topic metadata in a fixed interval (in ms). The default value is 30 minutes.");
 
+    public static final ConfigOption<MessageKeyHash> PULSAR_MESSAGE_KEY_HASH =
+            ConfigOptions.key(SINK_CONFIG_PREFIX + "messageKeyHash")
+                    .enumType(MessageKeyHash.class)
+                    .defaultValue(MURMUR3_32_HASH)
+                    .withDescription(
+                            "The hash policy for routing message by calculating the hash code of message key.");
+
     public static final ConfigOption<Boolean> PULSAR_WRITE_SCHEMA_EVOLUTION =
             ConfigOptions.key(SINK_CONFIG_PREFIX + "enableSchemaEvolution")
                     .booleanType()
@@ -106,7 +115,8 @@ public final class PulsarSinkOptions {
                     .withDescription(
                             Description.builder()
                                     .text(
-                                            "If you enable this option, we would consume and deserialize the message by using Pulsar's %s.",
+                                            "If you enable this option and use PulsarSerializationSchema.pulsarSchema(),"
+                                                    + " we would consume and deserialize the message by using Pulsar's %s.",
                                             code("Schema"))
                                     .build());
 
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittable.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittable.java
new file mode 100644
index 0000000..cca8e80
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittable.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.committer;
+
+import org.apache.flink.annotation.Internal;
+
+import org.apache.pulsar.client.api.transaction.TxnID;
+
+import java.util.Objects;
+
+/**
+ * The committable state produced by the Pulsar writer and consumed by the Pulsar committer,
+ * identifying one pending Pulsar transaction on one topic.
+ */
+@Internal
+public class PulsarCommittable {
+
+    /** The transaction id. */
+    private final TxnID txnID;
+
+    /** The topic name with partition information. */
+    private final String topic;
+
+    public PulsarCommittable(TxnID txnID, String topic) {
+        this.txnID = txnID;
+        this.topic = topic;
+    }
+
+    public TxnID getTxnID() {
+        return txnID;
+    }
+
+    public String getTopic() {
+        return topic;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        PulsarCommittable that = (PulsarCommittable) o;
+        return Objects.equals(txnID, that.txnID) && Objects.equals(topic, that.topic);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(txnID, topic);
+    }
+
+    @Override
+    public String toString() {
+        return "PulsarCommittable{" + "txnID=" + txnID + ", topic='" + topic + '\'' + '}';
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializer.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializer.java
new file mode 100644
index 0000000..324a7c6
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommittableSerializer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.committer;
+
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+
+import org.apache.pulsar.client.api.transaction.TxnID;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+/**
+ * A serializer used to serialize {@link PulsarCommittable}.
+ *
+ * <p>Wire format (version 1): the transaction id's most significant bits (long), its least
+ * significant bits (long), followed by the topic name in modified-UTF-8 ({@code writeUTF}).
+ */
+public class PulsarCommittableSerializer implements SimpleVersionedSerializer<PulsarCommittable> {
+
+    private static final int CURRENT_VERSION = 1;
+
+    @Override
+    public int getVersion() {
+        return CURRENT_VERSION;
+    }
+
+    @Override
+    public byte[] serialize(PulsarCommittable obj) throws IOException {
+        try (final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+                final DataOutputStream out = new DataOutputStream(baos)) {
+            TxnID txnID = obj.getTxnID();
+            out.writeLong(txnID.getMostSigBits());
+            out.writeLong(txnID.getLeastSigBits());
+            out.writeUTF(obj.getTopic());
+            out.flush();
+            return baos.toByteArray();
+        }
+    }
+
+    @Override
+    public PulsarCommittable deserialize(int version, byte[] serialized) throws IOException {
+        try (final ByteArrayInputStream bais = new ByteArrayInputStream(serialized);
+                final DataInputStream in = new DataInputStream(bais)) {
+            // Read fields in the exact order they were written in serialize().
+            long mostSigBits = in.readLong();
+            long leastSigBits = in.readLong();
+            TxnID txnID = new TxnID(mostSigBits, leastSigBits);
+            String topic = in.readUTF();
+            return new PulsarCommittable(txnID, topic);
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommitter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommitter.java
new file mode 100644
index 0000000..8389bdc
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/committer/PulsarCommitter.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.committer;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.common.utils.PulsarTransactionUtils;
+import org.apache.flink.connector.pulsar.sink.PulsarSink;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import org.apache.pulsar.client.api.PulsarClient;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException.CoordinatorNotFoundException;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException.InvalidTxnStatusException;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException.MetaStoreHandlerNotExistsException;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClientException.TransactionNotFoundException;
+import org.apache.pulsar.client.api.transaction.TxnID;
+import org.apache.pulsar.client.impl.PulsarClientImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createClient;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.pulsar.common.naming.TopicName.TRANSACTION_COORDINATOR_ASSIGN;
+
+/**
+ * Committer implementation for {@link PulsarSink}.
+ *
+ * <p>The committer is responsible to finalize the Pulsar transactions by committing them.
+ */
+@Internal
+public class PulsarCommitter implements Committer<PulsarCommittable>, Closeable {
+    private static final Logger LOG = LoggerFactory.getLogger(PulsarCommitter.class);
+
+    private final SinkConfiguration sinkConfiguration;
+
+    // Lazily created in transactionCoordinatorClient(); may stay null if commit is never called.
+    private PulsarClient pulsarClient;
+    private TransactionCoordinatorClient coordinatorClient;
+
+    public PulsarCommitter(SinkConfiguration sinkConfiguration) {
+        this.sinkConfiguration = checkNotNull(sinkConfiguration);
+    }
+
+    /**
+     * Commits the Pulsar transaction of every given request. Known {@link
+     * TransactionCoordinatorClientException} subtypes are classified into failed,
+     * already-committed or retriable outcomes on the {@link CommitRequest}; any other exception
+     * is signaled as an unknown failure.
+     */
+    @Override
+    public void commit(Collection<CommitRequest<PulsarCommittable>> requests)
+            throws IOException, InterruptedException {
+        TransactionCoordinatorClient client = transactionCoordinatorClient();
+
+        for (CommitRequest<PulsarCommittable> request : requests) {
+            PulsarCommittable committable = request.getCommittable();
+            TxnID txnID = committable.getTxnID();
+            String topic = committable.getTopic();
+
+            LOG.debug("Start committing the Pulsar transaction {} for topic {}", txnID, topic);
+            try {
+                client.commit(txnID);
+            } catch (TransactionCoordinatorClientException e) {
+                // This is a known bug for Pulsar Transaction.
+                // We have to use instanceof instead of catching them.
+                TransactionCoordinatorClientException ex = PulsarTransactionUtils.unwrap(e);
+                if (ex instanceof CoordinatorNotFoundException) {
+                    LOG.error(
+                            "We couldn't find the Transaction Coordinator from Pulsar broker {}. "
+                                    + "Check your broker configuration.",
+                            committable,
+                            ex);
+                    request.signalFailedWithKnownReason(ex);
+                } else if (ex instanceof InvalidTxnStatusException) {
+                    LOG.error(
+                            "Unable to commit transaction ({}) because it's in an invalid state. "
+                                    + "Most likely the transaction has been aborted for some reason. "
+                                    + "Please check the Pulsar broker logs for more details.",
+                            committable,
+                            ex);
+                    request.signalAlreadyCommitted();
+                } else if (ex instanceof TransactionNotFoundException) {
+                    // On the first attempt a missing transaction is a hard failure; on a retry it
+                    // most likely means the previous commit succeeded but timed out on our side.
+                    if (request.getNumberOfRetries() == 0) {
+                        LOG.error(
+                                "Unable to commit transaction ({}) because it's not found on Pulsar broker. "
+                                        + "Most likely the checkpoint interval exceed the transaction timeout.",
+                                committable,
+                                ex);
+                        request.signalFailedWithKnownReason(ex);
+                    } else {
+                        LOG.warn(
+                                "We can't find the transaction {} after {} retry committing. "
+                                        + "This may mean that the transaction have been committed in previous but failed with timeout. "
+                                        + "So we just mark it as committed.",
+                                txnID,
+                                request.getNumberOfRetries());
+                        request.signalAlreadyCommitted();
+                    }
+                } else if (ex instanceof MetaStoreHandlerNotExistsException) {
+                    LOG.error(
+                            "We can't find the meta store handler by the mostSigBits from TxnID {}. "
+                                    + "Did you change the metadata for topic {}?",
+                            committable,
+                            TRANSACTION_COORDINATOR_ASSIGN,
+                            ex);
+                    request.signalFailedWithKnownReason(ex);
+                } else {
+                    // Any other coordinator exception is treated as retriable, up to the
+                    // configured maximum recommit attempts.
+                    LOG.error(
+                            "Encountered retriable exception while committing transaction {} for topic {}.",
+                            committable,
+                            topic,
+                            ex);
+                    int maxRecommitTimes = sinkConfiguration.getMaxRecommitTimes();
+                    if (request.getNumberOfRetries() < maxRecommitTimes) {
+                        request.retryLater();
+                    } else {
+                        String message =
+                                String.format(
+                                        "Failed to commit transaction %s after retrying %d times",
+                                        txnID, maxRecommitTimes);
+                        request.signalFailedWithKnownReason(new FlinkRuntimeException(message, ex));
+                    }
+                }
+            } catch (Exception e) {
+                LOG.error(
+                        "Transaction ({}) encountered unknown error and data could be potentially lost.",
+                        committable,
+                        e);
+                request.signalFailedWithUnknownReason(e);
+            }
+        }
+    }
+
+    /**
+     * Lazily initialize the backend Pulsar client. This committer may not be used with {@link
+     * DeliveryGuarantee#NONE} and {@link DeliveryGuarantee#AT_LEAST_ONCE}, so we don't create
+     * the Pulsar client eagerly.
+     */
+    private TransactionCoordinatorClient transactionCoordinatorClient() {
+        if (coordinatorClient == null) {
+            this.pulsarClient = createClient(sinkConfiguration);
+            this.coordinatorClient = ((PulsarClientImpl) pulsarClient).getTcClient();
+
+            // Ensure you have enabled transaction.
+            checkNotNull(coordinatorClient, "You haven't enable transaction in Pulsar client.");
+        }
+
+        return coordinatorClient;
+    }
+
+    /** Closes the lazily created Pulsar client, if it was ever initialized. */
+    @Override
+    public void close() throws IOException {
+        if (pulsarClient != null) {
+            pulsarClient.close();
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
index e0ef7ff..fe1204e 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/config/SinkConfiguration.java
@@ -23,6 +23,9 @@ import org.apache.flink.api.connector.sink.Sink.InitContext;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.DeliveryGuarantee;
 import org.apache.flink.connector.pulsar.common.config.PulsarConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.PulsarWriter;
+import org.apache.flink.connector.pulsar.sink.writer.router.MessageKeyHash;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSchemaWrapper;
 
 import org.apache.pulsar.client.api.Schema;
 
@@ -31,6 +34,7 @@ import java.util.Objects;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_MESSAGES;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MAX_RECOMMIT_TIMES;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_MESSAGE_KEY_HASH;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_TOPIC_METADATA_REFRESH_INTERVAL;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_DELIVERY_GUARANTEE;
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
@@ -45,6 +49,7 @@ public class SinkConfiguration extends PulsarConfiguration {
     private final long transactionTimeoutMillis;
     private final long topicMetadataRefreshInterval;
     private final int partitionSwitchSize;
+    private final MessageKeyHash messageKeyHash;
     private final boolean enableSchemaEvolution;
     private final int maxPendingMessages;
     private final int maxRecommitTimes;
@@ -56,12 +61,13 @@ public class SinkConfiguration extends PulsarConfiguration {
         this.transactionTimeoutMillis = getLong(PULSAR_WRITE_TRANSACTION_TIMEOUT);
         this.topicMetadataRefreshInterval = getLong(PULSAR_TOPIC_METADATA_REFRESH_INTERVAL);
         this.partitionSwitchSize = getInteger(PULSAR_BATCHING_MAX_MESSAGES);
+        this.messageKeyHash = get(PULSAR_MESSAGE_KEY_HASH);
         this.enableSchemaEvolution = get(PULSAR_WRITE_SCHEMA_EVOLUTION);
         this.maxPendingMessages = get(PULSAR_MAX_PENDING_MESSAGES_ACROSS_PARTITIONS);
         this.maxRecommitTimes = get(PULSAR_MAX_RECOMMIT_TIMES);
     }
 
-    /** The delivery guarantee changes the behavior of {@code PulsarWriter}. */
+    /** The delivery guarantee changes the behavior of {@link PulsarWriter}. */
     public DeliveryGuarantee getDeliveryGuarantee() {
         return deliveryGuarantee;
     }
@@ -92,9 +98,14 @@ public class SinkConfiguration extends PulsarConfiguration {
         return partitionSwitchSize;
     }
 
+    /** The message key's hash logic for routing the message into one Pulsar partition. */
+    public MessageKeyHash getMessageKeyHash() {
+        return messageKeyHash;
+    }
+
     /**
      * If we should serialize and send the message with a specified Pulsar {@link Schema} instead
-     * the default {@link Schema#BYTES}. This switch is only used for {@code PulsarSchemaWrapper}.
+     * the default {@link Schema#BYTES}. This switch is only used for {@link PulsarSchemaWrapper}.
      */
     public boolean isEnableSchemaEvolution() {
         return enableSchemaEvolution;
@@ -129,6 +140,7 @@ public class SinkConfiguration extends PulsarConfiguration {
                 && topicMetadataRefreshInterval == that.topicMetadataRefreshInterval
                 && partitionSwitchSize == that.partitionSwitchSize
                 && enableSchemaEvolution == that.enableSchemaEvolution
+                && messageKeyHash == that.messageKeyHash
                 && maxPendingMessages == that.maxPendingMessages
                 && maxRecommitTimes == that.maxRecommitTimes;
     }
@@ -140,6 +152,7 @@ public class SinkConfiguration extends PulsarConfiguration {
                 transactionTimeoutMillis,
                 topicMetadataRefreshInterval,
                 partitionSwitchSize,
+                messageKeyHash,
                 enableSchemaEvolution,
                 maxPendingMessages,
                 maxRecommitTimes);
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
new file mode 100644
index 0000000..9b3c931
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.operators.MailboxExecutor;
+import org.apache.flink.api.common.operators.ProcessingTimeService;
+import org.apache.flink.api.common.serialization.SerializationSchema.InitializationContext;
+import org.apache.flink.api.connector.sink2.Sink.InitContext;
+import org.apache.flink.api.connector.sink2.TwoPhaseCommittingSink.PrecommittingSinkWriter;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContextImpl;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessage;
+import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
+import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
+import org.apache.flink.connector.pulsar.sink.writer.topic.TopicMetadataListener;
+import org.apache.flink.connector.pulsar.sink.writer.topic.TopicProducerRegister;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import org.apache.flink.shaded.guava30.com.google.common.base.Strings;
+
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static java.util.Collections.emptyList;
+import static org.apache.flink.util.IOUtils.closeAll;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * This class is responsible to write records in a Pulsar topic and to handle the different delivery
+ * {@link DeliveryGuarantee}s.
+ *
+ * @param <IN> The type of the input elements.
+ */
+@Internal
+public class PulsarWriter<IN> implements PrecommittingSinkWriter<IN, PulsarCommittable> {
+    private static final Logger LOG = LoggerFactory.getLogger(PulsarWriter.class);
+
+    private final SinkConfiguration sinkConfiguration;
+    private final PulsarSerializationSchema<IN> serializationSchema;
+    private final TopicMetadataListener metadataListener;
+    private final TopicRouter<IN> topicRouter;
+    private final DeliveryGuarantee deliveryGuarantee;
+    private final PulsarSinkContext sinkContext;
+    private final MailboxExecutor mailboxExecutor;
+    private final TopicProducerRegister producerRegister;
+
+    // Count of in-flight async sends; only incremented for AT_LEAST_ONCE / EXACTLY_ONCE
+    // (see write()). Bounded by SinkConfiguration#getMaxPendingMessages in requirePermits().
+    private long pendingMessages = 0;
+
+    /**
+     * Constructor creating a Pulsar writer.
+     *
+     * <p>It will throw a {@link RuntimeException} if {@link
+     * PulsarSerializationSchema#open(InitializationContext, PulsarSinkContext, SinkConfiguration)}
+     * fails.
+     *
+     * @param sinkConfiguration The configuration to configure the Pulsar producer.
+     * @param serializationSchema Transform the incoming records into different message properties.
+     * @param metadataListener The listener for querying topic metadata.
+     * @param topicRouter Topic router to choose topic by incoming records.
+     * @param initContext Context to provide information about the runtime environment.
+     */
+    public PulsarWriter(
+            SinkConfiguration sinkConfiguration,
+            PulsarSerializationSchema<IN> serializationSchema,
+            TopicMetadataListener metadataListener,
+            TopicRouter<IN> topicRouter,
+            InitContext initContext) {
+        this.sinkConfiguration = checkNotNull(sinkConfiguration);
+        this.serializationSchema = checkNotNull(serializationSchema);
+        this.metadataListener = checkNotNull(metadataListener);
+        this.topicRouter = checkNotNull(topicRouter);
+        checkNotNull(initContext);
+
+        this.deliveryGuarantee = sinkConfiguration.getDeliveryGuarantee();
+        this.sinkContext = new PulsarSinkContextImpl(initContext, sinkConfiguration);
+        this.mailboxExecutor = initContext.getMailboxExecutor();
+
+        // Initialize topic metadata listener.
+        LOG.debug("Initialize topic metadata after creating Pulsar writer.");
+        ProcessingTimeService timeService = initContext.getProcessingTimeService();
+        this.metadataListener.open(sinkConfiguration, timeService);
+
+        // Initialize topic router.
+        this.topicRouter.open(sinkConfiguration);
+
+        // Initialize the serialization schema.
+        try {
+            InitializationContext initializationContext =
+                    initContext.asSerializationSchemaInitializationContext();
+            this.serializationSchema.open(initializationContext, sinkContext, sinkConfiguration);
+        } catch (Exception e) {
+            throw new FlinkRuntimeException("Cannot initialize schema.", e);
+        }
+
+        // Create this producer register after opening serialization schema!
+        this.producerRegister = new TopicProducerRegister(sinkConfiguration);
+    }
+
+    /**
+     * Serializes the element, routes it to a topic, and sends it asynchronously. Under NONE the
+     * send result is ignored; otherwise the send is funneled through the mailbox executor and
+     * bounded by the pending-message permits.
+     */
+    @Override
+    public void write(IN element, Context context) throws IOException, InterruptedException {
+        PulsarMessage<?> message = serializationSchema.serialize(element, sinkContext);
+
+        // Choose the right topic to send.
+        String key = message.getKey();
+        List<String> availableTopics = metadataListener.availableTopics();
+        String topic = topicRouter.route(element, key, availableTopics, sinkContext);
+
+        // Create message builder for sending message.
+        TypedMessageBuilder<?> builder = createMessageBuilder(topic, context, message);
+
+        // Perform message sending.
+        if (deliveryGuarantee == DeliveryGuarantee.NONE) {
+            // We would just ignore the sending exception. This may cause data loss.
+            builder.sendAsync();
+        } else {
+            // Waiting for permits to write message.
+            requirePermits();
+            mailboxExecutor.execute(
+                    () -> enqueueMessageSending(topic, builder),
+                    "Failed to send message to Pulsar");
+        }
+    }
+
+    // Sends one message and blocks the mailbox thread in get() until the send completes.
+    // NOTE(review): the exception thrown inside whenComplete surfaces through get() as an
+    // ExecutionException and fails the mailbox action — confirm this is the intended failure path.
+    private void enqueueMessageSending(String topic, TypedMessageBuilder<?> builder)
+            throws ExecutionException, InterruptedException {
+        // Block the mailbox executor for yield method.
+        builder.sendAsync()
+                .whenComplete(
+                        (id, ex) -> {
+                            this.releasePermits();
+                            if (ex != null) {
+                                throw new FlinkRuntimeException(
+                                        "Failed to send data to Pulsar " + topic, ex);
+                            } else {
+                                LOG.debug(
+                                        "Sent message to Pulsar {} with message id {}", topic, id);
+                            }
+                        })
+                .get();
+    }
+
+    // Runs on the mailbox thread; yield() drains queued mailbox actions (i.e. pending sends)
+    // until a permit frees up.
+    private void requirePermits() throws InterruptedException {
+        while (pendingMessages >= sinkConfiguration.getMaxPendingMessages()) {
+            LOG.info("Waiting for the available permits.");
+            mailboxExecutor.yield();
+        }
+        pendingMessages++;
+    }
+
+    // NOTE(review): invoked from the Pulsar client callback thread while pendingMessages is a
+    // plain long mutated on the mailbox thread; the blocking get() in enqueueMessageSending
+    // appears to serialize the two — confirm there is no unsynchronized concurrent access.
+    private void releasePermits() {
+        this.pendingMessages -= 1;
+    }
+
+    // Copies every populated PulsarMessage field onto a TypedMessageBuilder for the chosen topic.
+    @SuppressWarnings("rawtypes")
+    private TypedMessageBuilder<?> createMessageBuilder(
+            String topic, Context context, PulsarMessage<?> message) {
+
+        Schema<?> schema = message.getSchema();
+        TypedMessageBuilder<?> builder = producerRegister.createMessageBuilder(topic, schema);
+
+        byte[] orderingKey = message.getOrderingKey();
+        if (orderingKey != null && orderingKey.length > 0) {
+            builder.orderingKey(orderingKey);
+        }
+
+        String key = message.getKey();
+        if (!Strings.isNullOrEmpty(key)) {
+            builder.key(key);
+        }
+
+        long eventTime = message.getEventTime();
+        if (eventTime > 0) {
+            builder.eventTime(eventTime);
+        } else {
+            // Set default message timestamp if flink has provided one.
+            Long timestamp = context.timestamp();
+            if (timestamp != null) {
+                builder.eventTime(timestamp);
+            }
+        }
+
+        // Schema evolution would serialize the message by Pulsar Schema in TypedMessageBuilder.
+        // The type has been checked in PulsarMessageBuilder#value.
+        ((TypedMessageBuilder) builder).value(message.getValue());
+
+        Map<String, String> properties = message.getProperties();
+        if (properties != null && !properties.isEmpty()) {
+            builder.properties(properties);
+        }
+
+        Long sequenceId = message.getSequenceId();
+        if (sequenceId != null) {
+            builder.sequenceId(sequenceId);
+        }
+
+        List<String> clusters = message.getReplicationClusters();
+        if (clusters != null && !clusters.isEmpty()) {
+            builder.replicationClusters(clusters);
+        }
+
+        if (message.isDisableReplication()) {
+            builder.disableReplication();
+        }
+
+        return builder;
+    }
+
+    /**
+     * Flushes the producers. On a checkpoint (endOfInput == false) this loops until all pending
+     * sends have completed; under NONE there are no permits to wait for, so the loop is skipped.
+     */
+    @Override
+    public void flush(boolean endOfInput) throws IOException, InterruptedException {
+        if (endOfInput) {
+            // Try flush only once when we meet the end of the input.
+            producerRegister.flush();
+        } else {
+            while (pendingMessages != 0 && deliveryGuarantee != DeliveryGuarantee.NONE) {
+                producerRegister.flush();
+                LOG.info("Flush the pending messages to Pulsar.");
+                mailboxExecutor.yield();
+            }
+        }
+    }
+
+    /** Only EXACTLY_ONCE produces committables (open Pulsar transactions) to hand downstream. */
+    @Override
+    public Collection<PulsarCommittable> prepareCommit() {
+        if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
+            return producerRegister.prepareCommit();
+        } else {
+            return emptyList();
+        }
+    }
+
+    @Override
+    public void close() throws Exception {
+        // Close all the resources and throw the exception at last.
+        closeAll(metadataListener, producerRegister);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContext.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContext.java
new file mode 100644
index 0000000..5c93339
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContext.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.context;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/** This context provides information on the Pulsar record's target location at write time. */
+@PublicEvolving
+public interface PulsarSinkContext {
+
+    /**
+     * Gets the number of the subtask that PulsarSink is running on. The numbering starts from 0
+     * and goes up to parallelism-1 (parallelism as returned by {@link
+     * #getNumberOfParallelInstances()}).
+     *
+     * @return number of the subtask
+     */
+    int getParallelInstanceId();
+
+    /** @return number of parallel PulsarSink tasks. */
+    int getNumberOfParallelInstances();
+
+    /**
+     * Pulsar can check the schema and upgrade the schema automatically. If this option is
+     * enabled, the record is not serialized into bytes by the sink; it is sent as-is and
+     * serialized by the Pulsar client with its schema instead.
+     */
+    boolean isEnableSchemaEvolution();
+
+    /** Returns the current processing time in Flink. */
+    long processTime();
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContextImpl.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContextImpl.java
new file mode 100644
index 0000000..681b25a
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/context/PulsarSinkContextImpl.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.context;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.operators.ProcessingTimeService;
+import org.apache.flink.api.connector.sink2.Sink.InitContext;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+
+/** Default {@link PulsarSinkContext} backed by the writer's {@link InitContext}. */
+@Internal
+public class PulsarSinkContextImpl implements PulsarSinkContext {
+
+    // Snapshot of the runtime information taken once at writer creation.
+    private final int subtaskId;
+    private final int parallelism;
+    private final ProcessingTimeService timeService;
+    private final boolean schemaEvolutionEnabled;
+
+    public PulsarSinkContextImpl(InitContext initContext, SinkConfiguration sinkConfiguration) {
+        this.subtaskId = initContext.getSubtaskId();
+        this.parallelism = initContext.getNumberOfParallelSubtasks();
+        this.timeService = initContext.getProcessingTimeService();
+        this.schemaEvolutionEnabled = sinkConfiguration.isEnableSchemaEvolution();
+    }
+
+    @Override
+    public int getParallelInstanceId() {
+        return subtaskId;
+    }
+
+    @Override
+    public int getNumberOfParallelInstances() {
+        return parallelism;
+    }
+
+    @Override
+    public boolean isEnableSchemaEvolution() {
+        return schemaEvolutionEnabled;
+    }
+
+    @Override
+    public long processTime() {
+        return timeService.getCurrentProcessingTime();
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouter.java
new file mode 100644
index 0000000..433d79c
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/KeyHashTopicRouter.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+import org.apache.flink.shaded.guava30.com.google.common.base.Strings;
+
+import org.apache.pulsar.client.impl.Hash;
+
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.apache.flink.shaded.guava30.com.google.common.base.Preconditions.checkArgument;
+import static org.apache.pulsar.client.util.MathUtils.signSafeMod;
+
+/**
+ * Router backing the {@link TopicRoutingMode#MESSAGE_KEY_HASH} policy. The target topic is
+ * selected by hashing the message key; messages without a key are spread over the topics at
+ * random.
+ *
+ * @param <IN> The message type which should write to Pulsar.
+ */
+@Internal
+public class KeyHashTopicRouter<IN> implements TopicRouter<IN> {
+    private static final long serialVersionUID = 2475614648095079804L;
+
+    // Which hash implementation (Java String hash or Murmur3) maps keys to topic indexes.
+    private final MessageKeyHash messageKeyHash;
+
+    public KeyHashTopicRouter(SinkConfiguration sinkConfiguration) {
+        this.messageKeyHash = sinkConfiguration.getMessageKeyHash();
+    }
+
+    @Override
+    public String route(IN in, String key, List<String> partitions, PulsarSinkContext context) {
+        checkArgument(
+                !partitions.isEmpty(),
+                "You should provide topics for routing topic by message key hash.");
+
+        int chosen =
+                Strings.isNullOrEmpty(key)
+                        // No key: fall back to a uniformly random topic.
+                        ? ThreadLocalRandom.current().nextInt(partitions.size())
+                        // Keyed: hash the key and map it into the topic list, sign-safe.
+                        : signSafeMod(messageKeyHash.getHash().makeHash(key), partitions.size());
+
+        return partitions.get(chosen);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/MessageKeyHash.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/MessageKeyHash.java
new file mode 100644
index 0000000..7f35760
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/MessageKeyHash.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.DescribedEnum;
+import org.apache.flink.configuration.description.InlineElement;
+
+import org.apache.pulsar.client.impl.Hash;
+import org.apache.pulsar.client.impl.JavaStringHash;
+import org.apache.pulsar.client.impl.Murmur3_32Hash;
+
+import static org.apache.flink.configuration.description.LinkElement.link;
+import static org.apache.flink.configuration.description.TextElement.code;
+import static org.apache.flink.configuration.description.TextElement.text;
+
+/** The predefined hash functions available for routing a message by its key. */
+@PublicEvolving
+public enum MessageKeyHash implements DescribedEnum {
+
+    /** Use regular <code>String.hashCode()</code>. */
+    JAVA_HASH(
+            "java-hash",
+            text(
+                    "This hash would use %s to calculate the message key string's hash code.",
+                    code("String.hashCode()"))) {
+        @Override
+        public Hash getHash() {
+            return JavaStringHash.getInstance();
+        }
+    },
+    /**
+     * Use Murmur3 hashing function. <a
+     * href="https://en.wikipedia.org/wiki/MurmurHash">https://en.wikipedia.org/wiki/MurmurHash</a>
+     */
+    MURMUR3_32_HASH(
+            "murmur-3-32-hash",
+            text(
+                    "This hash would calculate message key's hash code by using %s algorithm.",
+                    link("https://en.wikipedia.org/wiki/MurmurHash", "Murmur3"))) {
+        @Override
+        public Hash getHash() {
+            return Murmur3_32Hash.getInstance();
+        }
+    };
+
+    // The external identifier of this hash; returned by toString().
+    private final String name;
+    // Human-readable description rendered into the generated configuration docs.
+    private final InlineElement desc;
+
+    MessageKeyHash(String name, InlineElement desc) {
+        this.name = name;
+        this.desc = desc;
+    }
+
+    /** Returns the Pulsar client {@link Hash} implementation backing this enum constant. */
+    @Internal
+    public abstract Hash getHash();
+
+    // NOTE(review): toString() appears to be the value users put into config files for this
+    // option — confirm against the PULSAR_MESSAGE_KEY_HASH option before renaming anything.
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    @Internal
+    @Override
+    public InlineElement getDescription() {
+        return desc;
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouter.java
new file mode 100644
index 0000000..b9c654a
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/RoundRobinTopicRouter.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.flink.shaded.guava30.com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * If you choose the {@link TopicRoutingMode#ROUND_ROBIN} policy, we would use this implementation.
+ * We would pick the topic one by one in a fixed batch size.
+ *
+ * @param <IN> The message type which should write to Pulsar.
+ */
+@Internal
+public class RoundRobinTopicRouter<IN> implements TopicRouter<IN> {
+    private static final long serialVersionUID = -1160533263474038206L;
+
+    /** The internal counter for counting the messages. */
+    private final AtomicLong counter = new AtomicLong(0);
+
+    /** The size when we switch to another topic. */
+    private final int partitionSwitchSize;
+
+    public RoundRobinTopicRouter(SinkConfiguration configuration) {
+        this.partitionSwitchSize = configuration.getPartitionSwitchSize();
+    }
+
+    @Override
+    public String route(IN in, String key, List<String> partitions, PulsarSinkContext context) {
+        checkArgument(
+                !partitions.isEmpty(),
+                "You should provide topics for routing topic by message key hash.");
+
+        long counts = counter.getAndAdd(1);
+        long index = (counts / partitionSwitchSize) % partitions.size();
+        // Avoid digit overflow for message counter.
+        int topicIndex = (int) (Math.abs(index) % Integer.MAX_VALUE);
+
+        return partitions.get(topicIndex);
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRouter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRouter.java
new file mode 100644
index 0000000..a2c0589
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRouter.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.pulsar.sink.PulsarSinkBuilder;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessageBuilder;
+import org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils;
+
+import java.io.Serializable;
+import java.util.List;
+
/**
 * The router for choosing the desired topic to write the Flink records. The user can implement this
 * router for complex requirements. We have provided some easy-to-use implementations.
 *
 * <p>This topic router is stateless and doesn't have any initialize logic. Make sure you don't
 * require some dynamic state.
 *
 * @param <IN> The record type needs to be written to Pulsar.
 */
@PublicEvolving
public interface TopicRouter<IN> extends Serializable {

    /**
     * Choose the topic by the given record and the available partition list. You can return a new
     * topic name if you need it.
     *
     * @param in The record instance which needs to be written to Pulsar.
     * @param key The key of the message from {@link PulsarMessageBuilder#key(String)}. It could be
     *     null, if the message doesn't have a key.
     * @param partitions The available partition list. This could be empty if you don't provide any
     *     topics in {@link PulsarSinkBuilder#setTopics(String...)}. You can return a custom topic,
     *     but make sure it should contain a partition index in naming. Using {@link
     *     TopicNameUtils#topicNameWithPartition(String, int)} can easily create a topic name with
     *     partition index.
     * @param context The context contains useful information for determining the topic.
     * @return The topic name to use.
     */
    String route(IN in, String key, List<String> partitions, PulsarSinkContext context);

    /** Implement this method if you have some non-serializable field which needs initializing. */
    default void open(SinkConfiguration sinkConfiguration) {
        // Nothing to do by default.
    }
}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRoutingMode.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRoutingMode.java
new file mode 100644
index 0000000..c327435
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/router/TopicRoutingMode.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.router;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.DescribedEnum;
+import org.apache.flink.configuration.description.InlineElement;
+
+import static org.apache.flink.configuration.description.TextElement.code;
+import static org.apache.flink.configuration.description.TextElement.text;
+import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_BATCHING_MAX_MESSAGES;
+
+/** The routing policy for choosing the desired topic by the given message. */
+@PublicEvolving
+public enum TopicRoutingMode implements DescribedEnum {
+
+    /**
+     * The producer will publish messages across all partitions in a round-robin fashion to achieve
+     * maximum throughput. Please note that round-robin is not done per individual message but
+     * rather it's set to the same boundary of batching delay, to ensure batching is effective.
+     */
+    ROUND_ROBIN(
+            "round-robin",
+            text(
+                    "The producer will publish messages across all partitions in a round-robin fashion to achieve maximum throughput."
+                            + " Please note that round-robin is not done per individual message"
+                            + " but rather it's set to the same boundary of %s, to ensure batching is effective.",
+                    code(PULSAR_BATCHING_MAX_MESSAGES.key()))),
+
+    /**
+     * If no key is provided, The partitioned producer will randomly pick one single topic partition
+     * and publish all the messages into that partition. If a key is provided on the message, the
+     * partitioned producer will hash the key and assign the message to a particular partition.
+     */
+    MESSAGE_KEY_HASH(
+            "message-key-hash",
+            text(
+                    "If no key is provided, The partitioned producer will randomly pick one single topic partition"
+                            + " and publish all the messages into that partition. If a key is provided on the message,"
+                            + " the partitioned producer will hash the key and assign the message to a particular partition.")),
+
+    /**
+     * Use custom topic router implementation that will be called to determine the partition for a
+     * particular message.
+     */
+    CUSTOM(
+            "custom",
+            text(
+                    "Use custom %s implementation that will be called to determine the partition for a particular message.",
+                    code(TopicRouter.class.getSimpleName())));
+
+    private final String name;
+    private final InlineElement desc;
+
+    TopicRoutingMode(String name, InlineElement desc) {
+        this.name = name;
+        this.desc = desc;
+    }
+
+    @Internal
+    @Override
+    public InlineElement getDescription() {
+        return desc;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListener.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListener.java
new file mode 100644
index 0000000..acd1c61
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicMetadataListener.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.topic;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.operators.ProcessingTimeService;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+
+import org.apache.flink.shaded.guava30.com.google.common.base.Objects;
+import org.apache.flink.shaded.guava30.com.google.common.collect.ImmutableList;
+
+import org.apache.pulsar.client.admin.PulsarAdmin;
+import org.apache.pulsar.client.admin.PulsarAdminException;
+import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.emptyList;
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createAdmin;
+import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyAdmin;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.isPartitioned;
+import static org.apache.flink.connector.pulsar.source.enumerator.topic.TopicNameUtils.topicNameWithPartition;
+
+/**
+ * We need the latest topic metadata for making sure the newly created topic partitions would be
+ * used by the Pulsar sink. This routing policy would be different compared with Pulsar Client
+ * built-in logic. We use Flink's ProcessingTimer as the executor.
+ */
+@Internal
+public class TopicMetadataListener implements Serializable, Closeable {
+    private static final long serialVersionUID = 6186948471557507522L;
+
+    private static final Logger LOG = LoggerFactory.getLogger(TopicMetadataListener.class);
+
+    private final ImmutableList<String> partitionedTopics;
+    private final Map<String, Integer> topicMetadata;
+    private volatile ImmutableList<String> availableTopics;
+
+    // Dynamic fields.
+    private transient PulsarAdmin pulsarAdmin;
+    private transient Long topicMetadataRefreshInterval;
+    private transient ProcessingTimeService timeService;
+
+    public TopicMetadataListener() {
+        this(emptyList());
+    }
+
+    public TopicMetadataListener(List<String> topics) {
+        List<String> partitions = new ArrayList<>(topics.size());
+        Map<String, Integer> metadata = new HashMap<>(topics.size());
+        for (String topic : topics) {
+            if (isPartitioned(topic)) {
+                partitions.add(topic);
+            } else {
+                // This would be updated when open writing.
+                metadata.put(topic, -1);
+            }
+        }
+
+        this.partitionedTopics = ImmutableList.copyOf(partitions);
+        this.topicMetadata = metadata;
+        this.availableTopics = ImmutableList.of();
+    }
+
+    /** Register the topic metadata update in process time service. */
+    public void open(SinkConfiguration sinkConfiguration, ProcessingTimeService timeService) {
+        if (topicMetadata.isEmpty()) {
+            LOG.info("No topics have been provided, skip listener initialize.");
+            return;
+        }
+
+        // Initialize listener properties.
+        this.pulsarAdmin = createAdmin(sinkConfiguration);
+        this.topicMetadataRefreshInterval = sinkConfiguration.getTopicMetadataRefreshInterval();
+        this.timeService = timeService;
+
+        // Initialize the topic metadata. Quit if fail to connect to Pulsar.
+        sneakyAdmin(this::updateTopicMetadata);
+
+        // Register time service.
+        triggerNextTopicMetadataUpdate(true);
+    }
+
+    /**
+     * Return all the available topic partitions. We would recalculate the partitions if the topic
+     * metadata has been changed. Otherwise, we would return the cached result for better
+     * performance.
+     */
+    public List<String> availableTopics() {
+        if (availableTopics.isEmpty()
+                && (!partitionedTopics.isEmpty() || !topicMetadata.isEmpty())) {
+            List<String> results = new ArrayList<>();
+            for (Map.Entry<String, Integer> entry : topicMetadata.entrySet()) {
+                for (int i = 0; i < entry.getValue(); i++) {
+                    results.add(topicNameWithPartition(entry.getKey(), i));
+                }
+            }
+
+            results.addAll(partitionedTopics);
+            this.availableTopics = ImmutableList.copyOf(results);
+        }
+
+        return availableTopics;
+    }
+
+    @Override
+    public void close() throws IOException {
+        if (pulsarAdmin != null) {
+            pulsarAdmin.close();
+        }
+    }
+
+    private void triggerNextTopicMetadataUpdate(boolean initial) {
+        if (!initial) {
+            // We should update the topic metadata, ignore the pulsar admin exception.
+            try {
+                updateTopicMetadata();
+            } catch (PulsarAdminException e) {
+                LOG.warn("", e);
+            }
+        }
+
+        // Register next timer.
+        long currentProcessingTime = timeService.getCurrentProcessingTime();
+        long triggerTime = currentProcessingTime + topicMetadataRefreshInterval;
+        timeService.registerTimer(triggerTime, time -> triggerNextTopicMetadataUpdate(false));
+    }
+
+    private void updateTopicMetadata() throws PulsarAdminException {
+        boolean shouldUpdate = false;
+
+        for (Map.Entry<String, Integer> entry : topicMetadata.entrySet()) {
+            String topic = entry.getKey();
+            PartitionedTopicMetadata metadata =
+                    pulsarAdmin.topics().getPartitionedTopicMetadata(topic);
+
+            // Update topic metadata if it has been changed.
+            if (!Objects.equal(entry.getValue(), metadata.partitions)) {
+                entry.setValue(metadata.partitions);
+                shouldUpdate = true;
+            }
+        }
+
+        // Clear available topics if the topic metadata has been changed.
+        if (shouldUpdate) {
+            this.availableTopics = ImmutableList.of();
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegister.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegister.java
new file mode 100644
index 0000000..9bb1753
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/topic/TopicProducerRegister.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.topic;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.base.DeliveryGuarantee;
+import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.util.FlinkRuntimeException;
+
+import org.apache.flink.shaded.guava30.com.google.common.io.Closer;
+
+import org.apache.pulsar.client.api.Producer;
+import org.apache.pulsar.client.api.ProducerBuilder;
+import org.apache.pulsar.client.api.PulsarClient;
+import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.TypedMessageBuilder;
+import org.apache.pulsar.client.api.transaction.Transaction;
+import org.apache.pulsar.client.api.transaction.TransactionCoordinatorClient;
+import org.apache.pulsar.client.api.transaction.TxnID;
+import org.apache.pulsar.client.impl.PulsarClientImpl;
+import org.apache.pulsar.common.schema.SchemaInfo;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.flink.connector.pulsar.common.config.PulsarClientFactory.createClient;
+import static org.apache.flink.connector.pulsar.common.utils.PulsarExceptionUtils.sneakyClient;
+import static org.apache.flink.connector.pulsar.common.utils.PulsarTransactionUtils.createTransaction;
+import static org.apache.flink.connector.pulsar.sink.config.PulsarSinkConfigUtils.createProducerBuilder;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
/**
 * All the Pulsar Producers share the same Client, but self hold the queue for a specified topic. So
 * we have to create different instances for different topics.
 *
 * <p>This register owns the shared {@link PulsarClient}, one producer per (topic, schema) pair, and
 * at most one open {@link Transaction} per topic. It is not safe for concurrent use from multiple
 * threads; each sink writer owns its own instance.
 */
@Internal
public class TopicProducerRegister implements Closeable {

    private final PulsarClient pulsarClient;
    private final SinkConfiguration sinkConfiguration;
    // topic name -> (schema info -> producer). Two schemas on the same topic get two producers.
    private final Map<String, Map<SchemaInfo, Producer<?>>> producerRegister;
    // topic name -> currently open transaction; cleared on every prepareCommit()/abort.
    private final Map<String, Transaction> transactionRegister;

    public TopicProducerRegister(SinkConfiguration sinkConfiguration) {
        this.pulsarClient = createClient(sinkConfiguration);
        this.sinkConfiguration = sinkConfiguration;
        this.producerRegister = new HashMap<>();
        this.transactionRegister = new HashMap<>();
    }

    /**
     * Create a TypedMessageBuilder which could be sent to Pulsar directly. First, we would create a
     * topic-related producer or use a cached instead. Then we would try to find a topic-related
     * transaction. We would generate a transaction instance if there is no transaction. Finally, we
     * create the message builder and put the element into it.
     *
     * <p>Only the EXACTLY_ONCE delivery guarantee binds the message to a transaction; all other
     * guarantees send the message outside of any transaction.
     */
    public <T> TypedMessageBuilder<T> createMessageBuilder(String topic, Schema<T> schema) {
        Producer<T> producer = getOrCreateProducer(topic, schema);
        DeliveryGuarantee deliveryGuarantee = sinkConfiguration.getDeliveryGuarantee();

        if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
            Transaction transaction = getOrCreateTransaction(topic);
            return producer.newMessage(transaction);
        } else {
            return producer.newMessage();
        }
    }

    /**
     * Convert the transactions into a committable list for Pulsar Committer. The transactions would
     * be removed until Flink triggered a checkpoint.
     *
     * <p>After this call the register holds no open transactions; the next message on a topic
     * starts a fresh one. Committing/aborting the returned committables is the committer's job.
     */
    public List<PulsarCommittable> prepareCommit() {
        List<PulsarCommittable> committables = new ArrayList<>(transactionRegister.size());
        transactionRegister.forEach(
                (topic, transaction) -> {
                    TxnID txnID = transaction.getTxnID();
                    PulsarCommittable committable = new PulsarCommittable(txnID, topic);
                    committables.add(committable);
                });

        clearTransactions();
        return committables;
    }

    /**
     * Flush all the messages buffered in the client and wait until all messages have been
     * successfully persisted.
     */
    public void flush() throws IOException {
        Collection<Map<SchemaInfo, Producer<?>>> collection = producerRegister.values();
        for (Map<SchemaInfo, Producer<?>> producers : collection) {
            for (Producer<?> producer : producers.values()) {
                // Producer.flush() blocks until every pending message is acknowledged.
                producer.flush();
            }
        }
    }

    @Override
    public void close() throws IOException {
        // NOTE(review): Guava's Closer closes registered resources in REVERSE registration
        // order, so at close time these run as: pulsarClient, producerRegister::clear,
        // abortTransactions, flush — i.e. the client is closed BEFORE flush/abort execute,
        // which contradicts the ordering the comments below imply. Verify this is intended.
        try (Closer closer = Closer.create()) {
            // Flush all the pending messages to Pulsar. This wouldn't cause exception.
            closer.register(this::flush);

            // Abort all the existing transactions.
            closer.register(this::abortTransactions);

            // Remove all the producers.
            closer.register(producerRegister::clear);

            // All the producers would be closed by this method.
            // We would block until all the producers have been successfully closed.
            closer.register(pulsarClient);
        }
    }

    /** Create or return the cached topic-related producer. */
    @SuppressWarnings("unchecked")
    private <T> Producer<T> getOrCreateProducer(String topic, Schema<T> schema) {
        Map<SchemaInfo, Producer<?>> producers =
                producerRegister.computeIfAbsent(topic, key -> new HashMap<>());
        SchemaInfo schemaInfo = schema.getSchemaInfo();

        if (producers.containsKey(schemaInfo)) {
            // The unchecked cast is safe: the producer was stored under this exact SchemaInfo.
            return (Producer<T>) producers.get(schemaInfo);
        } else {
            ProducerBuilder<T> builder =
                    createProducerBuilder(pulsarClient, schema, sinkConfiguration);
            // Set the required topic name.
            builder.topic(topic);
            Producer<T> producer = sneakyClient(builder::create);
            producers.put(schemaInfo, producer);

            return producer;
        }
    }

    /**
     * Get the cached topic-related transaction. Or create a new transaction after checkpointing.
     */
    private Transaction getOrCreateTransaction(String topic) {
        return transactionRegister.computeIfAbsent(
                topic,
                t -> {
                    long timeoutMillis = sinkConfiguration.getTransactionTimeoutMillis();
                    return createTransaction(pulsarClient, timeoutMillis);
                });
    }

    /** Abort the existing transactions. This method would be used when closing PulsarWriter. */
    private void abortTransactions() {
        if (transactionRegister.isEmpty()) {
            return;
        }

        TransactionCoordinatorClient coordinatorClient =
                ((PulsarClientImpl) pulsarClient).getTcClient();
        // This null check is used for making sure transaction is enabled in client.
        checkNotNull(coordinatorClient);

        try (Closer closer = Closer.create()) {
            for (Transaction transaction : transactionRegister.values()) {
                TxnID txnID = transaction.getTxnID();
                closer.register(() -> coordinatorClient.abort(txnID));
            }

            clearTransactions();
        } catch (IOException e) {
            throw new FlinkRuntimeException(e);
        }
    }

    /**
     * Clean these transactions. All transactions should be passed to Pulsar committer, we would
     * create new transaction when new message comes.
     */
    private void clearTransactions() {
        transactionRegister.clear();
    }
}

[flink] 09/09: [FLINK-26038][connector/pulsar] Support delay message on PulsarSink.

Posted by fp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

fpaul pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 07f23e0d3383941ceb475cdd753a59d07100bdf5
Author: Yufan Sheng <yu...@streamnative.io>
AuthorDate: Fri Feb 11 12:19:42 2022 +0800

    [FLINK-26038][connector/pulsar] Support delay message on PulsarSink.
---
 .../flink/connector/pulsar/sink/PulsarSink.java    | 13 ++++-
 .../connector/pulsar/sink/PulsarSinkBuilder.java   | 20 ++++++-
 .../connector/pulsar/sink/writer/PulsarWriter.java | 10 ++++
 .../sink/writer/delayer/FixedMessageDelayer.java   | 43 +++++++++++++++
 .../pulsar/sink/writer/delayer/MessageDelayer.java | 62 ++++++++++++++++++++++
 .../pulsar/sink/PulsarSinkBuilderTest.java         |  1 -
 .../pulsar/sink/writer/PulsarWriterTest.java       |  5 +-
 7 files changed, 149 insertions(+), 5 deletions(-)

diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
index 811d5b5..4c6c4a9 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSink.java
@@ -28,6 +28,7 @@ import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittableSeriali
 import org.apache.flink.connector.pulsar.sink.committer.PulsarCommitter;
 import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
 import org.apache.flink.connector.pulsar.sink.writer.PulsarWriter;
+import org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer;
 import org.apache.flink.connector.pulsar.sink.writer.router.KeyHashTopicRouter;
 import org.apache.flink.connector.pulsar.sink.writer.router.RoundRobinTopicRouter;
 import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
@@ -82,6 +83,7 @@ public class PulsarSink<IN> implements TwoPhaseCommittingSink<IN, PulsarCommitta
     private final SinkConfiguration sinkConfiguration;
     private final PulsarSerializationSchema<IN> serializationSchema;
     private final TopicMetadataListener metadataListener;
+    private final MessageDelayer<IN> messageDelayer;
     private final TopicRouter<IN> topicRouter;
 
     PulsarSink(
@@ -89,10 +91,12 @@ public class PulsarSink<IN> implements TwoPhaseCommittingSink<IN, PulsarCommitta
             PulsarSerializationSchema<IN> serializationSchema,
             TopicMetadataListener metadataListener,
             TopicRoutingMode topicRoutingMode,
-            TopicRouter<IN> topicRouter) {
+            TopicRouter<IN> topicRouter,
+            MessageDelayer<IN> messageDelayer) {
         this.sinkConfiguration = checkNotNull(sinkConfiguration);
         this.serializationSchema = checkNotNull(serializationSchema);
         this.metadataListener = checkNotNull(metadataListener);
+        this.messageDelayer = checkNotNull(messageDelayer);
         checkNotNull(topicRoutingMode);
 
         // Create topic router supplier.
@@ -119,7 +123,12 @@ public class PulsarSink<IN> implements TwoPhaseCommittingSink<IN, PulsarCommitta
     @Override
     public PrecommittingSinkWriter<IN, PulsarCommittable> createWriter(InitContext initContext) {
         return new PulsarWriter<>(
-                sinkConfiguration, serializationSchema, metadataListener, topicRouter, initContext);
+                sinkConfiguration,
+                serializationSchema,
+                metadataListener,
+                topicRouter,
+                messageDelayer,
+                initContext);
     }
 
     @Internal
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
index a0352f5..1668e3d 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilder.java
@@ -25,6 +25,7 @@ import org.apache.flink.connector.base.DeliveryGuarantee;
 import org.apache.flink.connector.pulsar.common.config.PulsarConfigBuilder;
 import org.apache.flink.connector.pulsar.common.config.PulsarOptions;
 import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer;
 import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
 import org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode;
 import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSchemaWrapper;
@@ -101,6 +102,7 @@ public class PulsarSinkBuilder<IN> {
     private TopicMetadataListener metadataListener;
     private TopicRoutingMode topicRoutingMode;
     private TopicRouter<IN> topicRouter;
+    private MessageDelayer<IN> messageDelayer;
 
     // private builder constructor.
     PulsarSinkBuilder() {
@@ -231,6 +233,17 @@ public class PulsarSinkBuilder<IN> {
     }
 
     /**
+     * Set a message delayer to enable Pulsar's delayed message delivery.
+     *
+     * @param messageDelayer The delayer which defines when to send the message to the consumer.
+     * @return this PulsarSinkBuilder.
+     */
+    public PulsarSinkBuilder<IN> delaySendingMessage(MessageDelayer<IN> messageDelayer) {
+        this.messageDelayer = checkNotNull(messageDelayer);
+        return this;
+    }
+
+    /**
      * Set an arbitrary property for the PulsarSink and Pulsar Producer. The valid keys can be found
      * in {@link PulsarSinkOptions} and {@link PulsarOptions}.
      *
@@ -331,6 +344,10 @@ public class PulsarSinkBuilder<IN> {
             this.topicRoutingMode = TopicRoutingMode.ROUND_ROBIN;
         }
 
+        if (messageDelayer == null) {
+            this.messageDelayer = MessageDelayer.never();
+        }
+
         // This is an unmodifiable configuration for Pulsar.
         // We don't use Pulsar's built-in configure classes for compatible requirement.
         SinkConfiguration sinkConfiguration =
@@ -341,7 +358,8 @@ public class PulsarSinkBuilder<IN> {
                 serializationSchema,
                 metadataListener,
                 topicRoutingMode,
-                topicRouter);
+                topicRouter,
+                messageDelayer);
     }
 
     // ------------- private helpers  --------------
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
index 9b3c931..1e4113a 100644
--- a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriter.java
@@ -29,6 +29,7 @@ import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
 import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
 import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
 import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContextImpl;
+import org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer;
 import org.apache.flink.connector.pulsar.sink.writer.message.PulsarMessage;
 import org.apache.flink.connector.pulsar.sink.writer.router.TopicRouter;
 import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
@@ -67,6 +68,7 @@ public class PulsarWriter<IN> implements PrecommittingSinkWriter<IN, PulsarCommi
     private final PulsarSerializationSchema<IN> serializationSchema;
     private final TopicMetadataListener metadataListener;
     private final TopicRouter<IN> topicRouter;
+    private final MessageDelayer<IN> messageDelayer;
     private final DeliveryGuarantee deliveryGuarantee;
     private final PulsarSinkContext sinkContext;
     private final MailboxExecutor mailboxExecutor;
@@ -92,11 +94,13 @@ public class PulsarWriter<IN> implements PrecommittingSinkWriter<IN, PulsarCommi
             PulsarSerializationSchema<IN> serializationSchema,
             TopicMetadataListener metadataListener,
             TopicRouter<IN> topicRouter,
+            MessageDelayer<IN> messageDelayer,
             InitContext initContext) {
         this.sinkConfiguration = checkNotNull(sinkConfiguration);
         this.serializationSchema = checkNotNull(serializationSchema);
         this.metadataListener = checkNotNull(metadataListener);
         this.topicRouter = checkNotNull(topicRouter);
+        this.messageDelayer = checkNotNull(messageDelayer);
         checkNotNull(initContext);
 
         this.deliveryGuarantee = sinkConfiguration.getDeliveryGuarantee();
@@ -136,6 +140,12 @@ public class PulsarWriter<IN> implements PrecommittingSinkWriter<IN, PulsarCommi
         // Create message builder for sending message.
         TypedMessageBuilder<?> builder = createMessageBuilder(topic, context, message);
 
+        // Apply the optional delayed message delivery.
+        long deliverAt = messageDelayer.deliverAt(element, sinkContext);
+        if (deliverAt > 0) {
+            builder.deliverAt(deliverAt);
+        }
+
         // Perform message sending.
         if (deliveryGuarantee == DeliveryGuarantee.NONE) {
             // We would just ignore the sending exception. This may cause data loss.
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/FixedMessageDelayer.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/FixedMessageDelayer.java
new file mode 100644
index 0000000..c11d2f8
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/FixedMessageDelayer.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.delayer;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+/** A delayer which makes sure all the messages are sent after a fixed delay duration. */
+@PublicEvolving
+public class FixedMessageDelayer<IN> implements MessageDelayer<IN> {
+    private static final long serialVersionUID = -7550834520312097614L;
+
+    private final long delayDuration;
+
+    public FixedMessageDelayer(long delayDuration) {
+        this.delayDuration = delayDuration;
+    }
+
+    @Override
+    public long deliverAt(IN message, PulsarSinkContext sinkContext) {
+        if (delayDuration > 0) {
+            return sinkContext.processTime() + delayDuration;
+        } else {
+            return delayDuration;
+        }
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/MessageDelayer.java b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/MessageDelayer.java
new file mode 100644
index 0000000..53a345b
--- /dev/null
+++ b/flink-connectors/flink-connector-pulsar/src/main/java/org/apache/flink/connector/pulsar/sink/writer/delayer/MessageDelayer.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.pulsar.sink.writer.delayer;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.context.PulsarSinkContext;
+
+import org.apache.pulsar.client.api.SubscriptionType;
+
+import java.io.Serializable;
+import java.time.Duration;
+
+/**
+ * A delayer for the Pulsar broker passing the sent message to the downstream consumer. This only
+ * works in the {@link SubscriptionType#Shared} subscription.
+ *
+ * <p>Read <a
+ * href="https://pulsar.apache.org/docs/en/next/concepts-messaging/#delayed-message-delivery">delayed
+ * message delivery</a> for a better understanding of this feature.
+ */
+@PublicEvolving
+public interface MessageDelayer<IN> extends Serializable {
+
+    /**
+     * Returns the timestamp at which this message should be delivered to the consumer. You should
+     * calculate the timestamp by using {@link PulsarSinkContext#processTime()}; a non-positive
+     * value indicates that this message should be sent immediately.
+     */
+    long deliverAt(IN message, PulsarSinkContext sinkContext);
+
+    /** Override this method if you have some non-serializable fields to initialize. */
+    default void open(SinkConfiguration sinkConfiguration) {
+        // Nothing to do by default.
+    }
+
+    /** All the messages should be delivered immediately, without any delay. */
+    static <IN> FixedMessageDelayer<IN> never() {
+        return new FixedMessageDelayer<>(-1L);
+    }
+
+    /** All the messages should be delivered after a fixed delay duration. */
+    static <IN> FixedMessageDelayer<IN> fixed(Duration duration) {
+        return new FixedMessageDelayer<>(duration.toMillis());
+    }
+}
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
index 188e718..0e0db88 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/PulsarSinkBuilderTest.java
@@ -26,7 +26,6 @@ import org.junit.jupiter.api.Test;
 import java.util.Properties;
 
 import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_SEND_TIMEOUT_MS;
-import static org.apache.flink.connector.pulsar.sink.PulsarSinkOptions.PULSAR_WRITE_SCHEMA_EVOLUTION;
 import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.CUSTOM;
 import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.MESSAGE_KEY_HASH;
 import static org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode.ROUND_ROBIN;
diff --git a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
index 1534fb5..942b759 100644
--- a/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
+++ b/flink-connectors/flink-connector-pulsar/src/test/java/org/apache/flink/connector/pulsar/sink/writer/PulsarWriterTest.java
@@ -27,6 +27,8 @@ import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.base.DeliveryGuarantee;
 import org.apache.flink.connector.pulsar.sink.committer.PulsarCommittable;
 import org.apache.flink.connector.pulsar.sink.config.SinkConfiguration;
+import org.apache.flink.connector.pulsar.sink.writer.delayer.FixedMessageDelayer;
+import org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer;
 import org.apache.flink.connector.pulsar.sink.writer.router.RoundRobinTopicRouter;
 import org.apache.flink.connector.pulsar.sink.writer.serializer.PulsarSerializationSchema;
 import org.apache.flink.connector.pulsar.sink.writer.topic.TopicMetadataListener;
@@ -80,10 +82,11 @@ class PulsarWriterTest extends PulsarTestSuiteBase {
         PulsarSerializationSchema<String> schema = pulsarSchema(STRING);
         TopicMetadataListener listener = new TopicMetadataListener(singletonList(topic));
         RoundRobinTopicRouter<String> router = new RoundRobinTopicRouter<>(configuration);
+        FixedMessageDelayer<String> delayer = MessageDelayer.never();
         MockInitContext initContext = new MockInitContext();
 
         PulsarWriter<String> writer =
-                new PulsarWriter<>(configuration, schema, listener, router, initContext);
+                new PulsarWriter<>(configuration, schema, listener, router, delayer, initContext);
 
         writer.flush(false);
         writer.prepareCommit();