Posted to commits@flink.apache.org by da...@apache.org on 2022/12/03 18:59:52 UTC

[flink-connector-aws] branch main updated (1686962 -> aa10d88)

This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a change to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git


    from 1686962   [hotfix] Setup auto-linking
     new 569049e  [FLINK-29907][Connectors/DynamoDB] Improve maven module names
     new 89d4a55  [FLINK-29907][Connectors/AWS] Externalize AWS Base from Flink repo
     new 5993e34  [FLINK-29907][Connectors/Firehose] Externalize Amazon Firehose connectors from Flink repo
     new d5375d8  [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis v2 connectors from Flink repo
     new a23e101  [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis connectors from Flink repo
     new 8844ea1  [FLINK-29907][Connectors/AWS] Update NOTICE files
     new d5d6a63  [FLINK-29907][Connectors/Kinesis] Sync changes from release-1.16 branch
     new aa10d88  [FLINK-29907][Connectors/AWS] Fix NOTICE file for Firehose SQL connector

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
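
For downstream users, the externalized modules above keep the connector APIs that previously shipped with the Flink repo. As a minimal sketch of the new Kinesis Streams sink from flink-connector-aws-kinesis-streams (region and stream name below are hypothetical placeholders; credentials are resolved by the default AWS provider chain):

    import java.util.Properties;

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.connector.aws.config.AWSConfigConstants;
    import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;

    public class KinesisSinkSketch {
        public static KinesisStreamsSink<String> buildSink() {
            Properties sinkProperties = new Properties();
            // Hypothetical region; AWSConfigConstants now lives in flink-connector-aws-base.
            sinkProperties.put(AWSConfigConstants.AWS_REGION, "eu-west-1");

            return KinesisStreamsSink.<String>builder()
                    .setKinesisClientProperties(sinkProperties)
                    .setSerializationSchema(new SimpleStringSchema())
                    .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
                    .setStreamName("example-stream") // hypothetical stream name
                    .build();
        }
    }

The resulting sink is attached to a DataStream with sinkTo(...), exactly as before the externalization.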


Summary of changes:
 .gitignore                                         |    3 +-
 .../33252236-fc9f-4f63-b537-39e2322f7ccd           |    0
 .../733a854b-2487-43da-a5fa-9b089af5fb4e           |    0
 .../archunit-violations/stored.rules               |    6 +-
 .../pom.xml                                        |   96 +-
 .../connector/aws/config/AWSConfigConstants.java   |  176 +++
 .../connector/aws/table/util/AWSOptionUtils.java   |   87 ++
 .../aws/table/util/AsyncClientOptionsUtils.java    |  109 ++
 .../flink/connector/aws/util/AWSAsyncSinkUtil.java |  164 +++
 .../aws/util/AWSAuthenticationException.java       |   26 +-
 .../AWSCredentialFatalExceptionClassifiers.java    |   43 +
 .../flink/connector/aws/util/AWSGeneralUtil.java   |  396 ++++++
 .../src/main/resources/log4j2.properties           |    0
 .../architecture/TestCodeArchitectureTest.java     |    2 +-
 .../aws/table/util/AWSOptionsUtilTest.java         |  137 ++
 .../table/util/AsyncClientOptionsUtilsTest.java    |  146 ++
 .../aws/testutils/AWSServicesTestUtils.java        |  146 ++
 .../aws/testutils/LocalstackContainer.java         |   85 ++
 .../connector/aws/util/AWSAsyncSinkUtilTest.java   |  253 ++++
 .../connector/aws/util/AWSGeneralUtilTest.java     |  792 +++++++++++
 .../apache/flink/connector/aws/util/TestUtil.java  |   54 +
 .../org.junit.jupiter.api.extension.Extension      |    2 +-
 .../src/test/resources/archunit.properties         |    0
 .../src/test/resources/log4j2-test.properties      |   28 +
 .../src/test/resources/profile                     |    7 +
 .../54da9a7d-14d2-4632-a045-1dd8fc665c8f           |    0
 .../a6cbd99c-b115-447a-8f19-43c1094db549           |    6 +
 .../archunit-violations/stored.rules               |    6 +-
 .../pom.xml                                        |   93 +-
 .../sink/KinesisFirehoseConfigConstants.java       |   16 +-
 .../firehose/sink/KinesisFirehoseException.java    |   54 +
 .../firehose/sink/KinesisFirehoseSink.java         |  135 ++
 .../firehose/sink/KinesisFirehoseSinkBuilder.java  |  164 +++
 .../sink/KinesisFirehoseSinkElementConverter.java  |  104 ++
 .../firehose/sink/KinesisFirehoseSinkWriter.java   |  264 ++++
 .../sink/KinesisFirehoseStateSerializer.java       |   28 +-
 .../table/KinesisFirehoseConnectorOptions.java     |   29 +-
 .../firehose/table/KinesisFirehoseDynamicSink.java |  183 +++
 .../table/KinesisFirehoseDynamicTableFactory.java  |   89 ++
 .../util/KinesisFirehoseConnectorOptionUtils.java  |   50 +-
 .../org.apache.flink.table.factories.Factory       |    2 +-
 .../src/main/resources/log4j2.properties           |    0
 .../architecture/TestCodeArchitectureTest.java     |    2 +-
 .../sink/KinesisFirehoseSinkBuilderTest.java       |   81 ++
 .../KinesisFirehoseSinkElementConverterTest.java   |   54 +
 .../firehose/sink/KinesisFirehoseSinkITCase.java   |  125 ++
 .../firehose/sink/KinesisFirehoseSinkTest.java     |  132 ++
 .../sink/KinesisFirehoseSinkWriterTest.java        |  106 ++
 .../sink/KinesisFirehoseStateSerializerTest.java   |   56 +
 .../sink/testutils/KinesisFirehoseTestUtils.java   |   86 ++
 .../KinesisFirehoseDynamicTableFactoryTest.java    |  157 +++
 .../org.junit.jupiter.api.extension.Extension      |    2 +-
 .../src/test/resources/archunit.properties         |    0
 .../src/test/resources/log4j2-test.properties      |   28 +
 .../75596a92-3816-4a44-85ac-7c96e72f443a           |    0
 .../7e2560a3-23eb-40cc-8669-e7943e393b88           |    0
 .../84abeb9c-8355-4165-96aa-dda65b04e5e7           |    6 +
 .../archunit-violations/stored.rules               |    6 +-
 .../pom.xml                                        |   79 +-
 .../sink/KinesisStreamsConfigConstants.java        |   16 +-
 .../kinesis/sink/KinesisStreamsException.java      |   51 +
 .../connector/kinesis/sink/KinesisStreamsSink.java |  160 +++
 .../kinesis/sink/KinesisStreamsSinkBuilder.java    |  134 ++
 .../sink/KinesisStreamsSinkElementConverter.java   |  127 ++
 .../kinesis/sink/KinesisStreamsSinkWriter.java     |  256 ++++
 .../sink/KinesisStreamsStateSerializer.java        |   83 ++
 .../kinesis/sink/PartitionKeyGenerator.java        |   21 +-
 .../table/FixedKinesisPartitionKeyGenerator.java   |   73 +
 .../kinesis/table/KinesisConnectorOptions.java     |   94 ++
 .../kinesis/table/KinesisDynamicSink.java          |  258 ++++
 .../table/KinesisDynamicTableSinkFactory.java      |  126 ++
 .../table/KinesisPartitionKeyGeneratorFactory.java |  111 ++
 .../table/RandomKinesisPartitionKeyGenerator.java  |   29 +-
 .../RowDataFieldsKinesisPartitionKeyGenerator.java |  266 ++++
 .../util/KinesisStreamsConnectorOptionsUtils.java  |  272 ++++
 .../org.apache.flink.table.factories.Factory       |    2 +-
 .../src/main/resources/log4j2.properties           |    0
 .../architecture/TestCodeArchitectureTest.java     |    2 +-
 .../sink/KinesisStreamsSinkBuilderTest.java        |   93 ++
 .../kinesis/sink/KinesisStreamsSinkITCase.java     |  566 ++++++++
 .../sink/KinesisStreamsStateSerializerTest.java    |   56 +
 .../kinesis/sink/examples/SinkIntoKinesis.java     |   74 +
 .../table/KinesisDynamicTableSinkFactoryTest.java  |  306 ++++
 ...DataFieldsKinesisPartitionKeyGeneratorTest.java |  305 ++++
 .../util/KinesisProducerOptionsMapperTest.java     |   79 ++
 .../kinesis/testutils/KinesaliteContainer.java     |   81 +-
 .../org.junit.jupiter.api.extension.Extension      |    2 +-
 .../src/test/resources/archunit.properties         |    0
 .../src/test/resources/log4j2-test.properties      |   28 +
 .../src/test/resources/profile                     |    7 +
 flink-connector-dynamodb/pom.xml                   |    2 +-
 .../28f0499c-3213-4ec2-97f7-970f052922b3           |    0
 .../4c963703-6b45-4782-825a-5cc6ba1556dd           |    0
 .../archunit-violations/stored.rules               |    6 +-
 flink-connector-kinesis/pom.xml                    |  394 ++++++
 .../kinesis/FlinkDynamoDBStreamsConsumer.java      |   80 ++
 .../connectors/kinesis/FlinkKinesisConsumer.java   |  556 ++++++++
 .../connectors/kinesis/FlinkKinesisException.java  |   48 +
 .../connectors/kinesis/FlinkKinesisProducer.java   |  500 +++++++
 .../connectors/kinesis/KinesisPartitioner.java     |   65 +
 .../connectors/kinesis/KinesisShardAssigner.java   |   54 +
 .../kinesis/config/AWSConfigConstants.java         |   13 +-
 .../kinesis/config/ConsumerConfigConstants.java    |  427 ++++++
 .../kinesis/config/ProducerConfigConstants.java    |   51 +
 .../internals/DynamoDBStreamsDataFetcher.java      |  128 ++
 .../kinesis/internals/KinesisDataFetcher.java      | 1460 ++++++++++++++++++++
 .../kinesis/internals/ShardConsumer.java           |  258 ++++
 .../kinesis/internals/publisher/RecordBatch.java   |   95 ++
 .../internals/publisher/RecordPublisher.java       |   62 +
 .../publisher/RecordPublisherFactory.java          |   51 +
 .../publisher/fanout/FanOutRecordPublisher.java    |  304 ++++
 .../fanout/FanOutRecordPublisherConfiguration.java |  475 +++++++
 .../fanout/FanOutRecordPublisherFactory.java       |   99 ++
 .../publisher/fanout/FanOutShardSubscriber.java    |  609 ++++++++
 .../publisher/fanout/StreamConsumerRegistrar.java  |  313 +++++
 .../polling/AdaptivePollingRecordPublisher.java    |  132 ++
 .../publisher/polling/PollingRecordPublisher.java  |  223 +++
 .../PollingRecordPublisherConfiguration.java       |   70 +
 .../polling/PollingRecordPublisherFactory.java     |   89 ++
 .../metrics/KinesisConsumerMetricConstants.java    |   48 +
 .../PollingRecordPublisherMetricsReporter.java     |   87 ++
 .../metrics/ShardConsumerMetricsReporter.java      |   90 ++
 .../kinesis/model/DynamoDBStreamsShardHandle.java  |   60 +
 .../kinesis/model/KinesisStreamShard.java          |  149 ++
 .../kinesis/model/KinesisStreamShardState.java     |  103 ++
 .../kinesis/model/SentinelSequenceNumber.java      |   76 +
 .../connectors/kinesis/model/SequenceNumber.java   |  112 ++
 .../connectors/kinesis/model/StartingPosition.java |  118 ++
 .../kinesis/model/StreamShardHandle.java           |  115 ++
 .../kinesis/model/StreamShardMetadata.java         |  236 ++++
 .../kinesis/proxy/DynamoDBStreamsProxy.java        |  149 ++
 .../kinesis/proxy/FullJitterBackoff.java           |   61 +
 .../kinesis/proxy/GetShardListResult.java          |   78 ++
 .../connectors/kinesis/proxy/KinesisProxy.java     |  654 +++++++++
 .../kinesis/proxy/KinesisProxyInterface.java       |   83 ++
 .../connectors/kinesis/proxy/KinesisProxyV2.java   |  221 +++
 .../kinesis/proxy/KinesisProxyV2Factory.java       |   82 ++
 .../kinesis/proxy/KinesisProxyV2Interface.java     |   63 +
 .../serialization/DynamoDBStreamsSchema.java       |   48 +
 .../KinesisDeserializationSchema.java              |   81 ++
 .../KinesisDeserializationSchemaWrapper.java       |   86 ++
 .../serialization/KinesisSerializationSchema.java  |   61 +
 .../kinesis/table/KinesisConnectorOptionsUtil.java |   58 +
 .../kinesis/table/KinesisConsumerOptionsUtil.java  |   87 ++
 .../kinesis/table/KinesisDynamicSource.java        |  222 +++
 .../kinesis/table/KinesisDynamicTableFactory.java  |   99 ++
 .../table/RowDataKinesisDeserializationSchema.java |  144 ++
 .../streaming/connectors/kinesis/util/AWSUtil.java |  291 ++++
 .../connectors/kinesis/util/AwsV2Util.java         |   81 ++
 .../BeanDeserializerModifierForIgnorables.java     |   81 ++
 .../kinesis/util/JobManagerWatermarkTracker.java   |  188 +++
 .../connectors/kinesis/util/KinesisConfigUtil.java |  615 +++++++++
 .../connectors/kinesis/util/RecordEmitter.java     |  284 ++++
 .../kinesis/util/StreamConsumerRegistrarUtil.java  |  178 +++
 .../connectors/kinesis/util/TimeoutLatch.java      |   33 +-
 .../kinesis/util/UniformShardAssigner.java         |   58 +
 .../connectors/kinesis/util/WatermarkTracker.java  |  117 ++
 .../src/main/resources/META-INF/NOTICE             |  293 ++++
 .../resources/META-INF/licenses/LICENSE.protobuf   |   32 +
 ...aded.software.amazon.awssdk.http.SdkHttpService |   20 +
 .../org.apache.flink.table.factories.Factory       |    2 +-
 .../awssdk/global/handlers/execution.interceptors  |    1 +
 .../architecture/TestCodeArchitectureTest.java     |    2 +-
 .../kinesis/FlinkKinesisConsumerMigrationTest.java |  548 ++++++++
 .../kinesis/FlinkKinesisConsumerTest.java          | 1258 +++++++++++++++++
 .../connectors/kinesis/FlinkKinesisITCase.java     |  251 ++++
 .../kinesis/FlinkKinesisProducerTest.java          |  546 ++++++++
 .../connectors/kinesis/KinesisConsumerTest.java    |   75 +
 .../examples/ConsumeFromDynamoDBStreams.java       |   59 +
 .../kinesis/examples/ConsumeFromKinesis.java       |   55 +
 .../kinesis/examples/ProduceIntoKinesis.java       |   83 ++
 .../internals/DynamoDBStreamsDataFetcherTest.java  |   69 +
 .../kinesis/internals/KinesisDataFetcherTest.java  | 1117 +++++++++++++++
 .../kinesis/internals/ShardConsumerFanOutTest.java |  300 ++++
 .../kinesis/internals/ShardConsumerTest.java       |  246 ++++
 .../kinesis/internals/ShardConsumerTestUtils.java  |  207 +++
 .../internals/publisher/RecordBatchTest.java       |   92 ++
 .../FanOutRecordPublisherConfigurationTest.java    |  196 +++
 .../fanout/FanOutRecordPublisherTest.java          |  616 +++++++++
 .../fanout/FanOutShardSubscriberTest.java          |  185 +++
 .../fanout/StreamConsumerRegistrarTest.java        |  340 +++++
 .../PollingRecordPublisherConfigurationTest.java   |   73 +
 .../polling/PollingRecordPublisherFactoryTest.java |   71 +
 .../polling/PollingRecordPublisherTest.java        |  174 +++
 .../manualtests/ManualConsumerProducerTest.java    |  132 ++
 .../kinesis/manualtests/ManualExactlyOnceTest.java |  172 +++
 .../ManualExactlyOnceWithStreamReshardingTest.java |  289 ++++
 .../kinesis/manualtests/ManualProducerTest.java    |   95 ++
 .../PollingRecordPublisherMetricsReporterTest.java |   68 +
 .../metrics/ShardConsumerMetricsReporterTest.java  |   84 ++
 .../model/DynamoDBStreamsShardHandleTest.java      |  108 ++
 .../kinesis/model/SentinelSequenceNumberTest.java  |   21 +-
 .../kinesis/model/StartingPositionTest.java        |   94 ++
 .../kinesis/model/StreamShardHandleTest.java       |   41 +
 .../kinesis/proxy/DynamoDBStreamsProxyTest.java    |   82 ++
 .../connectors/kinesis/proxy/KinesisProxyTest.java |  514 +++++++
 .../kinesis/proxy/KinesisProxyV2FactoryTest.java   |   86 ++
 .../kinesis/proxy/KinesisProxyV2Test.java          |  423 ++++++
 .../table/KinesisDynamicTableFactoryTest.java      |  275 ++++
 .../AlwaysThrowsDeserializationSchema.java         |   57 +
 .../ExactlyOnceValidatingConsumerThread.java       |  185 +++
 .../testutils/FakeKinesisBehavioursFactory.java    |  686 +++++++++
 .../testutils/FakeKinesisClientFactory.java        |  361 +++++
 .../FakeKinesisFanOutBehavioursFactory.java        |  708 ++++++++++
 .../KinesisEventsGeneratorProducerThread.java      |  132 ++
 .../kinesis/testutils/KinesisPubsubClient.java     |  158 +++
 .../kinesis/testutils/KinesisShardIdGenerator.java |   23 +-
 .../kinesis/testutils/TestRuntimeContext.java      |   89 ++
 .../kinesis/testutils/TestSourceContext.java       |   66 +
 .../connectors/kinesis/testutils/TestUtils.java    |  207 +++
 .../testutils/TestableFlinkKinesisConsumer.java    |   69 +
 .../testutils/TestableKinesisDataFetcher.java      |  217 +++
 ...inesisDataFetcherForShardConsumerException.java |  102 ++
 .../connectors/kinesis/util/AWSUtilTest.java       |  206 +++
 .../connectors/kinesis/util/AwsV2UtilTest.java     |  167 +++
 .../util/JobManagerWatermarkTrackerTest.java       |   75 +
 .../kinesis/util/KinesisConfigUtilTest.java        | 1024 ++++++++++++++
 .../connectors/kinesis/util/RecordEmitterTest.java |  136 ++
 .../util/StreamConsumerRegistrarUtilTest.java      |   92 ++
 .../kinesis/util/UniformShardAssignerTest.java     |   75 +
 .../kinesis/util/WatermarkTrackerTest.java         |  106 ++
 .../src/test/resources/archunit.properties         |    0
 ...onsumer-migration-test-flink1.10-empty-snapshot |  Bin 0 -> 2862 bytes
 ...esis-consumer-migration-test-flink1.10-snapshot |  Bin 0 -> 2930 bytes
 ...onsumer-migration-test-flink1.11-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.11-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.12-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.12-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.13-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.13-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.14-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.14-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.15-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.15-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.16-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.16-snapshot |  Bin 0 -> 2938 bytes
 ...consumer-migration-test-flink1.3-empty-snapshot |  Bin 0 -> 13975 bytes
 ...nesis-consumer-migration-test-flink1.3-snapshot |  Bin 0 -> 14043 bytes
 ...consumer-migration-test-flink1.4-empty-snapshot |  Bin 0 -> 13147 bytes
 ...nesis-consumer-migration-test-flink1.4-snapshot |  Bin 0 -> 13215 bytes
 ...consumer-migration-test-flink1.7-empty-snapshot |  Bin 0 -> 18539 bytes
 ...nesis-consumer-migration-test-flink1.7-snapshot |  Bin 0 -> 18607 bytes
 ...consumer-migration-test-flink1.8-empty-snapshot |  Bin 0 -> 2862 bytes
 ...nesis-consumer-migration-test-flink1.8-snapshot |  Bin 0 -> 2930 bytes
 ...consumer-migration-test-flink1.9-empty-snapshot |  Bin 0 -> 2862 bytes
 ...nesis-consumer-migration-test-flink1.9-snapshot |  Bin 0 -> 2930 bytes
 .../src/test/resources/log4j2-test.properties      |    0
 flink-connector-kinesis/src/test/resources/profile |    7 +
 .../pom.xml                                        |  101 +-
 .../src/main/resources/META-INF/NOTICE             |   10 +-
 .../pom.xml                                        |   82 +-
 .../src/main/resources/META-INF/NOTICE             |   18 +-
 flink-sql-connector-dynamodb/pom.xml               |    2 +-
 .../src/main/resources/META-INF/NOTICE             |    6 +-
 .../pom.xml                                        |   95 +-
 .../src/main/resources/META-INF/NOTICE             |   20 +
 .../flink/connectors/kinesis/PackagingITCase.java  |   46 +
 pom.xml                                            |  171 ++-
 tools/maven/suppressions.xml                       |    9 +-
 259 files changed, 34913 insertions(+), 530 deletions(-)
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-aws-base/archunit-violations/33252236-fc9f-4f63-b537-39e2322f7ccd (100%)
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-aws-base/archunit-violations/733a854b-2487-43da-a5fa-9b089af5fb4e (100%)
 copy {flink-connector-dynamodb => flink-connector-aws-base}/archunit-violations/stored.rules (50%)
 copy {flink-connector-dynamodb => flink-connector-aws-base}/pom.xml (50%)
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/config/AWSConfigConstants.java
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AWSOptionUtils.java
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtils.java
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtil.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/client/SdkClientProvider.java => flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAuthenticationException.java (61%)
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSCredentialFatalExceptionClassifiers.java
 create mode 100644 flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSGeneralUtil.java
 copy {flink-connector-dynamodb => flink-connector-aws-base}/src/main/resources/log4j2.properties (100%)
 copy {flink-connector-dynamodb => flink-connector-aws-base}/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java (96%)
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AWSOptionsUtilTest.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtilsTest.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/AWSServicesTestUtils.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/LocalstackContainer.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtilTest.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSGeneralUtilTest.java
 create mode 100644 flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/TestUtil.java
 copy flink-connector-dynamodb/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory => flink-connector-aws-base/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension (91%)
 copy {flink-connector-dynamodb => flink-connector-aws-base}/src/test/resources/archunit.properties (100%)
 create mode 100644 flink-connector-aws-base/src/test/resources/log4j2-test.properties
 create mode 100644 flink-connector-aws-base/src/test/resources/profile
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-aws-kinesis-firehose/archunit-violations/54da9a7d-14d2-4632-a045-1dd8fc665c8f (100%)
 create mode 100644 flink-connector-aws-kinesis-firehose/archunit-violations/a6cbd99c-b115-447a-8f19-43c1094db549
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/archunit-violations/stored.rules (50%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/pom.xml (68%)
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbConfigConstants.java => flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseConfigConstants.java (66%)
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseException.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSink.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilder.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverter.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriter.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbWriterStateSerializer.java => flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializer.java (59%)
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/table/DynamoDbConnectorOptions.java => flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseConnectorOptions.java (55%)
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicSink.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactory.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/table/DynamoDbConfiguration.java => flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/util/KinesisFirehoseConnectorOptionUtils.java (50%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory (91%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/src/main/resources/log4j2.properties (100%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java (96%)
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilderTest.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverterTest.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkITCase.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkTest.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriterTest.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializerTest.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/testutils/KinesisFirehoseTestUtils.java
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactoryTest.java
 copy flink-connector-dynamodb/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory => flink-connector-aws-kinesis-firehose/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension (91%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-firehose}/src/test/resources/archunit.properties (100%)
 create mode 100644 flink-connector-aws-kinesis-firehose/src/test/resources/log4j2-test.properties
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-aws-kinesis-streams/archunit-violations/75596a92-3816-4a44-85ac-7c96e72f443a (100%)
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-aws-kinesis-streams/archunit-violations/7e2560a3-23eb-40cc-8669-e7943e393b88 (100%)
 create mode 100644 flink-connector-aws-kinesis-streams/archunit-violations/84abeb9c-8355-4165-96aa-dda65b04e5e7
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/archunit-violations/stored.rules (50%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/pom.xml (73%)
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbConfigConstants.java => flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsConfigConstants.java (66%)
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsException.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSink.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilder.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkElementConverter.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkWriter.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializer.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbConfigConstants.java => flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/PartitionKeyGenerator.java (66%)
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/FixedKinesisPartitionKeyGenerator.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisConnectorOptions.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicSink.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactory.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisPartitionKeyGeneratorFactory.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/InvalidConfigurationException.java => flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RandomKinesisPartitionKeyGenerator.java (54%)
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGenerator.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/util/KinesisStreamsConnectorOptionsUtils.java
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory (91%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/src/main/resources/log4j2.properties (100%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java (96%)
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilderTest.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkITCase.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializerTest.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/examples/SinkIntoKinesis.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactoryTest.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGeneratorTest.java
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/util/KinesisProducerOptionsMapperTest.java
 copy flink-connector-dynamodb/src/test/java/org/apache/flink/connector/dynamodb/testutils/DynamoDbContainer.java => flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connectors/kinesis/testutils/KinesaliteContainer.java (66%)
 copy flink-connector-dynamodb/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory => flink-connector-aws-kinesis-streams/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension (91%)
 copy {flink-connector-dynamodb => flink-connector-aws-kinesis-streams}/src/test/resources/archunit.properties (100%)
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/resources/log4j2-test.properties
 create mode 100644 flink-connector-aws-kinesis-streams/src/test/resources/profile
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-kinesis/archunit-violations/28f0499c-3213-4ec2-97f7-970f052922b3 (100%)
 copy flink-connector-dynamodb/archunit-violations/23e251e7-3783-4611-867f-207bf0d3bf73 => flink-connector-kinesis/archunit-violations/4c963703-6b45-4782-825a-5cc6ba1556dd (100%)
 copy {flink-connector-dynamodb => flink-connector-kinesis}/archunit-violations/stored.rules (50%)
 create mode 100644 flink-connector-kinesis/pom.xml
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkDynamoDBStreamsConsumer.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisShardAssigner.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/InvalidConfigurationException.java => flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java (75%)
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatch.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/AdaptivePollingRecordPublisher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfiguration.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporter.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporter.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandle.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPosition.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/DynamoDBStreamsSchema.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConnectorOptionsUtil.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConsumerOptionsUtil.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicSource.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactory.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/RowDataKinesisDeserializationSchema.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/BeanDeserializerModifierForIgnorables.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTracker.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitter.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtil.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/client/SdkClientProvider.java => flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/TimeoutLatch.java (61%)
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssigner.java
 create mode 100644 flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTracker.java
 create mode 100644 flink-connector-kinesis/src/main/resources/META-INF/NOTICE
 create mode 100644 flink-connector-kinesis/src/main/resources/META-INF/licenses/LICENSE.protobuf
 create mode 100644 flink-connector-kinesis/src/main/resources/META-INF/services/org.apache.flink.kinesis.shaded.software.amazon.awssdk.http.SdkHttpService
 copy {flink-connector-dynamodb => flink-connector-kinesis}/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory (90%)
 create mode 100644 flink-connector-kinesis/src/main/resources/org/apache/flink/kinesis/shaded/software/amazon/awssdk/global/handlers/execution.interceptors
 copy {flink-connector-dynamodb => flink-connector-kinesis}/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java (95%)
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/KinesisConsumerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromDynamoDBStreams.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/examples/ConsumeFromKinesis.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/examples/ProduceIntoKinesis.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcherTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatchTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriberTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfigurationTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactoryTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualConsumerProducerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualExactlyOnceWithStreamReshardingTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/manualtests/ManualProducerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporterTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporterTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbConfigConstants.java => flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumberTest.java (61%)
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPositionTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandleTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxyTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2FactoryTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactoryTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/AlwaysThrowsDeserializationSchema.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/ExactlyOnceValidatingConsumerThread.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisBehavioursFactory.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisClientFactory.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisEventsGeneratorProducerThread.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisPubsubClient.java
 copy flink-connector-dynamodb/src/main/java/org/apache/flink/connector/dynamodb/sink/DynamoDbConfigConstants.java => flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/KinesisShardIdGenerator.java (61%)
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestRuntimeContext.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestSourceContext.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestUtils.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableFlinkKinesisConsumer.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcherForShardConsumerException.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTrackerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssignerTest.java
 create mode 100644 flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTrackerTest.java
 copy {flink-connector-dynamodb => flink-connector-kinesis}/src/test/resources/archunit.properties (100%)
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.10-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.10-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.11-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.11-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.12-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.12-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.13-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.13-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.14-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.14-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.15-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.15-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.3-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.3-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.4-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.4-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.7-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.7-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.8-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.8-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.9-empty-snapshot
 create mode 100644 flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.9-snapshot
 copy {flink-connector-dynamodb => flink-connector-kinesis}/src/test/resources/log4j2-test.properties (100%)
 create mode 100644 flink-connector-kinesis/src/test/resources/profile
 copy {flink-sql-connector-dynamodb => flink-sql-connector-aws-kinesis-firehose}/pom.xml (56%)
 copy {flink-sql-connector-dynamodb => flink-sql-connector-aws-kinesis-firehose}/src/main/resources/META-INF/NOTICE (87%)
 copy {flink-sql-connector-dynamodb => flink-sql-connector-aws-kinesis-streams}/pom.xml (68%)
 copy {flink-sql-connector-dynamodb => flink-sql-connector-aws-kinesis-streams}/src/main/resources/META-INF/NOTICE (81%)
 copy {flink-sql-connector-dynamodb => flink-sql-connector-kinesis}/pom.xml (58%)
 create mode 100644 flink-sql-connector-kinesis/src/main/resources/META-INF/NOTICE
 create mode 100644 flink-sql-connector-kinesis/src/test/java/org/apache/flink/connectors/kinesis/PackagingITCase.java
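
The classic consumer moved in commit a23e101 is likewise unchanged after the externalization. A minimal consumption sketch, assuming the flink-connector-kinesis artifact is on the classpath and using hypothetical region/stream values:

    import java.util.Properties;

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

    public class KinesisConsumerSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            Properties consumerConfig = new Properties();
            // Hypothetical region and stream; start reading from the latest records.
            consumerConfig.put(AWSConfigConstants.AWS_REGION, "eu-west-1");
            consumerConfig.put(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "LATEST");

            DataStream<String> records = env.addSource(
                    new FlinkKinesisConsumer<>("example-stream", new SimpleStringSchema(), consumerConfig));

            records.print();
            env.execute("Kinesis consumer sketch");
        }
    }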


[flink-connector-aws] 06/08: [FLINK-29907][Connectors/AWS] Update NOTICE files

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit 8844ea15606dbb842f0e50a4c45b7d8fd999e984
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 13:03:05 2022 +0000

    [FLINK-29907][Connectors/AWS] Update NOTICE files
---
 .../src/main/resources/META-INF/NOTICE              | 21 ++++++++++-----------
 .../src/main/resources/META-INF/NOTICE              | 21 ++++++++++-----------
 .../src/main/resources/META-INF/NOTICE              | 21 ++++++++++-----------
 .../src/main/resources/META-INF/NOTICE              |  6 +++---
 .../src/main/resources/META-INF/NOTICE              |  2 +-
 5 files changed, 34 insertions(+), 37 deletions(-)

diff --git a/flink-connector-kinesis/src/main/resources/META-INF/NOTICE b/flink-connector-kinesis/src/main/resources/META-INF/NOTICE
index 14db128..0de146e 100644
--- a/flink-connector-kinesis/src/main/resources/META-INF/NOTICE
+++ b/flink-connector-kinesis/src/main/resources/META-INF/NOTICE
@@ -45,17 +45,16 @@ This project bundles the following dependencies under the Apache Software Licens
 - software.amazon.awssdk:json-utils:2.17.247
 - software.amazon.awssdk:third-party-jackson-core:2.17.247
 - software.amazon.awssdk:third-party-jackson-dataformat-cbor:2.17.247
-- io.netty:netty-codec-http:4.1.70.Final
-- io.netty:netty-codec-http2:4.1.70.Final
-- io.netty:netty-codec:4.1.70.Final
-- io.netty:netty-transport:4.1.70.Final
-- io.netty:netty-resolver:4.1.70.Final
-- io.netty:netty-common:4.1.70.Final
-- io.netty:netty-buffer:4.1.70.Final
-- io.netty:netty-handler:4.1.70.Final
-- io.netty:netty-transport-classes-epoll:4.1.70.Final
-- io.netty:netty-transport-native-epoll:linux-x86_64:4.1.70.Final
-- io.netty:netty-transport-native-unix-common:4.1.70.Final
+- io.netty:netty-codec-http:4.1.77.Final
+- io.netty:netty-codec-http2:4.1.77.Final
+- io.netty:netty-codec:4.1.77.Final
+- io.netty:netty-transport:4.1.77.Final
+- io.netty:netty-resolver:4.1.77.Final
+- io.netty:netty-common:4.1.77.Final
+- io.netty:netty-buffer:4.1.77.Final
+- io.netty:netty-handler:4.1.77.Final
+- io.netty:netty-transport-classes-epoll:4.1.77.Final
+- io.netty:netty-transport-native-unix-common:4.1.77.Final
 - com.typesafe.netty:netty-reactive-streams-http:2.0.5
 - com.typesafe.netty:netty-reactive-streams:2.0.5
 
diff --git a/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
index 7cf7302..00b0360 100644
--- a/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
+++ b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
@@ -25,17 +25,16 @@ This project bundles the following dependencies under the Apache Software Licens
 - software.amazon.awssdk:aws-query-protocol:2.17.247
 - software.amazon.awssdk:json-utils:2.17.247
 - software.amazon.awssdk:third-party-jackson-core:2.17.247
-- io.netty:netty-codec-http:4.1.70.Final
-- io.netty:netty-codec-http2:4.1.70.Final
-- io.netty:netty-codec:4.1.70.Final
-- io.netty:netty-transport:4.1.70.Final
-- io.netty:netty-resolver:4.1.70.Final
-- io.netty:netty-common:4.1.70.Final
-- io.netty:netty-buffer:4.1.70.Final
-- io.netty:netty-handler:4.1.70.Final
-- io.netty:netty-transport-native-epoll:linux-x86_64:4.1.70.Final
-- io.netty:netty-transport-native-unix-common:4.1.70.Final
-- io.netty:netty-transport-classes-epoll:4.1.70.Final
+- io.netty:netty-codec-http:4.1.77.Final
+- io.netty:netty-codec-http2:4.1.77.Final
+- io.netty:netty-codec:4.1.77.Final
+- io.netty:netty-transport:4.1.77.Final
+- io.netty:netty-resolver:4.1.77.Final
+- io.netty:netty-common:4.1.77.Final
+- io.netty:netty-buffer:4.1.77.Final
+- io.netty:netty-handler:4.1.77.Final
+- io.netty:netty-transport-native-unix-common:4.1.77.Final
+- io.netty:netty-transport-classes-epoll:4.1.77.Final
 - com.typesafe.netty:netty-reactive-streams-http:2.0.5
 - com.typesafe.netty:netty-reactive-streams:2.0.5
 - org.apache.httpcomponents:httpclient:4.5.13
diff --git a/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE b/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE
index b3a38b8..bec6d75 100644
--- a/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE
+++ b/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE
@@ -27,17 +27,16 @@ This project bundles the following dependencies under the Apache Software Licens
 - software.amazon.awssdk:json-utils:2.17.247
 - software.amazon.awssdk:third-party-jackson-core:2.17.247
 - software.amazon.awssdk:third-party-jackson-dataformat-cbor:2.17.247
-- io.netty:netty-codec-http:4.1.70.Final
-- io.netty:netty-codec-http2:4.1.70.Final
-- io.netty:netty-codec:4.1.70.Final
-- io.netty:netty-transport:4.1.70.Final
-- io.netty:netty-resolver:4.1.70.Final
-- io.netty:netty-common:4.1.70.Final
-- io.netty:netty-buffer:4.1.70.Final
-- io.netty:netty-handler:4.1.70.Final
-- io.netty:netty-transport-classes-epoll:4.1.70.Final
-- io.netty:netty-transport-native-epoll:linux-x86_64:4.1.70.Final
-- io.netty:netty-transport-native-unix-common:4.1.70.Final
+- io.netty:netty-codec-http:4.1.77.Final
+- io.netty:netty-codec-http2:4.1.77.Final
+- io.netty:netty-codec:4.1.77.Final
+- io.netty:netty-transport:4.1.77.Final
+- io.netty:netty-resolver:4.1.77.Final
+- io.netty:netty-common:4.1.77.Final
+- io.netty:netty-buffer:4.1.77.Final
+- io.netty:netty-handler:4.1.77.Final
+- io.netty:netty-transport-classes-epoll:4.1.77.Final
+- io.netty:netty-transport-native-unix-common:4.1.77.Final
 - com.typesafe.netty:netty-reactive-streams-http:2.0.5
 - com.typesafe.netty:netty-reactive-streams:2.0.5
 - org.apache.httpcomponents:httpclient:4.5.13
diff --git a/flink-sql-connector-dynamodb/src/main/resources/META-INF/NOTICE b/flink-sql-connector-dynamodb/src/main/resources/META-INF/NOTICE
index 96af7b6..3e00a44 100644
--- a/flink-sql-connector-dynamodb/src/main/resources/META-INF/NOTICE
+++ b/flink-sql-connector-dynamodb/src/main/resources/META-INF/NOTICE
@@ -37,9 +37,9 @@ This project bundles the following dependencies under the Apache Software Licens
 - io.netty:netty-transport-native-unix-common:4.1.77.Final
 - io.netty:netty-transport-classes-epoll:4.1.77.Final
 - org.apache.httpcomponents:httpclient:4.5.13
-- org.apache.httpcomponents:httpcore:4.4.13
-- commons-logging:commons-logging:1.2
-- commons-codec:commons-codec:1.11
+- org.apache.httpcomponents:httpcore:4.4.14
+- commons-logging:commons-logging:1.1.3
+- commons-codec:commons-codec:1.15
 
 This project bundles the following dependencies under the Creative Commons Zero license (https://creativecommons.org/publicdomain/zero/1.0/).
 
diff --git a/flink-sql-connector-kinesis/src/main/resources/META-INF/NOTICE b/flink-sql-connector-kinesis/src/main/resources/META-INF/NOTICE
index 6521c2d..1fada9c 100644
--- a/flink-sql-connector-kinesis/src/main/resources/META-INF/NOTICE
+++ b/flink-sql-connector-kinesis/src/main/resources/META-INF/NOTICE
@@ -6,7 +6,7 @@ The Apache Software Foundation (http://www.apache.org/).
 
 This project bundles the following dependencies under the Apache Software License 2.0. (http://www.apache.org/licenses/LICENSE-2.0.txt)
 
-- joda-time:joda-time:2.5
+- joda-time:joda-time:2.8.1
 - commons-io:commons-io:2.11.0
 - commons-lang:commons-lang:2.6
 - commons-logging:commons-logging:1.1.3


[flink-connector-aws] 08/08: [FLINK-29907][Connectors/AWS] Fix NOTICE file for Firehose SQL connector

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit aa10d88f107dab8264ca6073b6cd64f599b34e93
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 17:46:16 2022 +0000

    [FLINK-29907][Connectors/AWS] Fix NOTICE file for Firehose SQL connector
---
 .../src/main/resources/META-INF/NOTICE                                  | 2 --
 1 file changed, 2 deletions(-)

diff --git a/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
index 00b0360..32f8b75 100644
--- a/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
+++ b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
@@ -35,8 +35,6 @@ This project bundles the following dependencies under the Apache Software Licens
 - io.netty:netty-handler:4.1.77.Final
 - io.netty:netty-transport-native-unix-common:4.1.77.Final
 - io.netty:netty-transport-classes-epoll:4.1.77.Final
-- com.typesafe.netty:netty-reactive-streams-http:2.0.5
-- com.typesafe.netty:netty-reactive-streams:2.0.5
 - org.apache.httpcomponents:httpclient:4.5.13
 - org.apache.httpcomponents:httpcore:4.4.14
 - commons-logging:commons-logging:1.1.3


[flink-connector-aws] 01/08: [FLINK-29907][Connectors/DynamoDB] Improve maven module names

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit 569049e0e8a8707908b3b2db1258f33be055807d
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 09:20:12 2022 +0000

    [FLINK-29907][Connectors/DynamoDB] Improve maven module names
---
 flink-connector-dynamodb/pom.xml     | 2 +-
 flink-sql-connector-dynamodb/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/flink-connector-dynamodb/pom.xml b/flink-connector-dynamodb/pom.xml
index 89fd19b..0afb150 100644
--- a/flink-connector-dynamodb/pom.xml
+++ b/flink-connector-dynamodb/pom.xml
@@ -30,7 +30,7 @@ under the License.
     </parent>
 
     <artifactId>flink-connector-dynamodb</artifactId>
-    <name>Flink : Connectors : Amazon DynamoDB</name>
+    <name>Flink : Connectors : AWS : Amazon DynamoDB</name>
 
     <packaging>jar</packaging>
 
diff --git a/flink-sql-connector-dynamodb/pom.xml b/flink-sql-connector-dynamodb/pom.xml
index a2bf0a5..531b5fd 100644
--- a/flink-sql-connector-dynamodb/pom.xml
+++ b/flink-sql-connector-dynamodb/pom.xml
@@ -31,7 +31,7 @@ under the License.
     </parent>
 
     <artifactId>flink-sql-connector-dynamodb</artifactId>
-    <name>Flink : Connectors : SQL : Amazon DynamoDB</name>
+    <name>Flink : Connectors : AWS : SQL : Amazon DynamoDB</name>
     <packaging>jar</packaging>
 
     <dependencyManagement>


[flink-connector-aws] 05/08: [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis connectors from Flink repo

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit a23e101dda08cc377602db66df5414889c32a8c8
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 11:30:40 2022 +0000

    [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis connectors from Flink repo
---
 flink-connector-aws-kinesis-streams/pom.xml        |    2 +-
 .../28f0499c-3213-4ec2-97f7-970f052922b3           |    0
 .../4c963703-6b45-4782-825a-5cc6ba1556dd           |    0
 .../archunit-violations/stored.rules               |    4 +
 flink-connector-kinesis/pom.xml                    |  394 ++++++
 .../kinesis/FlinkDynamoDBStreamsConsumer.java      |   80 ++
 .../connectors/kinesis/FlinkKinesisConsumer.java   |  556 ++++++++
 .../connectors/kinesis/FlinkKinesisException.java  |   48 +
 .../connectors/kinesis/FlinkKinesisProducer.java   |  500 +++++++
 .../connectors/kinesis/KinesisPartitioner.java     |   65 +
 .../connectors/kinesis/KinesisShardAssigner.java   |   54 +
 .../kinesis/config/AWSConfigConstants.java         |   28 +
 .../kinesis/config/ConsumerConfigConstants.java    |  427 ++++++
 .../kinesis/config/ProducerConfigConstants.java    |   51 +
 .../internals/DynamoDBStreamsDataFetcher.java      |  128 ++
 .../kinesis/internals/KinesisDataFetcher.java      | 1460 ++++++++++++++++++++
 .../kinesis/internals/ShardConsumer.java           |  258 ++++
 .../kinesis/internals/publisher/RecordBatch.java   |   95 ++
 .../internals/publisher/RecordPublisher.java       |   62 +
 .../publisher/RecordPublisherFactory.java          |   51 +
 .../publisher/fanout/FanOutRecordPublisher.java    |  304 ++++
 .../fanout/FanOutRecordPublisherConfiguration.java |  475 +++++++
 .../fanout/FanOutRecordPublisherFactory.java       |   99 ++
 .../publisher/fanout/FanOutShardSubscriber.java    |  609 ++++++++
 .../publisher/fanout/StreamConsumerRegistrar.java  |  313 +++++
 .../polling/AdaptivePollingRecordPublisher.java    |  132 ++
 .../publisher/polling/PollingRecordPublisher.java  |  223 +++
 .../PollingRecordPublisherConfiguration.java       |   70 +
 .../polling/PollingRecordPublisherFactory.java     |   89 ++
 .../metrics/KinesisConsumerMetricConstants.java    |   48 +
 .../PollingRecordPublisherMetricsReporter.java     |   87 ++
 .../metrics/ShardConsumerMetricsReporter.java      |   90 ++
 .../kinesis/model/DynamoDBStreamsShardHandle.java  |   60 +
 .../kinesis/model/KinesisStreamShard.java          |  149 ++
 .../kinesis/model/KinesisStreamShardState.java     |  103 ++
 .../kinesis/model/SentinelSequenceNumber.java      |   76 +
 .../connectors/kinesis/model/SequenceNumber.java   |  112 ++
 .../connectors/kinesis/model/StartingPosition.java |  118 ++
 .../kinesis/model/StreamShardHandle.java           |  115 ++
 .../kinesis/model/StreamShardMetadata.java         |  236 ++++
 .../kinesis/proxy/DynamoDBStreamsProxy.java        |  132 ++
 .../kinesis/proxy/FullJitterBackoff.java           |   61 +
 .../kinesis/proxy/GetShardListResult.java          |   78 ++
 .../connectors/kinesis/proxy/KinesisProxy.java     |  654 +++++++++
 .../kinesis/proxy/KinesisProxyInterface.java       |   83 ++
 .../connectors/kinesis/proxy/KinesisProxyV2.java   |  221 +++
 .../kinesis/proxy/KinesisProxyV2Factory.java       |   82 ++
 .../kinesis/proxy/KinesisProxyV2Interface.java     |   63 +
 .../serialization/DynamoDBStreamsSchema.java       |   48 +
 .../KinesisDeserializationSchema.java              |   81 ++
 .../KinesisDeserializationSchemaWrapper.java       |   86 ++
 .../serialization/KinesisSerializationSchema.java  |   61 +
 .../kinesis/table/KinesisConnectorOptionsUtil.java |   58 +
 .../kinesis/table/KinesisConsumerOptionsUtil.java  |   87 ++
 .../kinesis/table/KinesisDynamicSource.java        |  222 +++
 .../kinesis/table/KinesisDynamicTableFactory.java  |   99 ++
 .../table/RowDataKinesisDeserializationSchema.java |  144 ++
 .../streaming/connectors/kinesis/util/AWSUtil.java |  291 ++++
 .../connectors/kinesis/util/AwsV2Util.java         |   81 ++
 .../BeanDeserializerModifierForIgnorables.java     |   81 ++
 .../kinesis/util/JobManagerWatermarkTracker.java   |  188 +++
 .../connectors/kinesis/util/KinesisConfigUtil.java |  615 +++++++++
 .../connectors/kinesis/util/RecordEmitter.java     |  284 ++++
 .../kinesis/util/StreamConsumerRegistrarUtil.java  |  178 +++
 .../connectors/kinesis/util/TimeoutLatch.java      |   45 +
 .../kinesis/util/UniformShardAssigner.java         |   58 +
 .../connectors/kinesis/util/WatermarkTracker.java  |  117 ++
 .../src/main/resources/META-INF/NOTICE             |  294 ++++
 .../resources/META-INF/licenses/LICENSE.protobuf   |   32 +
 ...aded.software.amazon.awssdk.http.SdkHttpService |   20 +
 .../org.apache.flink.table.factories.Factory       |   16 +
 .../awssdk/global/handlers/execution.interceptors  |    1 +
 .../architecture/TestCodeArchitectureTest.java     |   40 +
 .../kinesis/FlinkKinesisConsumerMigrationTest.java |  547 ++++++++
 .../kinesis/FlinkKinesisConsumerTest.java          | 1258 +++++++++++++++++
 .../connectors/kinesis/FlinkKinesisITCase.java     |  251 ++++
 .../kinesis/FlinkKinesisProducerTest.java          |  546 ++++++++
 .../connectors/kinesis/KinesisConsumerTest.java    |   75 +
 .../examples/ConsumeFromDynamoDBStreams.java       |   59 +
 .../kinesis/examples/ConsumeFromKinesis.java       |   55 +
 .../kinesis/examples/ProduceIntoKinesis.java       |   83 ++
 .../internals/DynamoDBStreamsDataFetcherTest.java  |   69 +
 .../kinesis/internals/KinesisDataFetcherTest.java  | 1117 +++++++++++++++
 .../kinesis/internals/ShardConsumerFanOutTest.java |  300 ++++
 .../kinesis/internals/ShardConsumerTest.java       |  246 ++++
 .../kinesis/internals/ShardConsumerTestUtils.java  |  207 +++
 .../internals/publisher/RecordBatchTest.java       |   92 ++
 .../FanOutRecordPublisherConfigurationTest.java    |  196 +++
 .../fanout/FanOutRecordPublisherTest.java          |  616 +++++++++
 .../fanout/FanOutShardSubscriberTest.java          |  185 +++
 .../fanout/StreamConsumerRegistrarTest.java        |  340 +++++
 .../PollingRecordPublisherConfigurationTest.java   |   73 +
 .../polling/PollingRecordPublisherFactoryTest.java |   71 +
 .../polling/PollingRecordPublisherTest.java        |  174 +++
 .../manualtests/ManualConsumerProducerTest.java    |  132 ++
 .../kinesis/manualtests/ManualExactlyOnceTest.java |  172 +++
 .../ManualExactlyOnceWithStreamReshardingTest.java |  289 ++++
 .../kinesis/manualtests/ManualProducerTest.java    |   95 ++
 .../PollingRecordPublisherMetricsReporterTest.java |   68 +
 .../metrics/ShardConsumerMetricsReporterTest.java  |   84 ++
 .../model/DynamoDBStreamsShardHandleTest.java      |  108 ++
 .../kinesis/model/SentinelSequenceNumberTest.java  |   33 +
 .../kinesis/model/StartingPositionTest.java        |   94 ++
 .../kinesis/model/StreamShardHandleTest.java       |   41 +
 .../connectors/kinesis/proxy/KinesisProxyTest.java |  514 +++++++
 .../kinesis/proxy/KinesisProxyV2FactoryTest.java   |   86 ++
 .../kinesis/proxy/KinesisProxyV2Test.java          |  423 ++++++
 .../table/KinesisDynamicTableFactoryTest.java      |  275 ++++
 .../AlwaysThrowsDeserializationSchema.java         |   57 +
 .../ExactlyOnceValidatingConsumerThread.java       |  185 +++
 .../testutils/FakeKinesisBehavioursFactory.java    |  686 +++++++++
 .../FakeKinesisFanOutBehavioursFactory.java        |  708 ++++++++++
 .../KinesisEventsGeneratorProducerThread.java      |  132 ++
 .../kinesis/testutils/KinesisPubsubClient.java     |  158 +++
 .../kinesis/testutils/KinesisShardIdGenerator.java |   29 +
 .../kinesis/testutils/TestRuntimeContext.java      |   89 ++
 .../kinesis/testutils/TestSourceContext.java       |   66 +
 .../connectors/kinesis/testutils/TestUtils.java    |  207 +++
 .../testutils/TestableFlinkKinesisConsumer.java    |   69 +
 .../testutils/TestableKinesisDataFetcher.java      |  217 +++
 ...inesisDataFetcherForShardConsumerException.java |  102 ++
 .../connectors/kinesis/util/AWSUtilTest.java       |  206 +++
 .../connectors/kinesis/util/AwsV2UtilTest.java     |  167 +++
 .../util/JobManagerWatermarkTrackerTest.java       |   75 +
 .../kinesis/util/KinesisConfigUtilTest.java        | 1024 ++++++++++++++
 .../connectors/kinesis/util/RecordEmitterTest.java |  136 ++
 .../util/StreamConsumerRegistrarUtilTest.java      |   92 ++
 .../kinesis/util/UniformShardAssignerTest.java     |   75 +
 .../kinesis/util/WatermarkTrackerTest.java         |  106 ++
 .../src/test/resources/archunit.properties         |   31 +
 ...onsumer-migration-test-flink1.10-empty-snapshot |  Bin 0 -> 2862 bytes
 ...esis-consumer-migration-test-flink1.10-snapshot |  Bin 0 -> 2930 bytes
 ...onsumer-migration-test-flink1.11-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.11-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.12-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.12-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.13-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.13-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.14-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.14-snapshot |  Bin 0 -> 2938 bytes
 ...onsumer-migration-test-flink1.15-empty-snapshot |  Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.15-snapshot |  Bin 0 -> 2938 bytes
 ...consumer-migration-test-flink1.3-empty-snapshot |  Bin 0 -> 13975 bytes
 ...nesis-consumer-migration-test-flink1.3-snapshot |  Bin 0 -> 14043 bytes
 ...consumer-migration-test-flink1.4-empty-snapshot |  Bin 0 -> 13147 bytes
 ...nesis-consumer-migration-test-flink1.4-snapshot |  Bin 0 -> 13215 bytes
 ...consumer-migration-test-flink1.7-empty-snapshot |  Bin 0 -> 18539 bytes
 ...nesis-consumer-migration-test-flink1.7-snapshot |  Bin 0 -> 18607 bytes
 ...consumer-migration-test-flink1.8-empty-snapshot |  Bin 0 -> 2862 bytes
 ...nesis-consumer-migration-test-flink1.8-snapshot |  Bin 0 -> 2930 bytes
 ...consumer-migration-test-flink1.9-empty-snapshot |  Bin 0 -> 2862 bytes
 ...nesis-consumer-migration-test-flink1.9-snapshot |  Bin 0 -> 2930 bytes
 .../src/test/resources/log4j2-test.properties      |   29 +
 flink-connector-kinesis/src/test/resources/profile |    7 +
 flink-sql-connector-aws-kinesis-streams/pom.xml    |    2 +-
 flink-sql-connector-kinesis/pom.xml                |  121 ++
 .../src/main/resources/META-INF/NOTICE             |   20 +
 .../flink/connectors/kinesis/PackagingITCase.java  |   46 +
 pom.xml                                            |  127 +-
 tools/maven/suppressions.xml                       |    9 +-
 160 files changed, 26318 insertions(+), 6 deletions(-)

diff --git a/flink-connector-aws-kinesis-streams/pom.xml b/flink-connector-aws-kinesis-streams/pom.xml
index af280a6..0aba4c6 100644
--- a/flink-connector-aws-kinesis-streams/pom.xml
+++ b/flink-connector-aws-kinesis-streams/pom.xml
@@ -30,7 +30,7 @@ under the License.
     </parent>
 
     <artifactId>flink-connector-aws-kinesis-streams</artifactId>
-    <name>Flink : Connectors : AWS : Amazon Kinesis Data Streams</name>
+    <name>Flink : Connectors : AWS : Amazon Kinesis Data Streams Sink v2</name>
     <packaging>jar</packaging>
 
     <dependencies>
diff --git a/flink-connector-kinesis/archunit-violations/28f0499c-3213-4ec2-97f7-970f052922b3 b/flink-connector-kinesis/archunit-violations/28f0499c-3213-4ec2-97f7-970f052922b3
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-kinesis/archunit-violations/4c963703-6b45-4782-825a-5cc6ba1556dd b/flink-connector-kinesis/archunit-violations/4c963703-6b45-4782-825a-5cc6ba1556dd
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-kinesis/archunit-violations/stored.rules b/flink-connector-kinesis/archunit-violations/stored.rules
new file mode 100644
index 0000000..fe99e99
--- /dev/null
+++ b/flink-connector-kinesis/archunit-violations/stored.rules
@@ -0,0 +1,4 @@
+#
+#Tue Feb 22 12:18:40 CET 2022
+Tests\ inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=4c963703-6b45-4782-825a-5cc6ba1556dd
+ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ extension=28f0499c-3213-4ec2-97f7-970f052922b3
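
The stored.rules file added above is ArchUnit's frozen-rule index: each rule description maps to a UUID-named violation store file under archunit-violations/. A minimal sketch of how such a frozen rule is typically declared, assuming ArchUnit's freeze API; the rule text and class name are illustrative, not the actual Flink rule definitions:

    import com.tngtech.archunit.lang.ArchRule;
    import com.tngtech.archunit.library.freeze.FreezingArchRule;

    import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.classes;

    class FrozenRuleSketch {
        // Illustrative rule shaped like the first entry in stored.rules.
        static final ArchRule IT_CASE_NAMING =
                classes()
                        .that().areAssignableTo("org.apache.flink.test.util.AbstractTestBase")
                        .should().haveSimpleNameEndingWith("ITCase");

        // Freezing records the current violations in the UUID-named store files next to
        // stored.rules instead of failing the build on pre-existing offenders.
        static final ArchRule FROZEN = FreezingArchRule.freeze(IT_CASE_NAMING);
    }
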
diff --git a/flink-connector-kinesis/pom.xml b/flink-connector-kinesis/pom.xml
new file mode 100644
index 0000000..66d743f
--- /dev/null
+++ b/flink-connector-kinesis/pom.xml
@@ -0,0 +1,394 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>flink-connector-kinesis</artifactId>
+    <name>Flink : Connectors : AWS : Amazon Kinesis Data Streams</name>
+
+    <properties>
+        <aws.kinesis-kpl.version>0.14.1</aws.kinesis-kpl.version>
+        <aws.dynamodbstreams-kinesis-adapter.version>1.5.3</aws.dynamodbstreams-kinesis-adapter.version>
+        <hamcrest.version>1.3</hamcrest.version>
+    </properties>
+
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <!-- AWS dependencies -->
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>amazon-kinesis-aggregator</artifactId>
+            <version>1.0.3</version>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Amazon AWS SDK v1.x dependencies -->
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-kinesis</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-sts</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-kms</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-s3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-dynamodb</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-cloudwatch</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>amazon-kinesis-producer</artifactId>
+            <version>${aws.kinesis-kpl.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>amazon-kinesis-client</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>dynamodb-streams-kinesis-adapter</artifactId>
+            <version>${aws.dynamodbstreams-kinesis-adapter.version}</version>
+        </dependency>
+
+        <!-- Other third-party dependencies -->
+        <dependency>
+            <!-- KPL requires jaxb-api for javax.xml.bind.DatatypeConverter -->
+            <groupId>javax.xml.bind</groupId>
+            <artifactId>jaxb-api</artifactId>
+            <!-- packaged in flink-dist -->
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+
+        <!-- Flink ecosystem -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-streaming-java</artifactId>
+            <version>${flink.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-kinesis-streams</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <!-- Flink Table API ecosystem -->
+        <!-- Projects depending on this project won't depend on flink-table-*. -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-api-java-bridge</artifactId>
+            <version>${flink.version}</version>
+            <scope>provided</scope>
+            <optional>true</optional>
+        </dependency>
+
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-core</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+            <type>test-jar</type>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-streaming-java</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+            <type>test-jar</type>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-runtime</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-all</artifactId>
+            <version>${hamcrest.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-kinesis-streams</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Kinesis table factory testing -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-common</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Amazon AWS SDK v2.x dependencies -->
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>kinesis</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>netty-nio-client</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>sts</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpcore</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.dataformat</groupId>
+            <artifactId>jackson-dataformat-cbor</artifactId>
+        </dependency>
+
+        <!-- ArchUnit test dependencies -->
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-architecture-tests-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                        <configuration>
+                            <includes>
+                                <include>**/org/apache/flink/streaming/connectors/kinesis/testutils/**</include>
+                                <include>META-INF/LICENSE</include>
+                                <include>META-INF/NOTICE</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>shade-flink</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <!-- required for the Kinesis e2e test -->
+                            <shadeTestJar>true</shadeTestJar>
+                            <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+                            <artifactSet combine.children="append">
+                                <includes>
+                                    <include>org.apache.flink:flink-connector-aws-base:*</include>
+                                    <include>org.apache.flink:flink-connector-aws-kinesis-streams:*</include>
+                                    <include>com.amazonaws:*</include>
+                                    <include>com.google.protobuf:*</include>
+                                    <include>org.apache.httpcomponents:*</include>
+                                    <include>software.amazon.awssdk:*</include>
+                                    <include>software.amazon.eventstream:*</include>
+                                    <include>software.amazon.ion:*</include>
+                                    <include>org.reactivestreams:*</include>
+                                    <include>io.netty:*</include>
+                                    <include>com.typesafe.netty:*</include>
+                                </includes>
+                            </artifactSet>
+                            <relocations combine.children="override">
+                                <!-- Do not relocate guava because it is exposed in the Kinesis API (KinesisProducer#addUserRecord).
+                                     Users may be using other affected APIs, so relocations may break user code -->
+                                <relocation>
+                                    <pattern>org.apache.flink.connector.aws.config</pattern>
+                                    <shadedPattern>
+                                        org.apache.flink.kinesis.shaded.org.apache.flink.connector.aws.config
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.flink.connector.aws.util</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.org.apache.flink.connector.aws.util
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>com.google.protobuf</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.com.google.protobuf</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>com.amazonaws</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.com.amazonaws</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.http</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.org.apache.http</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>software.amazon</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.software.amazon</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>io.netty</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.io.netty</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>com.typesafe.netty</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.com.typesafe.netty</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.reactivestreams</pattern>
+                                    <shadedPattern>org.apache.flink.kinesis.shaded.org.reactivestreams</shadedPattern>
+                                </relocation>
+                            </relocations>
+                            <filters>
+                                <filter>
+                                    <artifact>*:*</artifact>
+                                    <excludes>
+                                        <exclude>.gitkeep</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>com.amazonaws:amazon-kinesis-producer</artifact>
+                                    <excludes>
+                                        <exclude>META-INF/THIRD_PARTY_NOTICES</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>software.amazon.awssdk:*</artifact>
+                                    <excludes>
+                                        <exclude>META-INF/services/**</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>org.apache.flink:flink-connector-aws-kinesis-streams:*</artifact>
+                                    <excludes>
+                                        <exclude>profile</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>org.apache.flink:flink-connector-aws-base:*</artifact>
+                                    <excludes>
+                                        <exclude>profile</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <configuration>
+                    <systemPropertyVariables>
+                        <com.amazonaws.sdk.disableCbor>true</com.amazonaws.sdk.disableCbor>
+                        <com.amazonaws.sdk.disableCertChecking>true</com.amazonaws.sdk.disableCertChecking>
+                    </systemPropertyVariables>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
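
A note on the shade configuration above: the relocation rules mean that, in the shaded flink-connector-kinesis jar, the bundled AWS SDK, Netty, and Apache HTTP classes live under the org.apache.flink.kinesis.shaded prefix. A minimal sketch of what that implies at runtime; the class name below is an assumption derived from the software.amazon relocation pattern, not something this build verifies:

    public class ShadedClassSketch {
        public static void main(String[] args) throws ClassNotFoundException {
            // When loaded from the shaded connector jar, the bundled AWS SDK v2 Kinesis
            // client is expected under the relocated name.
            Class<?> shadedClient =
                    Class.forName(
                            "org.apache.flink.kinesis.shaded.software.amazon.awssdk"
                                    + ".services.kinesis.KinesisAsyncClient");
            System.out.println("Loaded relocated class: " + shadedClient.getName());
        }
    }
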
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkDynamoDBStreamsConsumer.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkDynamoDBStreamsConsumer.java
new file mode 100644
index 0000000..fd87a62
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkDynamoDBStreamsConsumer.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.connectors.kinesis.internals.DynamoDBStreamsDataFetcher;
+import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Consume events from DynamoDB streams.
+ *
+ * @param <T> the type of data emitted
+ */
+public class FlinkDynamoDBStreamsConsumer<T> extends FlinkKinesisConsumer<T> {
+    private static final Logger LOG = LoggerFactory.getLogger(FlinkDynamoDBStreamsConsumer.class);
+
+    /**
+     * Constructor of FlinkDynamoDBStreamsConsumer.
+     *
+     * @param stream stream to consume
+     * @param deserializer deserialization schema
+     * @param config config properties
+     */
+    public FlinkDynamoDBStreamsConsumer(
+            String stream, DeserializationSchema<T> deserializer, Properties config) {
+        super(stream, deserializer, config);
+    }
+
+    /**
+     * Constructor of FlinkDynamoDBStreamsConsumer.
+     *
+     * @param streams list of streams to consume
+     * @param deserializer deserialization schema
+     * @param config config properties
+     */
+    public FlinkDynamoDBStreamsConsumer(
+            List<String> streams, KinesisDeserializationSchema deserializer, Properties config) {
+        super(streams, deserializer, config);
+    }
+
+    @Override
+    protected KinesisDataFetcher<T> createFetcher(
+            List<String> streams,
+            SourceFunction.SourceContext<T> sourceContext,
+            RuntimeContext runtimeContext,
+            Properties configProps,
+            KinesisDeserializationSchema<T> deserializationSchema) {
+        return new DynamoDBStreamsDataFetcher<T>(
+                streams,
+                sourceContext,
+                runtimeContext,
+                configProps,
+                deserializationSchema,
+                getShardAssigner());
+    }
+}
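
For orientation, a minimal usage sketch of the consumer added above, based on the single-stream constructor it declares; the region, stream name, and job wiring are placeholders rather than anything defined in this commit:

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.connectors.kinesis.FlinkDynamoDBStreamsConsumer;
    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

    import java.util.Properties;

    public class DynamoDBStreamsJobSketch {
        public static void main(String[] args) throws Exception {
            Properties config = new Properties();
            // Placeholder region; credentials are resolved via the default provider chain.
            config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
            config.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "TRIM_HORIZON");

            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // "dynamodb-stream-name" is a placeholder for the DynamoDB stream to consume.
            DataStream<String> changes =
                    env.addSource(
                            new FlinkDynamoDBStreamsConsumer<>(
                                    "dynamodb-stream-name", new SimpleStringSchema(), config));

            changes.print();
            env.execute("DynamoDB Streams consumer sketch");
        }
    }
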
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
new file mode 100644
index 0000000..488a1f5
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
@@ -0,0 +1,556 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.ExecutionConfig;
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.state.ListState;
+import org.apache.flink.api.common.state.ListStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.ClosureCleaner;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
+import org.apache.flink.api.java.typeutils.TupleTypeInfo;
+import org.apache.flink.runtime.state.FunctionInitializationContext;
+import org.apache.flink.runtime.state.FunctionSnapshotContext;
+import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
+import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
+import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.InitialPosition;
+import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
+import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
+import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.StreamConsumerRegistrarUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.WatermarkTracker;
+import org.apache.flink.util.InstantiationUtil;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * The Flink Kinesis Consumer is an exactly-once parallel streaming data source that subscribes to
+ * multiple AWS Kinesis streams within the same AWS service region, and can handle resharding of
+ * streams. Each subtask of the consumer is responsible for fetching data records from multiple
+ * Kinesis shards. The number of shards fetched by each subtask will change as shards are closed and
+ * created by Kinesis.
+ *
+ * <p>To leverage Flink's checkpointing mechanics for exactly-once streaming processing guarantees,
+ * the Flink Kinesis consumer is implemented with the AWS Java SDK, instead of the officially
+ * recommended AWS Kinesis Client Library, for low-level control on the management of stream state.
+ * The Flink Kinesis Connector also supports setting the initial starting points of Kinesis streams,
+ * namely TRIM_HORIZON and LATEST.
+ *
+ * <p>Kinesis and the Flink consumer support dynamic re-sharding and shard IDs, while sequential,
+ * cannot be assumed to be consecutive. There is no perfect generic default assignment function.
+ * Default shard to subtask assignment, which is based on hash code, may result in skew, with some
+ * subtasks having many shards assigned and others none.
+ *
+ * <p>It is recommended to monitor the shard distribution and adjust assignment appropriately. A
+ * custom assigner implementation can be set via {@link #setShardAssigner(KinesisShardAssigner)} to
+ * optimize the hash function or use static overrides to limit skew.
+ *
+ * <p>In order for the consumer to emit watermarks, a timestamp assigner needs to be set via {@link
+ * #setPeriodicWatermarkAssigner(AssignerWithPeriodicWatermarks)} and the auto watermark emit
+ * interval configured via {@link
+ * org.apache.flink.api.common.ExecutionConfig#setAutoWatermarkInterval(long)}.
+ *
+ * <p>Watermarks can only advance when all shards of a subtask continuously deliver records. To
+ * prevent an inactive or closed shard from blocking watermark progress, the idle timeout should be
+ * configured via configuration property {@link ConsumerConfigConstants#SHARD_IDLE_INTERVAL_MILLIS}.
+ * By default, shards won't be considered idle and watermark calculation will wait for newer records
+ * to arrive from all shards.
+ *
+ * <p>Note that re-sharding of the Kinesis stream while an application (that relies on the Kinesis
+ * records for watermarking) is running can lead to incorrect late events. This depends on how
+ * shards are assigned to subtasks and applies regardless of whether watermarks are generated in the
+ * source or a downstream operator.
+ *
+ * @param <T> the type of data emitted
+ */
+@PublicEvolving
+public class FlinkKinesisConsumer<T> extends RichParallelSourceFunction<T>
+        implements ResultTypeQueryable<T>, CheckpointedFunction {
+
+    private static final long serialVersionUID = 4724006128720664870L;
+
+    private static final Logger LOG = LoggerFactory.getLogger(FlinkKinesisConsumer.class);
+
+    // ------------------------------------------------------------------------
+    //  Consumer properties
+    // ------------------------------------------------------------------------
+
+    /** The names of the Kinesis streams that we will be consuming from. */
+    private final List<String> streams;
+
+    /**
+     * Properties to parametrize settings such as AWS service region, initial position in stream,
+     * shard list retrieval behaviours, etc.
+     */
+    private final Properties configProps;
+
+    /** User supplied deserialization schema to convert Kinesis byte messages to Flink objects. */
+    private final KinesisDeserializationSchema<T> deserializer;
+
+    /** The function that determines which subtask a shard should be assigned to. */
+    private KinesisShardAssigner shardAssigner = KinesisDataFetcher.DEFAULT_SHARD_ASSIGNER;
+
+    private AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner;
+    private WatermarkTracker watermarkTracker;
+
+    // ------------------------------------------------------------------------
+    //  Runtime state
+    // ------------------------------------------------------------------------
+
+    /**
+     * Per-task fetcher for Kinesis data records, where each fetcher pulls data from one or more
+     * Kinesis shards.
+     */
+    private transient KinesisDataFetcher<T> fetcher;
+
+    /** The sequence numbers to restore to upon restore from failure. */
+    private transient HashMap<StreamShardMetadata.EquivalenceWrapper, SequenceNumber>
+            sequenceNumsToRestore;
+
+    private volatile boolean running = true;
+
+    // ------------------------------------------------------------------------
+    //  State for Checkpoint
+    // ------------------------------------------------------------------------
+
+    /** State name to access shard sequence number states; cannot be changed. */
+    private static final String sequenceNumsStateStoreName = "Kinesis-Stream-Shard-State";
+
+    private transient ListState<Tuple2<StreamShardMetadata, SequenceNumber>>
+            sequenceNumsStateForCheckpoint;
+
+    // ------------------------------------------------------------------------
+    //  Constructors
+    // ------------------------------------------------------------------------
+
+    /**
+     * Creates a new Flink Kinesis Consumer.
+     *
+     * <p>The AWS credentials to be used, AWS region of the Kinesis streams, initial position to
+     * start streaming from are configured with a {@link Properties} instance.
+     *
+     * @param stream The single AWS Kinesis stream to read from.
+     * @param deserializer The deserializer used to convert raw bytes of Kinesis records to Java
+     *     objects (without key).
+     * @param configProps The properties used to configure AWS credentials, AWS region, and initial
+     *     starting position.
+     */
+    public FlinkKinesisConsumer(
+            String stream, DeserializationSchema<T> deserializer, Properties configProps) {
+        this(stream, new KinesisDeserializationSchemaWrapper<>(deserializer), configProps);
+    }
+
+    /**
+     * Creates a new Flink Kinesis Consumer.
+     *
+     * <p>The AWS credentials to be used, AWS region of the Kinesis streams, initial position to
+     * start streaming from are configured with a {@link Properties} instance.
+     *
+     * @param stream The single AWS Kinesis stream to read from.
+     * @param deserializer The keyed deserializer used to convert raw bytes of Kinesis records to
+     *     Java objects.
+     * @param configProps The properties used to configure AWS credentials, AWS region, and initial
+     *     starting position.
+     */
+    public FlinkKinesisConsumer(
+            String stream, KinesisDeserializationSchema<T> deserializer, Properties configProps) {
+        this(Collections.singletonList(stream), deserializer, configProps);
+    }
+
+    /**
+     * Creates a new Flink Kinesis Consumer.
+     *
+     * <p>The AWS credentials to be used, AWS region of the Kinesis streams, initial position to
+     * start streaming from are configured with a {@link Properties} instance.
+     *
+     * @param streams The AWS Kinesis streams to read from.
+     * @param deserializer The keyed deserializer used to convert raw bytes of Kinesis records to
+     *     Java objects.
+     * @param configProps The properties used to configure AWS credentials, AWS region, and initial
+     *     starting position.
+     */
+    public FlinkKinesisConsumer(
+            List<String> streams,
+            KinesisDeserializationSchema<T> deserializer,
+            Properties configProps) {
+        checkNotNull(streams, "streams can not be null");
+        checkArgument(streams.size() != 0, "must be consuming at least 1 stream");
+        checkArgument(!streams.contains(""), "stream names cannot be empty Strings");
+        this.streams = streams;
+
+        this.configProps = checkNotNull(configProps, "configProps can not be null");
+
+        // check the configuration properties for any conflicting settings
+        KinesisConfigUtil.validateConsumerConfiguration(this.configProps, streams);
+
+        checkNotNull(deserializer, "deserializer can not be null");
+        checkArgument(
+                InstantiationUtil.isSerializable(deserializer),
+                "The provided deserialization schema is not serializable: "
+                        + deserializer.getClass().getName()
+                        + ". "
+                        + "Please check that it does not contain references to non-serializable instances.");
+        this.deserializer = deserializer;
+
+        StreamConsumerRegistrarUtil.eagerlyRegisterStreamConsumers(configProps, streams);
+
+        if (LOG.isInfoEnabled()) {
+            StringBuilder sb = new StringBuilder();
+            for (String stream : streams) {
+                sb.append(stream).append(", ");
+            }
+            LOG.info(
+                    "Flink Kinesis Consumer is going to read the following streams: {}",
+                    sb.toString());
+        }
+    }
+
+    public KinesisShardAssigner getShardAssigner() {
+        return shardAssigner;
+    }
+
+    /**
+     * Provide a custom assigner to influence how shards are distributed over subtasks.
+     *
+     * @param shardAssigner shard assigner
+     */
+    public void setShardAssigner(KinesisShardAssigner shardAssigner) {
+        this.shardAssigner = checkNotNull(shardAssigner, "function can not be null");
+        ClosureCleaner.clean(shardAssigner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
+    }
+
+    public AssignerWithPeriodicWatermarks<T> getPeriodicWatermarkAssigner() {
+        return periodicWatermarkAssigner;
+    }
+
+    /**
+     * Set the assigner that will extract the timestamp from {@link T} and calculate the watermark.
+     *
+     * @param periodicWatermarkAssigner periodic watermark assigner
+     */
+    public void setPeriodicWatermarkAssigner(
+            AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner) {
+        this.periodicWatermarkAssigner = periodicWatermarkAssigner;
+        ClosureCleaner.clean(
+                this.periodicWatermarkAssigner,
+                ExecutionConfig.ClosureCleanerLevel.RECURSIVE,
+                true);
+    }
+
+    public WatermarkTracker getWatermarkTracker() {
+        return this.watermarkTracker;
+    }
+
+    /**
+     * Set the global watermark tracker. When set, it will be used by the fetcher to align the shard
+     * consumers by event time.
+     *
+     * @param watermarkTracker the global watermark tracker used to align shard consumers
+     */
+    public void setWatermarkTracker(WatermarkTracker watermarkTracker) {
+        this.watermarkTracker = watermarkTracker;
+        ClosureCleaner.clean(
+                this.watermarkTracker, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
+    }
+
+    // ------------------------------------------------------------------------
+    //  Source life cycle
+    // ------------------------------------------------------------------------
+
+    @Override
+    public void run(SourceContext<T> sourceContext) throws Exception {
+
+        // All subtasks will run a fetcher, regardless of whether or not the subtask will
+        // initially have shards to subscribe to; fetchers will continuously poll for changes
+        // in the shard list, so all subtasks can potentially have new shards to subscribe to
+        // later on.
+        KinesisDataFetcher<T> fetcher =
+                createFetcher(
+                        streams, sourceContext, getRuntimeContext(), configProps, deserializer);
+
+        // initial discovery
+        List<StreamShardHandle> allShards = fetcher.discoverNewShardsToSubscribe();
+
+        for (StreamShardHandle shard : allShards) {
+            StreamShardMetadata.EquivalenceWrapper kinesisStreamShard =
+                    new StreamShardMetadata.EquivalenceWrapper(
+                            KinesisDataFetcher.convertToStreamShardMetadata(shard));
+
+            if (sequenceNumsToRestore != null) {
+
+                if (sequenceNumsToRestore.containsKey(kinesisStreamShard)) {
+                    // if the shard was already seen and is contained in the state,
+                    // just use the sequence number stored in the state
+                    fetcher.registerNewSubscribedShardState(
+                            new KinesisStreamShardState(
+                                    kinesisStreamShard.getShardMetadata(),
+                                    shard,
+                                    sequenceNumsToRestore.get(kinesisStreamShard)));
+
+                    if (LOG.isInfoEnabled()) {
+                        LOG.info(
+                                "Subtask {} is seeding the fetcher with restored shard {},"
+                                        + " starting state set to the restored sequence number {}",
+                                getRuntimeContext().getIndexOfThisSubtask(),
+                                shard.toString(),
+                                sequenceNumsToRestore.get(kinesisStreamShard));
+                    }
+                } else {
+                    // the shard wasn't discovered in the previous run, therefore should be consumed
+                    // from the beginning
+                    fetcher.registerNewSubscribedShardState(
+                            new KinesisStreamShardState(
+                                    kinesisStreamShard.getShardMetadata(),
+                                    shard,
+                                    SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()));
+
+                    if (LOG.isInfoEnabled()) {
+                        LOG.info(
+                                "Subtask {} is seeding the fetcher with new discovered shard {},"
+                                        + " starting state set to the SENTINEL_EARLIEST_SEQUENCE_NUM",
+                                getRuntimeContext().getIndexOfThisSubtask(),
+                                shard.toString());
+                    }
+                }
+            } else {
+                // we're starting fresh; use the configured start position as initial state
+                SentinelSequenceNumber startingSeqNum =
+                        InitialPosition.valueOf(
+                                        configProps.getProperty(
+                                                ConsumerConfigConstants.STREAM_INITIAL_POSITION,
+                                                ConsumerConfigConstants
+                                                        .DEFAULT_STREAM_INITIAL_POSITION))
+                                .toSentinelSequenceNumber();
+
+                fetcher.registerNewSubscribedShardState(
+                        new KinesisStreamShardState(
+                                kinesisStreamShard.getShardMetadata(),
+                                shard,
+                                startingSeqNum.get()));
+
+                if (LOG.isInfoEnabled()) {
+                    LOG.info(
+                            "Subtask {} will be seeded with initial shard {}, starting state set as sequence number {}",
+                            getRuntimeContext().getIndexOfThisSubtask(),
+                            shard.toString(),
+                            startingSeqNum.get());
+                }
+            }
+        }
+
+        // check that we are running before starting the fetcher
+        if (!running) {
+            return;
+        }
+
+        // expose the fetcher from this point, so that state
+        // snapshots can be taken from the fetcher's state holders
+        this.fetcher = fetcher;
+
+        // start the fetcher loop. The fetcher will stop running only when cancel() or
+        // close() is called, or an error is thrown by threads created by the fetcher
+        fetcher.runFetcher();
+
+        // check that the fetcher has terminated before fully closing
+        fetcher.awaitTermination();
+        sourceContext.close();
+    }
+
+    @Override
+    public void cancel() {
+        running = false;
+
+        KinesisDataFetcher fetcher = this.fetcher;
+
+        // this method might be called before the subtask actually starts running,
+        // so we must check if the fetcher is actually created
+        if (fetcher != null) {
+            try {
+                // interrupt the fetcher and abort any in-flight work
+                fetcher.shutdownFetcher();
+            } catch (Exception e) {
+                LOG.warn("Error while closing Kinesis data fetcher", e);
+            }
+        }
+    }
+
+    @Override
+    public void close() throws Exception {
+        cancel();
+        // safeguard for when the fetcher has been interrupted: make sure we do not leak resources.
+        // The application might be stopped before the connector subtask has been started,
+        // so we must check if the fetcher was actually created
+        KinesisDataFetcher fetcher = this.fetcher;
+        if (fetcher != null) {
+            fetcher.awaitTermination();
+        }
+        this.fetcher = null;
+        super.close();
+    }
+
+    @Override
+    public TypeInformation<T> getProducedType() {
+        return deserializer.getProducedType();
+    }
+
+    // ------------------------------------------------------------------------
+    //  State Snapshot & Restore
+    // ------------------------------------------------------------------------
+
+    @Override
+    public void initializeState(FunctionInitializationContext context) throws Exception {
+        TypeInformation<Tuple2<StreamShardMetadata, SequenceNumber>> shardsStateTypeInfo =
+                new TupleTypeInfo<>(
+                        TypeInformation.of(StreamShardMetadata.class),
+                        TypeInformation.of(SequenceNumber.class));
+
+        sequenceNumsStateForCheckpoint =
+                context.getOperatorStateStore()
+                        .getUnionListState(
+                                new ListStateDescriptor<>(
+                                        sequenceNumsStateStoreName, shardsStateTypeInfo));
+
+        if (context.isRestored()) {
+            if (sequenceNumsToRestore == null) {
+                sequenceNumsToRestore = new HashMap<>();
+                for (Tuple2<StreamShardMetadata, SequenceNumber> kinesisSequenceNumber :
+                        sequenceNumsStateForCheckpoint.get()) {
+                    sequenceNumsToRestore.put(
+                            // we wrap the restored metadata inside an equivalence wrapper that
+                            // checks only stream name and shard id,
+                            // so that if a shard had been closed (due to a Kinesis reshard
+                            // operation, for example) since
+                            // the savepoint and has a different metadata than what we last stored,
+                            // we will still be able to match it in sequenceNumsToRestore. Please
+                            // see FLINK-8484 for details.
+                            new StreamShardMetadata.EquivalenceWrapper(kinesisSequenceNumber.f0),
+                            kinesisSequenceNumber.f1);
+                }
+
+                LOG.info(
+                        "Setting restore state in the FlinkKinesisConsumer. Using the following offsets: {}",
+                        sequenceNumsToRestore);
+            }
+        } else {
+            LOG.info("No restore state for FlinkKinesisConsumer.");
+        }
+    }
+
+    @Override
+    public void snapshotState(FunctionSnapshotContext context) throws Exception {
+        if (!running) {
+            LOG.debug("snapshotState() called on closed source; returning null.");
+        } else {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("Snapshotting state ...");
+            }
+
+            sequenceNumsStateForCheckpoint.clear();
+
+            if (fetcher == null) {
+                if (sequenceNumsToRestore != null) {
+                    for (Map.Entry<StreamShardMetadata.EquivalenceWrapper, SequenceNumber> entry :
+                            sequenceNumsToRestore.entrySet()) {
+                        // sequenceNumsToRestore is the restored global union state;
+                        // should only snapshot shards that actually belong to us
+                        int hashCode =
+                                shardAssigner.assign(
+                                        KinesisDataFetcher.convertToStreamShardHandle(
+                                                entry.getKey().getShardMetadata()),
+                                        getRuntimeContext().getNumberOfParallelSubtasks());
+                        if (KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(
+                                hashCode,
+                                getRuntimeContext().getNumberOfParallelSubtasks(),
+                                getRuntimeContext().getIndexOfThisSubtask())) {
+
+                            sequenceNumsStateForCheckpoint.add(
+                                    Tuple2.of(entry.getKey().getShardMetadata(), entry.getValue()));
+                        }
+                    }
+                }
+            } else {
+                HashMap<StreamShardMetadata, SequenceNumber> lastStateSnapshot =
+                        fetcher.snapshotState();
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug(
+                            "Snapshotted state, last processed sequence numbers: {}, checkpoint id: {}, timestamp: {}",
+                            lastStateSnapshot,
+                            context.getCheckpointId(),
+                            context.getCheckpointTimestamp());
+                }
+
+                for (Map.Entry<StreamShardMetadata, SequenceNumber> entry :
+                        lastStateSnapshot.entrySet()) {
+                    sequenceNumsStateForCheckpoint.add(Tuple2.of(entry.getKey(), entry.getValue()));
+                }
+            }
+        }
+    }
+
+    /**
+     * This method is exposed for tests that need to mock the KinesisDataFetcher in the consumer.
+     */
+    protected KinesisDataFetcher<T> createFetcher(
+            List<String> streams,
+            SourceFunction.SourceContext<T> sourceContext,
+            RuntimeContext runtimeContext,
+            Properties configProps,
+            KinesisDeserializationSchema<T> deserializationSchema) {
+
+        return new KinesisDataFetcher<>(
+                streams,
+                sourceContext,
+                runtimeContext,
+                configProps,
+                deserializationSchema,
+                shardAssigner,
+                periodicWatermarkAssigner,
+                watermarkTracker);
+    }
+
+    @VisibleForTesting
+    HashMap<StreamShardMetadata.EquivalenceWrapper, SequenceNumber> getRestoredState() {
+        return sequenceNumsToRestore;
+    }
+}
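For reference, a minimal usage sketch of the consumer defined above. The stream name, region, and use of SimpleStringSchema are assumptions for illustration only:

    import java.util.Properties;

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

    // Sketch only: configure and attach the consumer to a streaming job.
    Properties consumerConfig = new Properties();
    consumerConfig.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // assumed region
    consumerConfig.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "TRIM_HORIZON");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> events =
            env.addSource(
                    new FlinkKinesisConsumer<>(
                            "example-stream", new SimpleStringSchema(), consumerConfig)); // assumed stream name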
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
new file mode 100644
index 0000000..201324a
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.annotation.Internal;
+
+/**
+ * A {@link RuntimeException} wrapper indicating the exception was thrown from this connector. This
+ * class is abstract; semantic subclasses should be created to indicate the type of exception.
+ */
+@Internal
+public abstract class FlinkKinesisException extends RuntimeException {
+
+    public FlinkKinesisException(final String message) {
+        super(message);
+    }
+
+    public FlinkKinesisException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    /**
+     * A semantic {@link RuntimeException} thrown to indicate timeout errors in the Kinesis
+     * connector.
+     */
+    @Internal
+    public static class FlinkKinesisTimeoutException extends FlinkKinesisException {
+
+        public FlinkKinesisTimeoutException(String message) {
+            super(message);
+        }
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
new file mode 100644
index 0000000..338cd28
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducer.java
@@ -0,0 +1,500 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.serialization.RuntimeContextInitializationContextAdapters;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;
+import org.apache.flink.metrics.Counter;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.runtime.state.FunctionInitializationContext;
+import org.apache.flink.runtime.state.FunctionSnapshotContext;
+import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
+import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisSerializationSchema;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.TimeoutLatch;
+import org.apache.flink.util.InstantiationUtil;
+
+import com.amazonaws.metrics.AwsSdkMetrics;
+import com.amazonaws.services.kinesis.producer.Attempt;
+import com.amazonaws.services.kinesis.producer.KinesisProducer;
+import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration;
+import com.amazonaws.services.kinesis.producer.UserRecordFailedException;
+import com.amazonaws.services.kinesis.producer.UserRecordResult;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Field;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * The FlinkKinesisProducer allows producing records from a Flink DataStream into Kinesis.
+ *
+ * @param <OUT> Data type to produce into Kinesis Streams
+ * @deprecated This producer, based on the Kinesis Producer Library (KPL), has been superseded. The new
+ *     sink can be found in the module {@code flink-connectors/flink-connector-aws-kinesis-streams}
+ *     and package {@link KinesisStreamsSink}. It is based on the AWS SDK for Java 2.x. The work to
+ *     replace this sink was carried out in FLINK-24227.
+ */
+@Deprecated
+@PublicEvolving
+public class FlinkKinesisProducer<OUT> extends RichSinkFunction<OUT>
+        implements CheckpointedFunction {
+
+    public static final String KINESIS_PRODUCER_METRIC_GROUP = "kinesisProducer";
+
+    public static final String METRIC_BACKPRESSURE_CYCLES = "backpressureCycles";
+
+    public static final String METRIC_OUTSTANDING_RECORDS_COUNT = "outstandingRecordsCount";
+
+    public static final String KINESIS_PRODUCER_RELEASE_HOOK_NAME = "kinesisProducer";
+
+    private static final long serialVersionUID = 6447077318449477846L;
+
+    private static final Logger LOG = LoggerFactory.getLogger(FlinkKinesisProducer.class);
+
+    /** Properties to parametrize settings such as AWS service region, access key etc. */
+    private final Properties configProps;
+
+    /* Flag controlling the error behavior of the producer */
+    private boolean failOnError = false;
+
+    /* Maximum length of the internal record queue before backpressuring */
+    private int queueLimit = Integer.MAX_VALUE;
+
+    /* Name of the default stream to produce to. Can be overwritten by the serialization schema */
+    private String defaultStream;
+
+    /* Default partition id. Can be overwritten by the serialization schema */
+    private String defaultPartition;
+
+    /* Schema for turning the OUT type into a byte array. */
+    private final KinesisSerializationSchema<OUT> schema;
+
+    /* Optional custom partitioner */
+    private KinesisPartitioner<OUT> customPartitioner = null;
+
+    // --------------------------- Runtime fields ---------------------------
+
+    /* Our Kinesis instance for each parallel Flink sink */
+    private transient KinesisProducer producer;
+
+    /* Backpressuring waits for this latch, triggered by record callback */
+    private transient volatile TimeoutLatch backpressureLatch;
+
+    /* Callback handling failures */
+    private transient FutureCallback<UserRecordResult> callback;
+
+    /* Counts how often we have to wait for KPL because we are above the queue limit */
+    private transient Counter backpressureCycles;
+
+    /* Field for async exception */
+    private transient volatile Throwable thrownException;
+
+    // --------------------------- Initialization and configuration  ---------------------------
+
+    /**
+     * Create a new FlinkKinesisProducer. This is a constructor supporting Flink's {@link
+     * SerializationSchema}.
+     *
+     * @param schema Serialization schema for the data type
+     * @param configProps The properties used to configure KinesisProducer, including AWS
+     *     credentials and AWS region
+     */
+    public FlinkKinesisProducer(final SerializationSchema<OUT> schema, Properties configProps) {
+
+        // create a simple wrapper for the serialization schema
+        this(
+                new KinesisSerializationSchema<OUT>() {
+
+                    @Override
+                    public void open(SerializationSchema.InitializationContext context)
+                            throws Exception {
+                        schema.open(context);
+                    }
+
+                    @Override
+                    public ByteBuffer serialize(OUT element) {
+                        // wrap into ByteBuffer
+                        return ByteBuffer.wrap(schema.serialize(element));
+                    }
+                    // use default stream and hash key
+
+                    @Override
+                    public String getTargetStream(OUT element) {
+                        return null;
+                    }
+                },
+                configProps);
+    }
+
+    /**
+     * Create a new FlinkKinesisProducer. This is a constructor supporting {@link
+     * KinesisSerializationSchema}.
+     *
+     * @param schema Kinesis serialization schema for the data type
+     * @param configProps The properties used to configure KinesisProducer, including AWS
+     *     credentials and AWS region
+     */
+    public FlinkKinesisProducer(KinesisSerializationSchema<OUT> schema, Properties configProps) {
+        checkNotNull(configProps, "configProps can not be null");
+        this.configProps = KinesisConfigUtil.replaceDeprecatedProducerKeys(configProps);
+
+        checkNotNull(schema, "serialization schema cannot be null");
+        checkArgument(
+                InstantiationUtil.isSerializable(schema),
+                "The provided serialization schema is not serializable: "
+                        + schema.getClass().getName()
+                        + ". "
+                        + "Please check that it does not contain references to non-serializable instances.");
+        this.schema = schema;
+    }
+
+    /**
+     * If set to true, the producer will immediately fail with an exception on any error. Otherwise,
+     * the errors are logged and the producer goes on.
+     *
+     * @param failOnError Error behavior flag
+     */
+    public void setFailOnError(boolean failOnError) {
+        this.failOnError = failOnError;
+    }
+
+    /**
+     * The {@link KinesisProducer} holds an unbounded queue internally. To avoid memory problems
+     * under high loads, a limit can be employed above which the internal queue will be flushed,
+     * thereby applying backpressure.
+     *
+     * @param queueLimit The maximum length of the internal queue before backpressuring
+     */
+    public void setQueueLimit(int queueLimit) {
+        checkArgument(queueLimit > 0, "queueLimit must be a positive number");
+        this.queueLimit = queueLimit;
+    }
+
+    /**
+     * Set a default stream name.
+     *
+     * @param defaultStream Name of the default Kinesis stream
+     */
+    public void setDefaultStream(String defaultStream) {
+        this.defaultStream = defaultStream;
+    }
+
+    /**
+     * Set default partition id.
+     *
+     * @param defaultPartition Name of the default partition
+     */
+    public void setDefaultPartition(String defaultPartition) {
+        this.defaultPartition = defaultPartition;
+    }
+
+    public void setCustomPartitioner(KinesisPartitioner<OUT> partitioner) {
+        checkNotNull(partitioner, "partitioner cannot be null");
+        checkArgument(
+                InstantiationUtil.isSerializable(partitioner),
+                "The provided custom partitioner is not serializable: "
+                        + partitioner.getClass().getName()
+                        + ". "
+                        + "Please check that it does not contain references to non-serializable instances.");
+
+        this.customPartitioner = partitioner;
+    }
+
+    // --------------------------- Lifecycle methods ---------------------------
+
+    @Override
+    public void open(Configuration parameters) throws Exception {
+        super.open(parameters);
+
+        schema.open(
+                RuntimeContextInitializationContextAdapters.serializationAdapter(
+                        getRuntimeContext(), metricGroup -> metricGroup.addGroup("user")));
+
+        // check and pass the configuration properties
+        KinesisProducerConfiguration producerConfig =
+                KinesisConfigUtil.getValidatedProducerConfiguration(configProps);
+
+        producer = getKinesisProducer(producerConfig);
+
+        final MetricGroup kinesisMetricGroup =
+                getRuntimeContext().getMetricGroup().addGroup(KINESIS_PRODUCER_METRIC_GROUP);
+        this.backpressureCycles = kinesisMetricGroup.counter(METRIC_BACKPRESSURE_CYCLES);
+        kinesisMetricGroup.gauge(
+                METRIC_OUTSTANDING_RECORDS_COUNT, producer::getOutstandingRecordsCount);
+
+        backpressureLatch = new TimeoutLatch();
+        callback =
+                new FutureCallback<UserRecordResult>() {
+                    @Override
+                    public void onSuccess(UserRecordResult result) {
+                        backpressureLatch.trigger();
+                        if (!result.isSuccessful()) {
+                            if (failOnError) {
+                                // only remember the first thrown exception
+                                if (thrownException == null) {
+                                    thrownException =
+                                            new RuntimeException("Record was not sent successful");
+                                }
+                            } else {
+                                LOG.warn("Record was not sent successful");
+                            }
+                        }
+                    }
+
+                    @Override
+                    public void onFailure(Throwable t) {
+                        backpressureLatch.trigger();
+                        if (failOnError) {
+                            thrownException = t;
+                        } else {
+                            LOG.warn("An exception occurred while processing a record", t);
+                        }
+                    }
+                };
+
+        if (this.customPartitioner != null) {
+            this.customPartitioner.initialize(
+                    getRuntimeContext().getIndexOfThisSubtask(),
+                    getRuntimeContext().getNumberOfParallelSubtasks());
+        }
+
+        final RuntimeContext ctx = getRuntimeContext();
+        ctx.registerUserCodeClassLoaderReleaseHookIfAbsent(
+                KINESIS_PRODUCER_RELEASE_HOOK_NAME,
+                () -> this.runClassLoaderReleaseHook(ctx.getUserCodeClassLoader()));
+
+        LOG.info("Started Kinesis producer instance for region '{}'", producerConfig.getRegion());
+    }
+
+    @Override
+    public void invoke(OUT value, Context context) throws Exception {
+        if (this.producer == null) {
+            throw new RuntimeException("Kinesis producer has been closed");
+        }
+
+        checkAndPropagateAsyncError();
+        boolean didWaitForFlush = enforceQueueLimit();
+
+        if (didWaitForFlush) {
+            checkAndPropagateAsyncError();
+        }
+
+        String stream = defaultStream;
+        String partition = defaultPartition;
+
+        ByteBuffer serialized = schema.serialize(value);
+
+        // maybe set custom stream
+        String customStream = schema.getTargetStream(value);
+        if (customStream != null) {
+            stream = customStream;
+        }
+
+        String explicitHashkey = null;
+        // maybe set custom partition
+        if (customPartitioner != null) {
+            partition = customPartitioner.getPartitionId(value);
+            explicitHashkey = customPartitioner.getExplicitHashKey(value);
+        }
+
+        if (stream == null) {
+            if (failOnError) {
+                throw new RuntimeException("No target stream set");
+            } else {
+                LOG.warn("No target stream set. Skipping record");
+                return;
+            }
+        }
+
+        ListenableFuture<UserRecordResult> cb =
+                producer.addUserRecord(stream, partition, explicitHashkey, serialized);
+        Futures.addCallback(cb, callback, MoreExecutors.directExecutor());
+    }
+
+    @Override
+    public void close() throws Exception {
+        LOG.info("Closing producer");
+        super.close();
+
+        if (producer != null) {
+            LOG.info("Flushing outstanding {} records", producer.getOutstandingRecordsCount());
+            // try to flush all outstanding records
+            flushSync();
+
+            LOG.info("Flushing done. Destroying producer instance.");
+            producer.destroy();
+            producer = null;
+        }
+
+        // make sure we propagate pending errors
+        checkAndPropagateAsyncError();
+    }
+
+    @Override
+    public void initializeState(FunctionInitializationContext context) throws Exception {
+        // nothing to do
+    }
+
+    @Override
+    public void snapshotState(FunctionSnapshotContext context) throws Exception {
+        // check for asynchronous errors and fail the checkpoint if necessary
+        checkAndPropagateAsyncError();
+
+        flushSync();
+        if (producer.getOutstandingRecordsCount() > 0) {
+            throw new IllegalStateException(
+                    "Number of outstanding records must be zero at this point: "
+                            + producer.getOutstandingRecordsCount());
+        }
+
+        // if the flushed requests have errors, we should also propagate them and fail the checkpoint
+        checkAndPropagateAsyncError();
+    }
+
+    // --------------------------- Utilities ---------------------------
+
+    /**
+     * Creates a {@link KinesisProducer}. Exposed so that tests can inject mock producers easily.
+     */
+    @VisibleForTesting
+    protected KinesisProducer getKinesisProducer(KinesisProducerConfiguration producerConfig) {
+        return new KinesisProducer(producerConfig);
+    }
+
+    /** Check if there are any asynchronous exceptions. If so, rethrow the exception. */
+    private void checkAndPropagateAsyncError() throws Exception {
+        if (thrownException != null) {
+            String errorMessages = "";
+            if (thrownException instanceof UserRecordFailedException) {
+                List<Attempt> attempts =
+                        ((UserRecordFailedException) thrownException).getResult().getAttempts();
+                for (Attempt attempt : attempts) {
+                    if (attempt.getErrorMessage() != null) {
+                        errorMessages += attempt.getErrorMessage() + "\n";
+                    }
+                }
+            }
+            if (failOnError) {
+                throw new RuntimeException(
+                        "An exception was thrown while processing a record: " + errorMessages,
+                        thrownException);
+            } else {
+                LOG.warn(
+                        "An exception was thrown while processing a record: {}.",
+                        errorMessages,
+                        thrownException);
+
+                // reset, prevent double throwing
+                thrownException = null;
+            }
+        }
+    }
+
+    /**
+     * If the internal queue of the {@link KinesisProducer} gets too long, flush some of the records
+     * until we are below the limit again. We don't want to flush _all_ records at this point since
+     * that would break record aggregation.
+     *
+     * @return boolean whether flushing occurred or not
+     */
+    private boolean enforceQueueLimit() {
+        int attempt = 0;
+        while (producer.getOutstandingRecordsCount() >= queueLimit) {
+            backpressureCycles.inc();
+            if (attempt >= 10) {
+                LOG.warn(
+                        "Waiting for the queue length to drop below the limit takes unusually long, still not done after {} attempts.",
+                        attempt);
+            }
+            attempt++;
+            try {
+                backpressureLatch.await(100);
+            } catch (InterruptedException e) {
+                LOG.warn("Flushing was interrupted.");
+                break;
+            }
+        }
+        return attempt > 0;
+    }
+
+    /**
+     * A reimplementation of {@link KinesisProducer#flushSync()}. This implementation releases the
+     * block on flushing if an interruption occurred.
+     */
+    private void flushSync() throws Exception {
+        while (producer.getOutstandingRecordsCount() > 0) {
+            producer.flush();
+            try {
+                Thread.sleep(500);
+            } catch (InterruptedException e) {
+                LOG.warn("Flushing was interrupted.");
+                break;
+            }
+        }
+    }
+
+    /**
+     * Remove references created by the producer that would otherwise prevent the classloader from
+     * unloading. References were analyzed as of the following versions: aws.kinesis-kpl.version =
+     * 0.14.0, aws.sdk.version = 1.11.754, aws.sdkv2.version = 2.13.52.
+     */
+    private void runClassLoaderReleaseHook(ClassLoader classLoader) {
+        AwsSdkMetrics.unregisterMetricAdminMBean();
+
+        // shutdown FileAgeManager thread pool
+        try {
+            Class<?> fileAgeManagerClazz =
+                    Class.forName(
+                            "com.amazonaws.services.kinesis.producer.FileAgeManager",
+                            true,
+                            classLoader);
+            Field instanceField = fileAgeManagerClazz.getDeclaredField("instance");
+            instanceField.setAccessible(true);
+            Object fileAgeManager = instanceField.get(null);
+
+            Field executorField = fileAgeManagerClazz.getDeclaredField("executorService");
+            executorField.setAccessible(true);
+            ExecutorService executorService = (ExecutorService) executorField.get(fileAgeManager);
+            executorService.shutdown();
+            executorService.awaitTermination(1, TimeUnit.MINUTES);
+        } catch (ClassNotFoundException
+                | NoSuchFieldException
+                | IllegalAccessException
+                | InterruptedException e) {
+            LOG.info("Unable to shutdown thread pool of KinesisProducer/FileAgeManager.", e);
+        }
+    }
+}
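For reference, a minimal usage sketch of the deprecated producer above (new jobs should prefer the KinesisStreamsSink referenced in the @deprecated note). The stream name, partition, region, and queue limit are assumptions for illustration only:

    import java.util.Properties;

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;

    // Sketch only: configure the KPL-based producer and attach it as a sink.
    Properties producerConfig = new Properties();
    producerConfig.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // assumed region

    FlinkKinesisProducer<String> producer =
            new FlinkKinesisProducer<>(new SimpleStringSchema(), producerConfig);
    producer.setDefaultStream("example-stream"); // assumed stream name
    producer.setDefaultPartition("0");
    producer.setQueueLimit(100_000);             // backpressure above this many outstanding records
    producer.setFailOnError(true);               // fail the job instead of only logging errors

    // Attach to an existing DataStream<String>, e.g. someStringStream.addSink(producer);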
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
new file mode 100644
index 0000000..c93c9da
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisPartitioner.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+
+import java.io.Serializable;
+
+/**
+ * An abstract class for partitioning Kinesis records.
+ *
+ * @param <T> record type
+ */
+@PublicEvolving
+public abstract class KinesisPartitioner<T> implements Serializable, PartitionKeyGenerator<T> {
+
+    private static final long serialVersionUID = -7467294664702189780L;
+
+    /**
+     * Return a partition id based on the input.
+     *
+     * @param element Element to partition
+     * @return A string representing the partition id
+     */
+    public abstract String getPartitionId(T element);
+
+    /**
+     * Optional method for providing an explicit hash key.
+     *
+     * @param element Element to get the hash key for
+     * @return the hash key for the element
+     */
+    public String getExplicitHashKey(T element) {
+        return null;
+    }
+
+    /**
+     * Optional initializer.
+     *
+     * @param indexOfThisSubtask Index of this partitioner instance
+     * @param numberOfParallelSubtasks Total number of parallel instances
+     */
+    public void initialize(int indexOfThisSubtask, int numberOfParallelSubtasks) {}
+
+    @Override
+    public String apply(T element) {
+        return getPartitionId(element);
+    }
+}
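A minimal sketch of a custom partitioner based on the abstract class above. The OrderEvent type and its getOrderId() accessor are hypothetical, introduced only for illustration:

    import org.apache.flink.streaming.connectors.kinesis.KinesisPartitioner;

    // Sketch only: route all records with the same (hypothetical) order id to the same shard,
    // since records with equal partition keys map to the same shard.
    public class OrderIdPartitioner extends KinesisPartitioner<OrderEvent> {

        private static final long serialVersionUID = 1L;

        @Override
        public String getPartitionId(OrderEvent element) {
            return element.getOrderId();
        }
    }

    // Usage with the producer above: producer.setCustomPartitioner(new OrderIdPartitioner());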
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisShardAssigner.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisShardAssigner.java
new file mode 100644
index 0000000..322b548
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/KinesisShardAssigner.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+
+import java.io.Serializable;
+
+/**
+ * Utility to map Kinesis shards to Flink subtask indices. Users can implement this interface to
+ * optimize distribution of shards over subtasks. See {@link #assign(StreamShardHandle, int)} for
+ * details.
+ */
+@PublicEvolving
+public interface KinesisShardAssigner extends Serializable {
+
+    /**
+     * Returns the index of the target subtask that a specific shard should be assigned to. For
+     * return values outside the subtask range, a modulus operation is applied automatically, so
+     * it is also valid to simply return a hash code.
+     *
+     * <p>The resulting distribution of shards should have the following contract:
+     *
+     * <ul>
+     *   <li>1. Uniform distribution across subtasks
+     *   <li>2. Deterministic, calls for a given shard always return same index.
+     * </ul>
+     *
+     * <p>The above contract is crucial and cannot be broken. Consumer subtasks rely on this
+     * contract to filter out shards that they should not subscribe to, guaranteeing that each shard
+     * of a stream will always be assigned to one subtask in a uniformly distributed manner.
+     *
+     * @param shard the shard to determine the target subtask for
+     * @param numParallelSubtasks total number of subtasks
+     * @return the target subtask index; if it falls outside of the range, a modulus operation will be applied
+     */
+    int assign(StreamShardHandle shard, int numParallelSubtasks);
+}
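A minimal sketch of a shard assigner implementing the contract above; the consumer variable is assumed to be an existing FlinkKinesisConsumer instance:

    import org.apache.flink.streaming.connectors.kinesis.KinesisShardAssigner;

    // Sketch only: spread shards over subtasks by hashing the shard id. The consumer applies the
    // modulus, so returning a raw hash code is valid: the result is deterministic per shard and
    // roughly uniform across subtasks.
    KinesisShardAssigner assigner =
            (shard, numParallelSubtasks) -> shard.getShard().getShardId().hashCode();

    consumer.setShardAssigner(assigner); // consumer: an existing FlinkKinesisConsumer<T>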
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
new file mode 100644
index 0000000..4c1bda4
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/AWSConfigConstants.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.config;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/**
+ * Class inheriting from {@link org.apache.flink.connector.aws.config.AWSConfigConstants} for
+ * backward compatibility.
+ */
+@PublicEvolving
+public class AWSConfigConstants extends org.apache.flink.connector.aws.config.AWSConfigConstants {}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
new file mode 100644
index 0000000..fff44d6
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.config;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
+import org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer;
+import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
+
+import java.time.Duration;
+
+/**
+ * Optional consumer specific configuration keys and default values for {@link
+ * FlinkKinesisConsumer}.
+ */
+@PublicEvolving
+public class ConsumerConfigConstants extends AWSConfigConstants {
+
+    /**
+     * The initial position to start reading shards from. This will affect the {@code
+     * ShardIteratorType} used when the consumer tasks retrieve the first shard iterator for each
+     * Kinesis shard.
+     */
+    public enum InitialPosition {
+
+        /**
+         * Start reading from the earliest possible record in the stream (excluding expired data
+         * records).
+         */
+        TRIM_HORIZON(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM),
+
+        /** Start reading from the latest incoming record. */
+        LATEST(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM),
+
+        /** Start reading from the record at the specified timestamp. */
+        AT_TIMESTAMP(SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM);
+
+        private SentinelSequenceNumber sentinelSequenceNumber;
+
+        InitialPosition(SentinelSequenceNumber sentinelSequenceNumber) {
+            this.sentinelSequenceNumber = sentinelSequenceNumber;
+        }
+
+        public SentinelSequenceNumber toSentinelSequenceNumber() {
+            return this.sentinelSequenceNumber;
+        }
+    }
+
+    /** The record publisher type represents the record consumption style. */
+    public enum RecordPublisherType {
+
+        /** Consume the Kinesis records using AWS SDK v2 with the enhanced fan-out consumer. */
+        EFO,
+        /** Consume the Kinesis records using AWS SDK v1 with the get-records method. */
+        POLLING
+    }
+
+    /** The EFO registration type controls how the EFO consumer is registered and deregistered. */
+    public enum EFORegistrationType {
+
+        /**
+         * Delay registration of the EFO consumer so that it is executed by the task manager.
+         * Deregister the EFO consumer on the task manager when the task is shut down.
+         */
+        LAZY,
+        /**
+         * Register the EFO consumer eagerly so that it is executed by the job manager. Deregister
+         * the EFO consumer in the same way as LAZY does.
+         */
+        EAGER,
+        /** Do not register or deregister the EFO consumer programmatically. */
+        NONE
+    }
+
+    /** The RecordPublisher type (EFO|POLLING, default is POLLING). */
+    public static final String RECORD_PUBLISHER_TYPE = "flink.stream.recordpublisher";
+
+    /** The name of the EFO consumer to register with KDS. */
+    public static final String EFO_CONSUMER_NAME = "flink.stream.efo.consumername";
+
+    /**
+     * Determine how and when consumer de-/registration is performed (LAZY|EAGER|NONE, default is
+     * LAZY).
+     */
+    public static final String EFO_REGISTRATION_TYPE = "flink.stream.efo.registration";
+
+    /** The prefix of consumer ARN for a given stream. */
+    public static final String EFO_CONSUMER_ARN_PREFIX = "flink.stream.efo.consumerarn";
+
+    /** The initial position to start reading Kinesis streams from (LATEST is used if not set). */
+    public static final String STREAM_INITIAL_POSITION = "flink.stream.initpos";
+
+    /**
+     * The initial timestamp to start reading the Kinesis stream from (when AT_TIMESTAMP is set
+     * for STREAM_INITIAL_POSITION).
+     */
+    public static final String STREAM_INITIAL_TIMESTAMP = "flink.stream.initpos.timestamp";
+
+    /**
+     * The date format of the initial timestamp to start reading the Kinesis stream from (when
+     * AT_TIMESTAMP is set for STREAM_INITIAL_POSITION).
+     */
+    public static final String STREAM_TIMESTAMP_DATE_FORMAT =
+            "flink.stream.initpos.timestamp.format";
+
+    /** The maximum number of describeStream attempts if we get a recoverable exception. */
+    public static final String STREAM_DESCRIBE_RETRIES = "flink.stream.describe.maxretries";
+
+    /**
+     * The base backoff time between each describeStream attempt (for consuming from DynamoDB
+     * streams).
+     */
+    public static final String STREAM_DESCRIBE_BACKOFF_BASE = "flink.stream.describe.backoff.base";
+
+    /**
+     * The maximum backoff time between each describeStream attempt (for consuming from DynamoDB
+     * streams).
+     */
+    public static final String STREAM_DESCRIBE_BACKOFF_MAX = "flink.stream.describe.backoff.max";
+
+    /**
+     * The power constant for exponential backoff between each describeStream attempt (for consuming
+     * from DynamoDB streams).
+     */
+    public static final String STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.stream.describe.backoff.expconst";
+
+    /** The maximum number of listShards attempts if we get a recoverable exception. */
+    public static final String LIST_SHARDS_RETRIES = "flink.list.shards.maxretries";
+
+    /** The base backoff time between each listShards attempt. */
+    public static final String LIST_SHARDS_BACKOFF_BASE = "flink.list.shards.backoff.base";
+
+    /** The maximum backoff time between each listShards attempt. */
+    public static final String LIST_SHARDS_BACKOFF_MAX = "flink.list.shards.backoff.max";
+
+    /** The power constant for exponential backoff between each listShards attempt. */
+    public static final String LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.list.shards.backoff.expconst";
+
+    /** The maximum number of describeStreamConsumer attempts if we get a recoverable exception. */
+    public static final String DESCRIBE_STREAM_CONSUMER_RETRIES =
+            "flink.stream.describestreamconsumer.maxretries";
+
+    /** The base backoff time between each describeStreamConsumer attempt. */
+    public static final String DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE =
+            "flink.stream.describestreamconsumer.backoff.base";
+
+    /** The maximum backoff time between each describeStreamConsumer attempt. */
+    public static final String DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX =
+            "flink.stream.describestreamconsumer.backoff.max";
+
+    /** The power constant for exponential backoff between each describeStreamConsumer attempt. */
+    public static final String DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.stream.describestreamconsumer.backoff.expconst";
+
+    /** The maximum number of registerStream attempts if we get a recoverable exception. */
+    public static final String REGISTER_STREAM_RETRIES =
+            "flink.stream.registerstreamconsumer.maxretries";
+
+    /**
+     * The maximum time in seconds to wait for a stream consumer to become active before giving up.
+     */
+    public static final String REGISTER_STREAM_TIMEOUT_SECONDS =
+            "flink.stream.registerstreamconsumer.timeout";
+
+    /** The base backoff time between each registerStream attempt. */
+    public static final String REGISTER_STREAM_BACKOFF_BASE =
+            "flink.stream.registerstreamconsumer.backoff.base";
+
+    /** The maximum backoff time between each registerStream attempt. */
+    public static final String REGISTER_STREAM_BACKOFF_MAX =
+            "flink.stream.registerstreamconsumer.backoff.max";
+
+    /** The power constant for exponential backoff between each registerStream attempt. */
+    public static final String REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.stream.registerstreamconsumer.backoff.expconst";
+
+    /** The maximum number of deregisterStream attempts if we get a recoverable exception. */
+    public static final String DEREGISTER_STREAM_RETRIES =
+            "flink.stream.deregisterstreamconsumer.maxretries";
+
+    /** The maximum time in seconds to wait for a stream consumer to deregister before giving up. */
+    public static final String DEREGISTER_STREAM_TIMEOUT_SECONDS =
+            "flink.stream.deregisterstreamconsumer.timeout";
+
+    /** The base backoff time between each deregisterStream attempt. */
+    public static final String DEREGISTER_STREAM_BACKOFF_BASE =
+            "flink.stream.deregisterstreamconsumer.backoff.base";
+
+    /** The maximum backoff time between each deregisterStream attempt. */
+    public static final String DEREGISTER_STREAM_BACKOFF_MAX =
+            "flink.stream.deregisterstreamconsumer.backoff.max";
+
+    /** The power constant for exponential backoff between each deregisterStream attempt. */
+    public static final String DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.stream.deregisterstreamconsumer.backoff.expconst";
+
+    /** The maximum number of subscribeToShard attempts if we get a recoverable exception. */
+    public static final String SUBSCRIBE_TO_SHARD_RETRIES =
+            "flink.shard.subscribetoshard.maxretries";
+
+    /** A timeout when waiting for a shard subscription to be established. */
+    public static final String SUBSCRIBE_TO_SHARD_TIMEOUT_SECONDS =
+            "flink.shard.subscribetoshard.timeout";
+
+    /** The base backoff time between each subscribeToShard attempt. */
+    public static final String SUBSCRIBE_TO_SHARD_BACKOFF_BASE =
+            "flink.shard.subscribetoshard.backoff.base";
+
+    /** The maximum backoff time between each subscribeToShard attempt. */
+    public static final String SUBSCRIBE_TO_SHARD_BACKOFF_MAX =
+            "flink.shard.subscribetoshard.backoff.max";
+
+    /** The power constant for exponential backoff between each subscribeToShard attempt. */
+    public static final String SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.shard.subscribetoshard.backoff.expconst";
+
+    /**
+     * The maximum number of records to try to get each time we fetch records from an AWS
+     * Kinesis shard.
+     */
+    public static final String SHARD_GETRECORDS_MAX = "flink.shard.getrecords.maxrecordcount";
+
+    /** The maximum number of getRecords attempts if we get a recoverable exception. */
+    public static final String SHARD_GETRECORDS_RETRIES = "flink.shard.getrecords.maxretries";
+
+    /**
+     * The base backoff time between getRecords attempts if we get a
+     * ProvisionedThroughputExceededException.
+     */
+    public static final String SHARD_GETRECORDS_BACKOFF_BASE =
+            "flink.shard.getrecords.backoff.base";
+
+    /**
+     * The maximum backoff time between getRecords attempts if we get a
+     * ProvisionedThroughputExceededException.
+     */
+    public static final String SHARD_GETRECORDS_BACKOFF_MAX = "flink.shard.getrecords.backoff.max";
+
+    /** The power constant for exponential backoff between each getRecords attempt. */
+    public static final String SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.shard.getrecords.backoff.expconst";
+
+    /** The interval between each getRecords request to an AWS Kinesis shard in milliseconds. */
+    public static final String SHARD_GETRECORDS_INTERVAL_MILLIS =
+            "flink.shard.getrecords.intervalmillis";
+
+    /**
+     * The maximum number of getShardIterator attempts if we get
+     * ProvisionedThroughputExceededException.
+     */
+    public static final String SHARD_GETITERATOR_RETRIES = "flink.shard.getiterator.maxretries";
+
+    /**
+     * The base backoff time between getShardIterator attempts if we get a
+     * ProvisionedThroughputExceededException.
+     */
+    public static final String SHARD_GETITERATOR_BACKOFF_BASE =
+            "flink.shard.getiterator.backoff.base";
+
+    /**
+     * The maximum backoff time between getShardIterator attempts if we get a
+     * ProvisionedThroughputExceededException.
+     */
+    public static final String SHARD_GETITERATOR_BACKOFF_MAX =
+            "flink.shard.getiterator.backoff.max";
+
+    /** The power constant for exponential backoff between each getShardIterator attempt. */
+    public static final String SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT =
+            "flink.shard.getiterator.backoff.expconst";
+
+    /** The interval between each attempt to discover new shards. */
+    public static final String SHARD_DISCOVERY_INTERVAL_MILLIS =
+            "flink.shard.discovery.intervalmillis";
+
+    /** The config to turn on adaptive reads from a shard. */
+    public static final String SHARD_USE_ADAPTIVE_READS = "flink.shard.adaptivereads";
+
+    /** The interval after which to consider a shard idle for purposes of watermark generation. */
+    public static final String SHARD_IDLE_INTERVAL_MILLIS = "flink.shard.idle.interval";
+
+    /** The interval for periodically synchronizing the shared watermark state. */
+    public static final String WATERMARK_SYNC_MILLIS = "flink.watermark.sync.interval";
+
+    /** The maximum delta allowed for the reader to advance ahead of the shared global watermark. */
+    public static final String WATERMARK_LOOKAHEAD_MILLIS = "flink.watermark.lookahead.millis";
+
+    /**
+     * The maximum number of records that will be buffered before suspending consumption of a shard.
+     */
+    public static final String WATERMARK_SYNC_QUEUE_CAPACITY =
+            "flink.watermark.sync.queue.capacity";
+
+    public static final String EFO_HTTP_CLIENT_MAX_CONCURRENCY =
+            "flink.stream.efo.http-client.max-concurrency";
+
+    public static final String EFO_HTTP_CLIENT_READ_TIMEOUT_MILLIS =
+            "flink.stream.efo.http-client.read-timeout";
+
+    // ------------------------------------------------------------------------
+    //  Default values for consumer configuration
+    // ------------------------------------------------------------------------
+
+    public static final String DEFAULT_STREAM_INITIAL_POSITION = InitialPosition.LATEST.toString();
+
+    public static final String DEFAULT_STREAM_TIMESTAMP_DATE_FORMAT =
+            "yyyy-MM-dd'T'HH:mm:ss.SSSXXX";
+
+    public static final int DEFAULT_STREAM_DESCRIBE_RETRIES = 50;
+
+    public static final long DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE = 2000L;
+
+    public static final long DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX = 5000L;
+
+    public static final double DEFAULT_STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final long DEFAULT_LIST_SHARDS_BACKOFF_BASE = 1000L;
+
+    public static final long DEFAULT_LIST_SHARDS_BACKOFF_MAX = 5000L;
+
+    public static final double DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final int DEFAULT_LIST_SHARDS_RETRIES = 10;
+
+    public static final int DEFAULT_DESCRIBE_STREAM_CONSUMER_RETRIES = 50;
+
+    public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE = 2000L;
+
+    public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX = 5000L;
+
+    public static final double DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final int DEFAULT_REGISTER_STREAM_RETRIES = 10;
+
+    public static final Duration DEFAULT_REGISTER_STREAM_TIMEOUT = Duration.ofSeconds(60);
+
+    public static final long DEFAULT_REGISTER_STREAM_BACKOFF_BASE = 500L;
+
+    public static final long DEFAULT_REGISTER_STREAM_BACKOFF_MAX = 2000L;
+
+    public static final double DEFAULT_REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final int DEFAULT_DEREGISTER_STREAM_RETRIES = 10;
+
+    public static final Duration DEFAULT_DEREGISTER_STREAM_TIMEOUT = Duration.ofSeconds(60);
+
+    public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_BASE = 500L;
+
+    public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_MAX = 2000L;
+
+    public static final double DEFAULT_DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final int DEFAULT_SUBSCRIBE_TO_SHARD_RETRIES = 10;
+
+    public static final Duration DEFAULT_SUBSCRIBE_TO_SHARD_TIMEOUT = Duration.ofSeconds(60);
+
+    public static final long DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_BASE = 1000L;
+
+    public static final long DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_MAX = 2000L;
+
+    public static final double DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final int DEFAULT_SHARD_GETRECORDS_MAX = 10000;
+
+    public static final int DEFAULT_SHARD_GETRECORDS_RETRIES = 3;
+
+    public static final long DEFAULT_SHARD_GETRECORDS_BACKOFF_BASE = 300L;
+
+    public static final long DEFAULT_SHARD_GETRECORDS_BACKOFF_MAX = 1000L;
+
+    public static final double DEFAULT_SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final long DEFAULT_SHARD_GETRECORDS_INTERVAL_MILLIS = 200L;
+
+    public static final int DEFAULT_SHARD_GETITERATOR_RETRIES = 3;
+
+    public static final long DEFAULT_SHARD_GETITERATOR_BACKOFF_BASE = 300L;
+
+    public static final long DEFAULT_SHARD_GETITERATOR_BACKOFF_MAX = 1000L;
+
+    public static final double DEFAULT_SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
+
+    public static final long DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS = 10000L;
+
+    public static final boolean DEFAULT_SHARD_USE_ADAPTIVE_READS = false;
+
+    public static final long DEFAULT_SHARD_IDLE_INTERVAL_MILLIS = -1;
+
+    public static final long DEFAULT_WATERMARK_SYNC_MILLIS = 30_000;
+
+    public static final int DEFAULT_EFO_HTTP_CLIENT_MAX_CONCURRENCY = 10_000;
+
+    public static final Duration DEFAULT_EFO_HTTP_CLIENT_READ_TIMEOUT = Duration.ofMinutes(6);
+
+    /**
+     * To avoid shard iterator expiry in {@link ShardConsumer}s, the configured getRecords interval
+     * cannot exceed 5 minutes, which is the expiry time of retrieved iterators.
+     */
+    public static final long MAX_SHARD_GETRECORDS_INTERVAL_MILLIS = 300000L;
+
+    /**
+     * Builds the configuration key for the EFO consumer ARN of a given stream.
+     *
+     * @param streamName the stream name the key is built for.
+     * @return the EFO consumer ARN configuration key for that stream.
+     */
+    public static String efoConsumerArn(final String streamName) {
+        return EFO_CONSUMER_ARN_PREFIX + "." + streamName;
+    }
+}
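
For orientation, the sketch below shows how these consumer keys are typically wired into a FlinkKinesisConsumer through java.util.Properties. It is illustrative only and not part of this commit: the stream name, region, and values are placeholders, and STREAM_INITIAL_POSITION is the initial-position key defined earlier in this class.

    import java.util.Properties;

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

    public class ConsumerConfigExample {
        public static FlinkKinesisConsumer<String> buildConsumer() {
            Properties config = new Properties();
            // Region and credentials use the keys inherited from AWSConfigConstants.
            config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
            // Start from the latest record (LATEST is also DEFAULT_STREAM_INITIAL_POSITION).
            config.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "LATEST");
            // Poll each shard every 200 ms and fetch at most 10000 records per getRecords call
            // (the defaults listed above); the interval is capped at 5 minutes so shard
            // iterators do not expire between polls.
            config.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, "200");
            config.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_MAX, "10000");
            return new FlinkKinesisConsumer<>("example-stream", new SimpleStringSchema(), config);
        }
    }

The EFO keys above (RECORD_PUBLISHER_TYPE and the EFO_HTTP_CLIENT_* settings) are set the same way; any key left unset falls back to the corresponding DEFAULT_* value in this class.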
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
new file mode 100644
index 0000000..539ed93
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ProducerConfigConstants.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.config;
+
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
+
+/**
+ * Optional producer specific configuration keys for {@link FlinkKinesisProducer}.
+ *
+ * @deprecated This class is deprecated in favor of the official AWS Kinesis producer configuration
+ *     keys. See <a
+ *     href="https://github.com/awslabs/amazon-kinesis-producer/blob/master/java/amazon-kinesis-producer-sample/default_config.properties">
+ *     here</a> for the full list of available configs. For configuring the region and credentials,
+ *     please use the keys in {@link
+ *     org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants}.
+ */
+@Deprecated
+public class ProducerConfigConstants extends AWSConfigConstants {
+
+    /**
+     * Deprecated key.
+     *
+     * @deprecated This is deprecated in favor of the official AWS Kinesis producer configuration
+     *     keys. Please use {@code CollectionMaxCount} instead.
+     */
+    @Deprecated public static final String COLLECTION_MAX_COUNT = "aws.producer.collectionMaxCount";
+
+    /**
+     * Deprecated key.
+     *
+     * @deprecated This is deprecated in favor of the official AWS Kinesis producer configuration
+     *     keys. Please use {@code AggregationMaxCount} instead.
+     */
+    @Deprecated
+    public static final String AGGREGATION_MAX_COUNT = "aws.producer.aggregationMaxCount";
+}
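
Since the class above only aliases two Kinesis Producer Library settings, migrating off it is mechanical: pass the native KPL property names straight through, as the Javadoc suggests. A minimal sketch with made-up values, assuming the usual Properties-based FlinkKinesisProducer setup:

    Properties producerConfig = new Properties();
    // Region and credentials still use the AWSConfigConstants keys.
    producerConfig.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
    // Deprecated spellings carried by this class:
    //   producerConfig.setProperty(ProducerConfigConstants.COLLECTION_MAX_COUNT, "100");
    //   producerConfig.setProperty(ProducerConfigConstants.AGGREGATION_MAX_COUNT, "50");
    // Preferred: the native Kinesis Producer Library keys named in the Javadoc above.
    producerConfig.setProperty("CollectionMaxCount", "100");
    producerConfig.setProperty("AggregationMaxCount", "50");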
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
new file mode 100644
index 0000000..e3926d5
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals;
+
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.connectors.kinesis.KinesisShardAssigner;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.model.DynamoDBStreamsShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.DynamoDBStreamsProxy;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * DynamoDB Streams data fetcher.
+ *
+ * @param <T> type of fetched data.
+ */
+public class DynamoDBStreamsDataFetcher<T> extends KinesisDataFetcher<T> {
+    private final RecordPublisherFactory recordPublisherFactory;
+
+    /**
+     * Constructor.
+     *
+     * @param streams list of streams to fetch data from
+     * @param sourceContext source context
+     * @param runtimeContext runtime context
+     * @param configProps config properties
+     * @param deserializationSchema deserialization schema
+     * @param shardAssigner shard assigner
+     */
+    public DynamoDBStreamsDataFetcher(
+            List<String> streams,
+            SourceFunction.SourceContext<T> sourceContext,
+            RuntimeContext runtimeContext,
+            Properties configProps,
+            KinesisDeserializationSchema<T> deserializationSchema,
+            KinesisShardAssigner shardAssigner) {
+
+        this(
+                streams,
+                sourceContext,
+                runtimeContext,
+                configProps,
+                deserializationSchema,
+                shardAssigner,
+                DynamoDBStreamsProxy::create);
+    }
+
+    @VisibleForTesting
+    DynamoDBStreamsDataFetcher(
+            List<String> streams,
+            SourceFunction.SourceContext<T> sourceContext,
+            RuntimeContext runtimeContext,
+            Properties configProps,
+            KinesisDeserializationSchema<T> deserializationSchema,
+            KinesisShardAssigner shardAssigner,
+            FlinkKinesisProxyFactory flinkKinesisProxyFactory) {
+        super(
+                streams,
+                sourceContext,
+                sourceContext.getCheckpointLock(),
+                runtimeContext,
+                configProps,
+                deserializationSchema,
+                shardAssigner,
+                null,
+                null,
+                new AtomicReference<>(),
+                new ArrayList<>(),
+                createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
+                flinkKinesisProxyFactory,
+                null);
+
+        this.recordPublisherFactory = new PollingRecordPublisherFactory(flinkKinesisProxyFactory);
+    }
+
+    @Override
+    protected boolean shouldAdvanceLastDiscoveredShardId(
+            String shardId, String lastSeenShardIdOfStream) {
+        if (DynamoDBStreamsShardHandle.compareShardIds(shardId, lastSeenShardIdOfStream) <= 0) {
+            // shardID update is valid only if the given shard id is greater
+            // than the previous last seen shard id of the stream.
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    protected RecordPublisher createRecordPublisher(
+            SequenceNumber sequenceNumber,
+            Properties configProps,
+            MetricGroup metricGroup,
+            StreamShardHandle subscribedShard)
+            throws InterruptedException {
+        StartingPosition startingPosition =
+                StartingPosition.continueFromSequenceNumber(sequenceNumber);
+        return recordPublisherFactory.create(
+                startingPosition, getConsumerConfiguration(), metricGroup, subscribedShard);
+    }
+}
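
This fetcher backs the DynamoDB Streams flavour of the consumer: it reuses the polling code path, orders shards with DynamoDBStreamsShardHandle.compareShardIds, and reads through DynamoDBStreamsProxy. A minimal usage sketch, assuming the existing FlinkDynamoDBStreamsConsumer entry point and an illustrative stream ARN:

    Properties config = new Properties();
    config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1");
    // DynamoDB Streams are addressed by the stream ARN instead of a Kinesis stream name.
    FlinkDynamoDBStreamsConsumer<String> source =
            new FlinkDynamoDBStreamsConsumer<>(
                    "arn:aws:dynamodb:us-east-1:123456789012:table/Example/stream/2022-01-01T00:00:00.000",
                    new SimpleStringSchema(),
                    config);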
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
new file mode 100644
index 0000000..4fcc80a
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -0,0 +1,1460 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.operators.ProcessingTimeService.ProcessingTimeCallback;
+import org.apache.flink.api.common.serialization.RuntimeContextInitializationContextAdapters;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
+import org.apache.flink.streaming.api.functions.source.SourceFunction;
+import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
+import org.apache.flink.streaming.api.watermark.Watermark;
+import org.apache.flink.streaming.connectors.kinesis.KinesisShardAssigner;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.metrics.KinesisConsumerMetricConstants;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
+import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
+import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Factory;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter;
+import org.apache.flink.streaming.connectors.kinesis.util.StreamConsumerRegistrarUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.WatermarkTracker;
+import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue;
+import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
+import org.apache.flink.util.InstantiationUtil;
+import org.apache.flink.util.Preconditions;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.POLLING;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A KinesisDataFetcher is responsible for fetching data from multiple Kinesis shards. Each parallel
+ * subtask instantiates and runs a single fetcher throughout the subtask's lifetime. The fetcher
+ * accomplishes the following:
+ *
+ * <ul>
+ *   <li>1. continuously poll Kinesis to discover shards that the subtask should subscribe to. The
+ *       subscribed subset of shards, including future new shards, is non-overlapping across
+ *       subtasks (no two subtasks will be subscribed to the same shard) and deterministic across
+ *       subtask restores (the subtask will always subscribe to the same subset of shards even after
+ *       restoring)
+ *   <li>2. decide where in each discovered shard the fetcher should start consuming from
+ *   <li>3. subscribe to shards by creating a single thread for each shard
+ * </ul>
+ *
+ * <p>The fetcher manages two states: 1) last seen shard ids of each subscribed stream (used for
+ * continuous shard discovery), and 2) last processed sequence numbers of each subscribed shard.
+ * Since operations on the second state will be performed by multiple threads, these operations
+ * should only be done using the handler methods provided in this class.
+ */
+@SuppressWarnings("unchecked")
+@Internal
+public class KinesisDataFetcher<T> {
+
+    public static final KinesisShardAssigner DEFAULT_SHARD_ASSIGNER =
+            (shard, subtasks) -> shard.hashCode();
+
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisDataFetcher.class);
+
+    // ------------------------------------------------------------------------
+    //  Consumer-wide settings
+    // ------------------------------------------------------------------------
+
+    /** Configuration properties for the Flink Kinesis Consumer. */
+    private final Properties configProps;
+
+    /** The list of Kinesis streams that the consumer is subscribing to. */
+    private final List<String> streams;
+
+    /**
+     * The deserialization schema we will be using to convert Kinesis records to Flink objects. Note
+     * that since this might not be thread-safe, {@link ShardConsumer}s using this must clone a copy
+     * using {@link KinesisDataFetcher#getClonedDeserializationSchema()}.
+     */
+    private final KinesisDeserializationSchema<T> deserializationSchema;
+
+    /** The function that determines which subtask a shard should be assigned to. */
+    private final KinesisShardAssigner shardAssigner;
+
+    // ------------------------------------------------------------------------
+    //  Consumer metrics
+    // ------------------------------------------------------------------------
+
+    /** The metric group that all metrics should be registered to. */
+    private final MetricGroup consumerMetricGroup;
+
+    // ------------------------------------------------------------------------
+    //  Subtask-specific settings
+    // ------------------------------------------------------------------------
+
+    /** Runtime context of the subtask that this fetcher was created in. */
+    private final RuntimeContext runtimeContext;
+
+    private final int totalNumberOfConsumerSubtasks;
+
+    private final int indexOfThisConsumerSubtask;
+
+    // ------------------------------------------------------------------------
+    //  Executor services to run created threads
+    // ------------------------------------------------------------------------
+
+    /** Executor service to run {@link ShardConsumer}s to consume Kinesis shards. */
+    private final ExecutorService shardConsumersExecutor;
+
+    // ------------------------------------------------------------------------
+    //  Managed state, accessed and updated across multiple threads
+    // ------------------------------------------------------------------------
+
+    /**
+     * The last discovered shard ids of each subscribed stream, updated as the fetcher discovers
+     * new shards. Note: this state will be updated if new shards are found when {@link
+     * KinesisDataFetcher#discoverNewShardsToSubscribe()} is called.
+     */
+    private final Map<String, String> subscribedStreamsToLastDiscoveredShardIds;
+
+    /**
+     * The shards, along with their last processed sequence numbers, that this fetcher is subscribed
+     * to. The fetcher will add new subscribed shard states to this list as it discovers new shards.
+     * {@link ShardConsumer} threads update the last processed sequence number of subscribed shards
+     * as they fetch and process records.
+     *
+     * <p>Note that since multiple {@link ShardConsumer} threads will be performing operations on
+     * this list, all operations must be wrapped in synchronized blocks on the {@link
+     * KinesisDataFetcher#checkpointLock} lock. For this purpose, all threads must use the following
+     * thread-safe methods this class provides to operate on this list:
+     *
+     * <ul>
+     *   <li>{@link KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)}
+     *   <li>{@link KinesisDataFetcher#updateState(int, SequenceNumber)}
+     *   <li>{@link KinesisDataFetcher#emitRecordAndUpdateState(T, long, int, SequenceNumber)}
+     * </ul>
+     */
+    private final List<KinesisStreamShardState> subscribedShardsState;
+
+    private final SourceFunction.SourceContext<T> sourceContext;
+
+    /** Checkpoint lock, also used to synchronize operations on subscribedShardsState. */
+    private final Object checkpointLock;
+
+    /** Reference to the first error thrown by any of the {@link ShardConsumer} threads. */
+    private final AtomicReference<Throwable> error;
+
+    /**
+     * The Kinesis proxy factory that will be used to create instances for discovery and shard
+     * consumers.
+     */
+    private final FlinkKinesisProxyFactory kinesisProxyFactory;
+
+    /**
+     * The Kinesis proxy V2 factory that will be used to create instances for EFO shard consumers.
+     */
+    private final FlinkKinesisProxyV2Factory kinesisProxyV2Factory;
+
+    /** The Kinesis proxy that the fetcher will be using to discover new shards. */
+    private final KinesisProxyInterface kinesis;
+
+    /** The factory used to create record publishers that consume from Kinesis shards. */
+    private final RecordPublisherFactory recordPublisherFactory;
+
+    private final CompletableFuture<Void> cancelFuture = new CompletableFuture<>();
+
+    /**
+     * The current number of shards that are actively read by this fetcher.
+     *
+     * <p>This value is updated in {@link
+     * KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)}, and {@link
+     * KinesisDataFetcher#updateState(int, SequenceNumber)}.
+     */
+    private final AtomicInteger numberOfActiveShards = new AtomicInteger(0);
+
+    private volatile boolean running = true;
+
+    private final AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner;
+    private final WatermarkTracker watermarkTracker;
+    private final RecordEmitter recordEmitter;
+    private boolean isIdle;
+
+    /**
+     * The watermark related state for each shard consumer. Entries in this map will be created when
+     * shards are discovered. After recovery, this shard map will be recreated, possibly with
+     * different shard index keys, since those are transient and not part of checkpointed state.
+     */
+    private ConcurrentHashMap<Integer, ShardWatermarkState> shardWatermarks =
+            new ConcurrentHashMap<>();
+
+    /**
+     * The most recent watermark, calculated from the per-shard watermarks. The initial value will
+     * never be emitted; this also applies after recovery. The first watermark that will be emitted
+     * is derived from actually consumed records. In case of recovery and replay, the watermark will
+     * rewind, consistent with the shard consumer sequence.
+     */
+    private long lastWatermark = Long.MIN_VALUE;
+
+    /**
+     * The next watermark used for synchronization. For purposes of global watermark calculation, we
+     * need to consider the next watermark based on the buffered records vs. the last emitted
+     * watermark to allow for progress.
+     */
+    private long nextWatermark = Long.MIN_VALUE;
+
+    /**
+     * The time span since the last consumed record, after which a shard will be considered idle
+     * for purposes of watermark calculation. A positive value will allow the watermark to progress
+     * even when some shards don't receive new records.
+     */
+    private long shardIdleIntervalMillis =
+            ConsumerConfigConstants.DEFAULT_SHARD_IDLE_INTERVAL_MILLIS;
+
+    /** Factory to create Kinesis proxy instances used by a fetcher. */
+    public interface FlinkKinesisProxyFactory {
+        KinesisProxyInterface create(Properties configProps);
+    }
+
+    /** Factory to create Kinesis proxy V2 instances used by a fetcher. */
+    public interface FlinkKinesisProxyV2Factory {
+        KinesisProxyV2Interface create(Properties configProps);
+    }
+
+    /**
+     * The wrapper that holds the watermark handling related parameters of a record produced by the
+     * shard consumer thread.
+     *
+     * @param <T> the record type.
+     */
+    private static class RecordWrapper<T> extends TimestampedValue<T> {
+        int shardStateIndex;
+        SequenceNumber lastSequenceNumber;
+        long timestamp;
+        Watermark watermark;
+
+        private RecordWrapper(T record, long timestamp) {
+            super(record, timestamp);
+            this.timestamp = timestamp;
+        }
+
+        @Override
+        public long getTimestamp() {
+            return timestamp;
+        }
+    }
+
+    /** Kinesis data fetcher specific, asynchronous record emitter. */
+    private class AsyncKinesisRecordEmitter extends RecordEmitter<RecordWrapper<T>> {
+
+        private AsyncKinesisRecordEmitter() {
+            this(DEFAULT_QUEUE_CAPACITY);
+        }
+
+        private AsyncKinesisRecordEmitter(int queueCapacity) {
+            super(queueCapacity);
+        }
+
+        @Override
+        public void emit(RecordWrapper<T> record, RecordQueue<RecordWrapper<T>> queue) {
+            emitRecordAndUpdateState(record);
+        }
+    }
+
+    /** Synchronous emitter for use without watermark synchronization. */
+    private class SyncKinesisRecordEmitter extends AsyncKinesisRecordEmitter {
+        private final ConcurrentHashMap<Integer, RecordQueue<RecordWrapper<T>>> queues =
+                new ConcurrentHashMap<>();
+
+        @Override
+        public RecordQueue<RecordWrapper<T>> getQueue(int producerIndex) {
+            return queues.computeIfAbsent(
+                    producerIndex,
+                    (key) ->
+                            new RecordQueue<RecordWrapper<T>>() {
+                                @Override
+                                public void put(RecordWrapper<T> record) {
+                                    emit(record, this);
+                                }
+
+                                @Override
+                                public int getSize() {
+                                    return 0;
+                                }
+
+                                @Override
+                                public RecordWrapper<T> peek() {
+                                    return null;
+                                }
+                            });
+        }
+    }
+
+    /**
+     * Creates a Kinesis Data Fetcher.
+     *
+     * @param streams the streams to subscribe to
+     * @param sourceContext context of the source function
+     * @param runtimeContext this subtask's runtime context
+     * @param configProps the consumer configuration properties
+     * @param deserializationSchema deserialization schema
+     */
+    public KinesisDataFetcher(
+            final List<String> streams,
+            final SourceFunction.SourceContext<T> sourceContext,
+            final RuntimeContext runtimeContext,
+            final Properties configProps,
+            final KinesisDeserializationSchema<T> deserializationSchema,
+            final KinesisShardAssigner shardAssigner,
+            final AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
+            final WatermarkTracker watermarkTracker) {
+        this(
+                streams,
+                sourceContext,
+                sourceContext.getCheckpointLock(),
+                runtimeContext,
+                configProps,
+                deserializationSchema,
+                shardAssigner,
+                periodicWatermarkAssigner,
+                watermarkTracker,
+                new AtomicReference<>(),
+                new ArrayList<>(),
+                createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
+                KinesisProxy::create,
+                KinesisProxyV2Factory::createKinesisProxyV2);
+    }
+
+    @VisibleForTesting
+    protected KinesisDataFetcher(
+            final List<String> streams,
+            final SourceFunction.SourceContext<T> sourceContext,
+            final Object checkpointLock,
+            final RuntimeContext runtimeContext,
+            final Properties configProps,
+            final KinesisDeserializationSchema<T> deserializationSchema,
+            final KinesisShardAssigner shardAssigner,
+            final AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
+            final WatermarkTracker watermarkTracker,
+            final AtomicReference<Throwable> error,
+            final List<KinesisStreamShardState> subscribedShardsState,
+            final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
+            final FlinkKinesisProxyFactory kinesisProxyFactory,
+            @Nullable final FlinkKinesisProxyV2Factory kinesisProxyV2Factory) {
+        this.streams = checkNotNull(streams);
+        this.configProps = checkNotNull(configProps);
+        this.sourceContext = checkNotNull(sourceContext);
+        this.checkpointLock = checkNotNull(checkpointLock);
+        this.runtimeContext = checkNotNull(runtimeContext);
+        this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
+        this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
+        this.deserializationSchema = checkNotNull(deserializationSchema);
+        this.shardAssigner = checkNotNull(shardAssigner);
+        this.periodicWatermarkAssigner = periodicWatermarkAssigner;
+        this.watermarkTracker = watermarkTracker;
+        this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
+        this.kinesisProxyV2Factory = kinesisProxyV2Factory;
+        this.kinesis = kinesisProxyFactory.create(configProps);
+        this.recordPublisherFactory = createRecordPublisherFactory();
+
+        this.consumerMetricGroup =
+                runtimeContext
+                        .getMetricGroup()
+                        .addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);
+
+        this.error = checkNotNull(error);
+        this.subscribedShardsState = checkNotNull(subscribedShardsState);
+        this.subscribedStreamsToLastDiscoveredShardIds =
+                checkNotNull(subscribedStreamsToLastDiscoveredShardIds);
+
+        this.shardConsumersExecutor =
+                createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
+
+        this.recordEmitter = createRecordEmitter(configProps);
+
+        StreamConsumerRegistrarUtil.lazilyRegisterStreamConsumers(configProps, streams);
+    }
+
+    private RecordEmitter createRecordEmitter(Properties configProps) {
+        if (periodicWatermarkAssigner != null && watermarkTracker != null) {
+            int queueCapacity =
+                    Integer.parseInt(
+                            configProps.getProperty(
+                                    ConsumerConfigConstants.WATERMARK_SYNC_QUEUE_CAPACITY,
+                                    Integer.toString(
+                                            AsyncKinesisRecordEmitter.DEFAULT_QUEUE_CAPACITY)));
+            return new AsyncKinesisRecordEmitter(queueCapacity);
+        }
+        return new SyncKinesisRecordEmitter();
+    }
+
+    /**
+     * Create a new shard consumer. Override this method to customize shard consumer behavior in
+     * subclasses.
+     *
+     * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
+     * @param subscribedShard the shard this consumer is subscribed to
+     * @param lastSequenceNum the sequence number in the shard to start consuming
+     * @param metricGroup the metric group to report metrics to
+     * @return shard consumer
+     */
+    protected ShardConsumer<T> createShardConsumer(
+            final Integer subscribedShardStateIndex,
+            final StreamShardHandle subscribedShard,
+            final SequenceNumber lastSequenceNum,
+            final MetricGroup metricGroup,
+            final KinesisDeserializationSchema<T> shardDeserializer)
+            throws InterruptedException {
+
+        return new ShardConsumer<>(
+                this,
+                createRecordPublisher(lastSequenceNum, configProps, metricGroup, subscribedShard),
+                subscribedShardStateIndex,
+                subscribedShard,
+                lastSequenceNum,
+                new ShardConsumerMetricsReporter(metricGroup),
+                shardDeserializer);
+    }
+
+    protected RecordPublisherFactory createRecordPublisherFactory() {
+        RecordPublisherType recordPublisherType =
+                RecordPublisherType.valueOf(
+                        configProps.getProperty(RECORD_PUBLISHER_TYPE, POLLING.name()));
+
+        switch (recordPublisherType) {
+            case EFO:
+                return new FanOutRecordPublisherFactory(kinesisProxyV2Factory.create(configProps));
+            case POLLING:
+            default:
+                return new PollingRecordPublisherFactory(kinesisProxyFactory);
+        }
+    }
+
+    protected RecordPublisher createRecordPublisher(
+            final SequenceNumber sequenceNumber,
+            final Properties configProps,
+            final MetricGroup metricGroup,
+            final StreamShardHandle subscribedShard)
+            throws InterruptedException {
+
+        StartingPosition startingPosition =
+                AWSUtil.getStartingPosition(sequenceNumber, configProps);
+        return recordPublisherFactory.create(
+                startingPosition, configProps, metricGroup, subscribedShard);
+    }
+
+    /**
+     * Starts the fetcher. After starting the fetcher, it can only be stopped by calling {@link
+     * KinesisDataFetcher#shutdownFetcher()}.
+     *
+     * @throws Exception the first error or exception thrown by the fetcher or any of the threads
+     *     created by the fetcher.
+     */
+    public void runFetcher() throws Exception {
+
+        // check that we are running before proceeding
+        if (!running) {
+            return;
+        }
+
+        // ------------------------------------------------------------------------
+        //  Procedures before starting the infinite while loop:
+        // ------------------------------------------------------------------------
+
+        //  1. check that there is at least one shard in the subscribed streams to consume from (can
+        // be done by
+        //     checking if at least one value in subscribedStreamsToLastDiscoveredShardIds is not
+        // null)
+        boolean hasShards = false;
+        StringBuilder streamsWithNoShardsFound = new StringBuilder();
+        for (Map.Entry<String, String> streamToLastDiscoveredShardEntry :
+                subscribedStreamsToLastDiscoveredShardIds.entrySet()) {
+            if (streamToLastDiscoveredShardEntry.getValue() != null) {
+                hasShards = true;
+            } else {
+                streamsWithNoShardsFound
+                        .append(streamToLastDiscoveredShardEntry.getKey())
+                        .append(", ");
+            }
+        }
+
+        if (streamsWithNoShardsFound.length() != 0 && LOG.isWarnEnabled()) {
+            LOG.warn(
+                    "Subtask {} has failed to find any shards for the following subscribed streams: {}",
+                    indexOfThisConsumerSubtask,
+                    streamsWithNoShardsFound.toString());
+        }
+
+        if (!hasShards) {
+            throw new RuntimeException(
+                    "No shards can be found for all subscribed streams: " + streams);
+        }
+
+        //  2. start consuming any shard state we already have in the subscribedShardState up to
+        // this point; the
+        //     subscribedShardState may already be seeded with values due to step 1., or explicitly
+        // added by the
+        //     consumer using a restored state checkpoint
+        for (int seededStateIndex = 0;
+                seededStateIndex < subscribedShardsState.size();
+                seededStateIndex++) {
+            KinesisStreamShardState seededShardState = subscribedShardsState.get(seededStateIndex);
+
+            // only start a consuming thread if the seeded subscribed shard has not been completely
+            // read already
+            if (!seededShardState
+                    .getLastProcessedSequenceNum()
+                    .equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
+
+                if (LOG.isInfoEnabled()) {
+                    LOG.info(
+                            "Subtask {} will start consuming seeded shard {} from sequence number {} with ShardConsumer {}",
+                            indexOfThisConsumerSubtask,
+                            seededShardState.getStreamShardHandle().toString(),
+                            seededShardState.getLastProcessedSequenceNum(),
+                            seededStateIndex);
+                }
+
+                StreamShardHandle streamShardHandle =
+                        subscribedShardsState.get(seededStateIndex).getStreamShardHandle();
+                KinesisDeserializationSchema<T> shardDeserializationSchema =
+                        getClonedDeserializationSchema();
+                shardDeserializationSchema.open(
+                        RuntimeContextInitializationContextAdapters.deserializationAdapter(
+                                runtimeContext,
+                                // ignore the provided metric group
+                                metricGroup ->
+                                        consumerMetricGroup
+                                                .addGroup(
+                                                        "subtaskId",
+                                                        String.valueOf(indexOfThisConsumerSubtask))
+                                                .addGroup(
+                                                        "shardId",
+                                                        streamShardHandle.getShard().getShardId())
+                                                .addGroup("user")));
+                shardConsumersExecutor.submit(
+                        createShardConsumer(
+                                seededStateIndex,
+                                streamShardHandle,
+                                subscribedShardsState
+                                        .get(seededStateIndex)
+                                        .getLastProcessedSequenceNum(),
+                                registerShardMetricGroup(
+                                        consumerMetricGroup,
+                                        subscribedShardsState.get(seededStateIndex)),
+                                shardDeserializationSchema));
+            }
+        }
+
+        // start periodic watermark emitter, if a watermark assigner was configured
+        if (periodicWatermarkAssigner != null) {
+            long periodicWatermarkIntervalMillis =
+                    runtimeContext.getExecutionConfig().getAutoWatermarkInterval();
+            if (periodicWatermarkIntervalMillis > 0) {
+                ProcessingTimeService timerService =
+                        ((StreamingRuntimeContext) runtimeContext).getProcessingTimeService();
+                LOG.info(
+                        "Starting periodic watermark emitter with interval {}",
+                        periodicWatermarkIntervalMillis);
+                new PeriodicWatermarkEmitter(timerService, periodicWatermarkIntervalMillis).start();
+                if (watermarkTracker != null) {
+                    // setup global watermark tracking
+                    long watermarkSyncMillis =
+                            Long.parseLong(
+                                    getConsumerConfiguration()
+                                            .getProperty(
+                                                    ConsumerConfigConstants.WATERMARK_SYNC_MILLIS,
+                                                    Long.toString(
+                                                            ConsumerConfigConstants
+                                                                    .DEFAULT_WATERMARK_SYNC_MILLIS)));
+                    watermarkTracker.setUpdateTimeoutMillis(
+                            watermarkSyncMillis * 3); // synchronization latency
+                    watermarkTracker.open(runtimeContext);
+                    new WatermarkSyncCallback(timerService, watermarkSyncMillis).start();
+                    // emit records ahead of watermark to offset synchronization latency
+                    long lookaheadMillis =
+                            Long.parseLong(
+                                    getConsumerConfiguration()
+                                            .getProperty(
+                                                    ConsumerConfigConstants
+                                                            .WATERMARK_LOOKAHEAD_MILLIS,
+                                                    Long.toString(0)));
+                    recordEmitter.setMaxLookaheadMillis(
+                            Math.max(lookaheadMillis, watermarkSyncMillis * 3));
+
+                    // record emitter depends on periodic watermark
+                    // it runs in a separate thread since main thread is used for discovery
+                    Runnable recordEmitterRunnable =
+                            new Runnable() {
+                                @Override
+                                public void run() {
+                                    try {
+                                        recordEmitter.run();
+                                    } catch (Throwable error) {
+                                        // report the error that terminated the emitter loop to
+                                        // source thread
+                                        stopWithError(error);
+                                    }
+                                }
+                            };
+
+                    Thread thread = new Thread(recordEmitterRunnable);
+                    thread.setName("recordEmitter-" + runtimeContext.getTaskNameWithSubtasks());
+                    thread.setDaemon(true);
+                    thread.start();
+                }
+            }
+            this.shardIdleIntervalMillis =
+                    Long.parseLong(
+                            getConsumerConfiguration()
+                                    .getProperty(
+                                            ConsumerConfigConstants.SHARD_IDLE_INTERVAL_MILLIS,
+                                            Long.toString(
+                                                    ConsumerConfigConstants
+                                                            .DEFAULT_SHARD_IDLE_INTERVAL_MILLIS)));
+        }
+
+        // ------------------------------------------------------------------------
+
+        // finally, start the infinite shard discovery and consumer launching loop;
+        // we will escape from this loop only when shutdownFetcher() or stopWithError() is called
+        // TODO: have this thread emit the records for tracking backpressure
+
+        final long discoveryIntervalMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS)));
+
+        if (this.numberOfActiveShards.get() == 0) {
+            LOG.info(
+                    "Subtask {} has no active shards to read on startup; marking the subtask as temporarily idle ...",
+                    indexOfThisConsumerSubtask);
+            sourceContext.markAsTemporarilyIdle();
+        }
+
+        while (running) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug(
+                        "Subtask {} is trying to discover new shards that were created due to resharding ...",
+                        indexOfThisConsumerSubtask);
+            }
+            List<StreamShardHandle> newShardsDueToResharding = discoverNewShardsToSubscribe();
+
+            for (StreamShardHandle shard : newShardsDueToResharding) {
+                // since there may be delay in discovering a new shard, all new shards due to
+                // resharding should be read starting from the earliest record possible
+                KinesisStreamShardState newShardState =
+                        new KinesisStreamShardState(
+                                convertToStreamShardMetadata(shard),
+                                shard,
+                                SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get());
+                int newStateIndex = registerNewSubscribedShardState(newShardState);
+
+                if (LOG.isInfoEnabled()) {
+                    LOG.info(
+                            "Subtask {} has discovered a new shard {} due to resharding, and will start consuming "
+                                    + "the shard from sequence number {} with ShardConsumer {}",
+                            indexOfThisConsumerSubtask,
+                            newShardState.getStreamShardHandle().toString(),
+                            newShardState.getLastProcessedSequenceNum(),
+                            newStateIndex);
+                }
+
+                StreamShardHandle streamShardHandle = newShardState.getStreamShardHandle();
+                KinesisDeserializationSchema<T> shardDeserializationSchema =
+                        getClonedDeserializationSchema();
+                shardDeserializationSchema.open(
+                        RuntimeContextInitializationContextAdapters.deserializationAdapter(
+                                runtimeContext,
+                                // ignore the provided metric group
+                                metricGroup ->
+                                        consumerMetricGroup
+                                                .addGroup(
+                                                        "subtaskId",
+                                                        String.valueOf(indexOfThisConsumerSubtask))
+                                                .addGroup(
+                                                        "shardId",
+                                                        streamShardHandle.getShard().getShardId())
+                                                .addGroup("user")));
+                shardConsumersExecutor.submit(
+                        createShardConsumer(
+                                newStateIndex,
+                                newShardState.getStreamShardHandle(),
+                                newShardState.getLastProcessedSequenceNum(),
+                                registerShardMetricGroup(consumerMetricGroup, newShardState),
+                                shardDeserializationSchema));
+            }
+
+            // we also check if we are running here so that we won't start the discovery sleep
+            // interval if the running flag was set to false during the middle of the while loop
+            if (running && discoveryIntervalMillis != 0) {
+                try {
+                    cancelFuture.get(discoveryIntervalMillis, TimeUnit.MILLISECONDS);
+                    LOG.debug("Cancelled discovery");
+                } catch (TimeoutException iex) {
+                    // timeout is expected when fetcher is not cancelled
+                }
+            }
+        }
+
+        // make sure all resources have been terminated before leaving
+        try {
+            awaitTermination();
+        } catch (InterruptedException ie) {
+            // If there is an original exception, preserve it, since that's more important/useful.
+            this.error.compareAndSet(null, ie);
+        }
+
+        // any error thrown in the shard consumer threads will be thrown to the main thread
+        Throwable throwable = this.error.get();
+        if (throwable != null) {
+            if (throwable instanceof Exception) {
+                throw (Exception) throwable;
+            } else if (throwable instanceof Error) {
+                throw (Error) throwable;
+            } else {
+                throw new Exception(throwable);
+            }
+        }
+    }
+
+    /**
+     * Creates a snapshot of the current last processed sequence numbers of each subscribed shard.
+     *
+     * @return state snapshot
+     */
+    public HashMap<StreamShardMetadata, SequenceNumber> snapshotState() {
+        // this method assumes that the checkpoint lock is held
+        assert Thread.holdsLock(checkpointLock);
+
+        HashMap<StreamShardMetadata, SequenceNumber> stateSnapshot = new HashMap<>();
+        for (KinesisStreamShardState shardWithState : subscribedShardsState) {
+            stateSnapshot.put(
+                    shardWithState.getStreamShardMetadata(),
+                    shardWithState.getLastProcessedSequenceNum());
+        }
+        return stateSnapshot;
+    }
+
+    /**
+     * Starts shutting down the fetcher. Must be called to allow {@link
+     * KinesisDataFetcher#runFetcher()} to complete. Once called, the shutdown procedure will be
+     * executed and all shard consuming threads will be interrupted.
+     */
+    public void shutdownFetcher() {
+        LOG.info(
+                "Starting shutdown of shard consumer threads and AWS SDK resources of subtask {} ...",
+                indexOfThisConsumerSubtask,
+                error.get());
+
+        running = false;
+        try {
+            try {
+                deregisterStreamConsumer();
+            } catch (Exception e) {
+                LOG.warn("Encountered exception deregistering stream consumers", e);
+            }
+
+            try {
+                closeRecordPublisherFactory();
+            } catch (Exception e) {
+                LOG.warn("Encountered exception closing record publisher factory", e);
+            }
+        } finally {
+            gracefulShutdownShardConsumers();
+
+            cancelFuture.complete(null);
+
+            if (watermarkTracker != null) {
+                watermarkTracker.close();
+            }
+            this.recordEmitter.stop();
+        }
+
+        LOG.info(
+                "Shutting down the shard consumer threads of subtask {} ...",
+                indexOfThisConsumerSubtask);
+    }
+
+    /**
+     * Closes the recordPublisherFactory. Allows tests to override this to simulate an exception in
+     * the shutdown logic.
+     */
+    @VisibleForTesting
+    protected void closeRecordPublisherFactory() {
+        recordPublisherFactory.close();
+    }
+
+    /**
+     * Deregisters stream consumers. Allows tests to override this to simulate an exception in the
+     * shutdown logic.
+     */
+    @VisibleForTesting
+    protected void deregisterStreamConsumer() {
+        StreamConsumerRegistrarUtil.deregisterStreamConsumers(configProps, streams);
+    }
+
+    /** Gracefully stops shardConsumersExecutor without interrupting running threads. */
+    private void gracefulShutdownShardConsumers() {
+        shardConsumersExecutor.shutdown();
+    }
+
+    /**
+     * Returns a flag indicating if this fetcher is running.
+     *
+     * @return true if the fetcher is running, false if it has been shut down
+     */
+    boolean isRunning() {
+        return running;
+    }
+
+    /**
+     * After calling {@link KinesisDataFetcher#shutdownFetcher()}, this can be called to await the
+     * fetcher shutdown.
+     */
+    @SuppressWarnings("StatementWithEmptyBody")
+    public void awaitTermination() throws InterruptedException {
+        while (!shardConsumersExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
+            // Keep waiting.
+        }
+    }
+
+    /**
+     * Called by created threads to pass on errors. Only the first thrown error is set. Once set,
+     * the shutdown process will be executed and all shard consuming threads will be interrupted.
+     */
+    protected void stopWithError(Throwable throwable) {
+        if (this.error.compareAndSet(null, throwable)) {
+            shutdownFetcher();
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    //  Functions that update the subscribedStreamToLastDiscoveredShardIds state
+    // ------------------------------------------------------------------------
+
+    /**
+     * Updates the last discovered shard of a subscribed stream; only updates if the update is
+     * valid.
+     */
+    public void advanceLastDiscoveredShardOfStream(String stream, String shardId) {
+        String lastSeenShardIdOfStream = this.subscribedStreamsToLastDiscoveredShardIds.get(stream);
+
+        // the update is valid only if the given shard id is greater
+        // than the previous last seen shard id of the stream
+        if (lastSeenShardIdOfStream == null) {
+            // if not previously set, simply put as the last seen shard id
+            this.subscribedStreamsToLastDiscoveredShardIds.put(stream, shardId);
+        } else if (shouldAdvanceLastDiscoveredShardId(shardId, lastSeenShardIdOfStream)) {
+            this.subscribedStreamsToLastDiscoveredShardIds.put(stream, shardId);
+        }
+    }
+
+    /** Given lastSeenShardId, check if last discovered shardId should be advanced. */
+    protected boolean shouldAdvanceLastDiscoveredShardId(
+            String shardId, String lastSeenShardIdOfStream) {
+        return (StreamShardHandle.compareShardIds(shardId, lastSeenShardIdOfStream) > 0);
+    }
+
+    /**
+     * A utility function that does the following:
+     *
+     * <ol>
+     *   <li>Find new shards for each stream that we haven't seen before
+     *   <li>For each new shard, determine whether this consumer subtask should subscribe to it; if
+     *       yes, it is added to the returned list of shards
+     *   <li>Update the subscribedStreamsToLastDiscoveredShardIds state so that we won't get shards
+     *       that we have already seen before the next time this function is called
+     * </ol>
+     */
+    public List<StreamShardHandle> discoverNewShardsToSubscribe() throws InterruptedException {
+
+        List<StreamShardHandle> newShardsToSubscribe = new LinkedList<>();
+
+        GetShardListResult shardListResult =
+                kinesis.getShardList(subscribedStreamsToLastDiscoveredShardIds);
+        if (shardListResult.hasRetrievedShards()) {
+            Set<String> streamsWithNewShards = shardListResult.getStreamsWithRetrievedShards();
+
+            for (String stream : streamsWithNewShards) {
+                List<StreamShardHandle> newShardsOfStream =
+                        shardListResult.getRetrievedShardListOfStream(stream);
+                for (StreamShardHandle newShard : newShardsOfStream) {
+                    int hashCode = shardAssigner.assign(newShard, totalNumberOfConsumerSubtasks);
+                    if (isThisSubtaskShouldSubscribeTo(
+                            hashCode, totalNumberOfConsumerSubtasks, indexOfThisConsumerSubtask)) {
+                        newShardsToSubscribe.add(newShard);
+                    }
+                }
+
+                advanceLastDiscoveredShardOfStream(
+                        stream,
+                        shardListResult.getLastSeenShardOfStream(stream).getShard().getShardId());
+            }
+        }
+
+        return newShardsToSubscribe;
+    }
+
+    // ------------------------------------------------------------------------
+    //  Functions to get / set information about the consumer
+    // ------------------------------------------------------------------------
+
+    protected Properties getConsumerConfiguration() {
+        return configProps;
+    }
+
+    private KinesisDeserializationSchema<T> getClonedDeserializationSchema() {
+        try {
+            return InstantiationUtil.clone(
+                    deserializationSchema, runtimeContext.getUserCodeClassLoader());
+        } catch (IOException | ClassNotFoundException ex) {
+            // this really shouldn't happen; simply wrap it around a runtime exception
+            throw new RuntimeException(ex);
+        }
+    }
+
+    // ------------------------------------------------------------------------
+    //  Thread-safe operations for record emitting and shard state updating
+    //  that assure atomicity with respect to the checkpoint lock
+    // ------------------------------------------------------------------------
+
+    /**
+     * Prepare a record and hand it over to the {@link RecordEmitter}, which may collect it
+     * asynchronously. This method is called by {@link ShardConsumer}s.
+     *
+     * @param record the record to collect
+     * @param recordTimestamp timestamp to attach to the collected record
+     * @param shardStateIndex index of the shard to update in subscribedShardsState; this index
+     *     should be the returned value from {@link
+     *     KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)}, called when
+     *     the shard state was registered.
+     * @param lastSequenceNumber the last sequence number value to update
+     */
+    protected void emitRecordAndUpdateState(
+            T record,
+            long recordTimestamp,
+            int shardStateIndex,
+            SequenceNumber lastSequenceNumber) {
+        ShardWatermarkState sws = shardWatermarks.get(shardStateIndex);
+        Preconditions.checkNotNull(
+                sws,
+                "shard watermark state should have been initialized in registerNewSubscribedShardState");
+        Watermark watermark = null;
+        if (sws.periodicWatermarkAssigner != null) {
+            recordTimestamp =
+                    sws.periodicWatermarkAssigner.extractTimestamp(record, sws.lastRecordTimestamp);
+            // track watermark per record since extractTimestamp has side effect
+            watermark = sws.periodicWatermarkAssigner.getCurrentWatermark();
+        }
+        sws.lastRecordTimestamp = recordTimestamp;
+        sws.lastUpdated = getCurrentTimeMillis();
+
+        RecordWrapper<T> recordWrapper = new RecordWrapper<>(record, recordTimestamp);
+        recordWrapper.shardStateIndex = shardStateIndex;
+        recordWrapper.lastSequenceNumber = lastSequenceNumber;
+        recordWrapper.watermark = watermark;
+        try {
+            sws.emitQueue.put(recordWrapper);
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Atomic operation to collect a record and update state to the sequence number of the record.
+     * This method is called from the record emitter.
+     *
+     * <p>Responsible for tracking per shard watermarks and emit timestamps extracted from the
+     * record, when a watermark assigner was configured.
+     */
+    private void emitRecordAndUpdateState(RecordWrapper<T> rw) {
+        synchronized (checkpointLock) {
+            if (rw.getValue() != null) {
+                sourceContext.collectWithTimestamp(rw.getValue(), rw.timestamp);
+                ShardWatermarkState<T> sws = shardWatermarks.get(rw.shardStateIndex);
+                sws.lastEmittedRecordWatermark = rw.watermark;
+            } else {
+                LOG.warn(
+                        "Skipping non-deserializable record at sequence number {} of shard {}.",
+                        rw.lastSequenceNumber,
+                        subscribedShardsState.get(rw.shardStateIndex).getStreamShardHandle());
+            }
+            updateState(rw.shardStateIndex, rw.lastSequenceNumber);
+        }
+    }
+
+    /**
+     * Update the shard to last processed sequence number state. This method is called by {@link
+     * ShardConsumer}s.
+     *
+     * @param shardStateIndex index of the shard to update in subscribedShardsState; this index
+     *     should be the returned value from {@link
+     *     KinesisDataFetcher#registerNewSubscribedShardState(KinesisStreamShardState)}, called when
+     *     the shard state was registered.
+     * @param lastSequenceNumber the last sequence number value to update
+     */
+    protected final void updateState(int shardStateIndex, SequenceNumber lastSequenceNumber) {
+        synchronized (checkpointLock) {
+            subscribedShardsState
+                    .get(shardStateIndex)
+                    .setLastProcessedSequenceNum(lastSequenceNumber);
+
+            // if a shard's state is updated to be SENTINEL_SHARD_ENDING_SEQUENCE_NUM by its
+            // consumer thread, we've finished reading the shard and should determine it to be
+            // non-active
+            if (lastSequenceNumber.equals(
+                    SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
+                LOG.info(
+                        "Subtask {} has reached the end of subscribed shard: {}",
+                        indexOfThisConsumerSubtask,
+                        subscribedShardsState.get(shardStateIndex).getStreamShardHandle());
+
+                // check if we need to mark the source as idle;
+                // note that on resharding, if registerNewSubscribedShardState was invoked for
+                // newly discovered shards AFTER the old shards had reached the end, the subtask's
+                // status will automatically be toggled back to active as soon as we collect
+                // records from the new shards
+                if (this.numberOfActiveShards.decrementAndGet() == 0) {
+                    LOG.info(
+                            "Subtask {} has reached the end of all currently subscribed shards; marking the subtask as temporarily idle ...",
+                            indexOfThisConsumerSubtask);
+
+                    sourceContext.markAsTemporarilyIdle();
+                }
+            }
+        }
+    }
+
+    /**
+     * Register a new subscribed shard state.
+     *
+     * @param newSubscribedShardState the new shard state that this fetcher is to be subscribed to
+     */
+    public int registerNewSubscribedShardState(KinesisStreamShardState newSubscribedShardState) {
+        synchronized (checkpointLock) {
+            subscribedShardsState.add(newSubscribedShardState);
+
+            // If a registered shard has an initial state that is not
+            // SENTINEL_SHARD_ENDING_SEQUENCE_NUM (the sentinel would be the case if the consumer
+            // had already finished reading the shard before we failed and restored), we determine
+            // that this subtask has a new active shard
+            if (!newSubscribedShardState
+                    .getLastProcessedSequenceNum()
+                    .equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
+                this.numberOfActiveShards.incrementAndGet();
+            }
+
+            int shardStateIndex = subscribedShardsState.size() - 1;
+
+            // track all discovered shards for watermark determination
+            ShardWatermarkState sws = shardWatermarks.get(shardStateIndex);
+            if (sws == null) {
+                sws = new ShardWatermarkState();
+                try {
+                    sws.periodicWatermarkAssigner =
+                            InstantiationUtil.clone(periodicWatermarkAssigner);
+                } catch (Exception e) {
+                    throw new RuntimeException("Failed to instantiate new WatermarkAssigner", e);
+                }
+                sws.emitQueue = recordEmitter.getQueue(shardStateIndex);
+                sws.lastUpdated = getCurrentTimeMillis();
+                sws.lastRecordTimestamp = Long.MIN_VALUE;
+                shardWatermarks.put(shardStateIndex, sws);
+            }
+
+            return shardStateIndex;
+        }
+    }
+
+    /**
+     * Return the current system time. Allow tests to override this to simulate progress for
+     * watermark logic.
+     *
+     * @return current processing time
+     */
+    @VisibleForTesting
+    protected long getCurrentTimeMillis() {
+        return System.currentTimeMillis();
+    }
+
+    /**
+     * Called periodically to emit a watermark. Checks all shards for the current event time
+     * watermark, and possibly emits the next watermark.
+     *
+     * <p>Shards that have not received an update for a certain interval are considered inactive so
+     * as to not hold back the watermark indefinitely. When all shards are inactive, the subtask
+     * will be marked as temporarily idle to not block downstream operators.
+     */
+    @VisibleForTesting
+    protected void emitWatermark() {
+        LOG.debug(
+                "Evaluating watermark for subtask {} time {}",
+                indexOfThisConsumerSubtask,
+                getCurrentTimeMillis());
+        long potentialWatermark = Long.MAX_VALUE;
+        long potentialNextWatermark = Long.MAX_VALUE;
+        long idleTime =
+                (shardIdleIntervalMillis > 0)
+                        ? getCurrentTimeMillis() - shardIdleIntervalMillis
+                        : Long.MAX_VALUE;
+
+        for (Map.Entry<Integer, ShardWatermarkState> e : shardWatermarks.entrySet()) {
+            Watermark w = e.getValue().lastEmittedRecordWatermark;
+            // consider only active shards, or those that would advance the watermark
+            if (w != null
+                    && (e.getValue().lastUpdated >= idleTime
+                            || e.getValue().emitQueue.getSize() > 0
+                            || w.getTimestamp() > lastWatermark)) {
+                potentialWatermark = Math.min(potentialWatermark, w.getTimestamp());
+                // for sync, use the watermark of the next record, when available
+                // otherwise watermark may stall when record is blocked by synchronization
+                RecordEmitter.RecordQueue<RecordWrapper<T>> q = e.getValue().emitQueue;
+                RecordWrapper<T> nextRecord = q.peek();
+                Watermark nextWatermark = (nextRecord != null) ? nextRecord.watermark : w;
+                potentialNextWatermark =
+                        Math.min(potentialNextWatermark, nextWatermark.getTimestamp());
+            }
+        }
+
+        // advance watermark if possible (watermarks can only be ascending)
+        if (potentialWatermark == Long.MAX_VALUE) {
+            if (shardWatermarks.isEmpty() || shardIdleIntervalMillis > 0) {
+                LOG.info(
+                        "No active shard for subtask {}, marking the source idle.",
+                        indexOfThisConsumerSubtask);
+                // no active shard, signal downstream operators to not wait for a watermark
+                sourceContext.markAsTemporarilyIdle();
+                isIdle = true;
+            }
+        } else {
+            if (potentialWatermark > lastWatermark) {
+                LOG.debug(
+                        "Emitting watermark {} from subtask {}",
+                        potentialWatermark,
+                        indexOfThisConsumerSubtask);
+                sourceContext.emitWatermark(new Watermark(potentialWatermark));
+                lastWatermark = potentialWatermark;
+                isIdle = false;
+            }
+            nextWatermark = potentialNextWatermark;
+        }
+    }
+
+    /** Per shard tracking of watermark and last activity. */
+    private static class ShardWatermarkState<T> {
+        private AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner;
+        private RecordEmitter.RecordQueue<RecordWrapper<T>> emitQueue;
+        private volatile long lastRecordTimestamp;
+        private volatile long lastUpdated;
+        private volatile Watermark lastEmittedRecordWatermark;
+    }
+
+    /**
+     * The periodic watermark emitter. In its given interval, it checks all shards for the current
+     * event time watermark, and possibly emits the next watermark.
+     */
+    private class PeriodicWatermarkEmitter implements ProcessingTimeCallback {
+
+        private final ProcessingTimeService timerService;
+        private final long interval;
+
+        PeriodicWatermarkEmitter(ProcessingTimeService timerService, long autoWatermarkInterval) {
+            this.timerService = checkNotNull(timerService);
+            this.interval = autoWatermarkInterval;
+        }
+
+        public void start() {
+            LOG.debug("registering periodic watermark timer with interval {}", interval);
+            timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
+        }
+
+        @Override
+        public void onProcessingTime(long timestamp) {
+            emitWatermark();
+            // schedule the next watermark
+            timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
+        }
+    }
+
+    /** Timer task to update shared watermark state. */
+    private class WatermarkSyncCallback implements ProcessingTimeCallback {
+
+        private static final long LOG_INTERVAL_MILLIS = 60_000;
+
+        private final ProcessingTimeService timerService;
+        private final long interval;
+        private long lastGlobalWatermark = Long.MIN_VALUE;
+        private long propagatedLocalWatermark = Long.MIN_VALUE;
+        private int stalledWatermarkIntervalCount = 0;
+        private long lastLogged;
+
+        WatermarkSyncCallback(ProcessingTimeService timerService, long interval) {
+            this.timerService = checkNotNull(timerService);
+            this.interval = interval;
+            MetricGroup shardMetricsGroup =
+                    consumerMetricGroup.addGroup(
+                            "subtaskId", String.valueOf(indexOfThisConsumerSubtask));
+            shardMetricsGroup.gauge("localWatermark", () -> nextWatermark);
+            shardMetricsGroup.gauge("globalWatermark", () -> lastGlobalWatermark);
+        }
+
+        public void start() {
+            LOG.info("Registering watermark tracker with interval {}", interval);
+            timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
+        }
+
+        @Override
+        public void onProcessingTime(long timestamp) {
+            if (nextWatermark != Long.MIN_VALUE) {
+                long globalWatermark = lastGlobalWatermark;
+                // TODO: refresh watermark while idle
+                if (!(isIdle && nextWatermark == propagatedLocalWatermark)) {
+                    globalWatermark = watermarkTracker.updateWatermark(nextWatermark);
+                    propagatedLocalWatermark = nextWatermark;
+                } else {
+                    LOG.info(
+                            "WatermarkSyncCallback subtask: {} is idle",
+                            indexOfThisConsumerSubtask);
+                }
+
+                if (timestamp - lastLogged > LOG_INTERVAL_MILLIS) {
+                    lastLogged = System.currentTimeMillis();
+                    LOG.info(
+                            "WatermarkSyncCallback subtask: {} local watermark: {}"
+                                    + ", global watermark: {}, delta: {} timeouts: {}, emitter: {}",
+                            indexOfThisConsumerSubtask,
+                            nextWatermark,
+                            globalWatermark,
+                            nextWatermark - globalWatermark,
+                            watermarkTracker.getUpdateTimeoutCount(),
+                            recordEmitter.printInfo());
+
+                    // Following is for debugging non-reproducible issue with stalled watermark
+                    if (globalWatermark == nextWatermark
+                            && globalWatermark == lastGlobalWatermark
+                            && stalledWatermarkIntervalCount++ > 5) {
+                        // subtask blocks watermark, log to aid troubleshooting
+                        stalledWatermarkIntervalCount = 0;
+                        for (Map.Entry<Integer, ShardWatermarkState> e :
+                                shardWatermarks.entrySet()) {
+                            RecordEmitter.RecordQueue<RecordWrapper<T>> q = e.getValue().emitQueue;
+                            RecordWrapper<T> nextRecord = q.peek();
+                            if (nextRecord != null) {
+                                LOG.info(
+                                        "stalled watermark {} key {} next watermark {} next timestamp {}",
+                                        nextWatermark,
+                                        e.getKey(),
+                                        nextRecord.watermark,
+                                        nextRecord.timestamp);
+                            }
+                        }
+                    }
+                }
+
+                lastGlobalWatermark = globalWatermark;
+                recordEmitter.setCurrentWatermark(globalWatermark);
+            }
+            // schedule next callback
+            timerService.registerTimer(timerService.getCurrentProcessingTime() + interval, this);
+        }
+    }
+
+    /**
+     * Registers a metric group associated with the shard id of the provided {@link
+     * KinesisStreamShardState shardState}.
+     *
+     * @return a {@link MetricGroup} that can be used to update metric values
+     */
+    private MetricGroup registerShardMetricGroup(
+            final MetricGroup metricGroup, final KinesisStreamShardState shardState) {
+        return metricGroup
+                .addGroup(
+                        KinesisConsumerMetricConstants.STREAM_METRICS_GROUP,
+                        shardState.getStreamShardHandle().getStreamName())
+                .addGroup(
+                        KinesisConsumerMetricConstants.SHARD_METRICS_GROUP,
+                        shardState.getStreamShardHandle().getShard().getShardId());
+    }
+
+    // ------------------------------------------------------------------------
+    //  Miscellaneous utility functions
+    // ------------------------------------------------------------------------
+
+    /**
+     * Utility function to determine whether a shard should be subscribed by this consumer subtask.
+     *
+     * @param shardHash hash code for the shard
+     * @param totalNumberOfConsumerSubtasks total number of consumer subtasks
+     * @param indexOfThisConsumerSubtask index of this consumer subtask
+     */
+    public static boolean isThisSubtaskShouldSubscribeTo(
+            int shardHash, int totalNumberOfConsumerSubtasks, int indexOfThisConsumerSubtask) {
+        return (Math.abs(shardHash % totalNumberOfConsumerSubtasks)) == indexOfThisConsumerSubtask;
+    }
+
+    @VisibleForTesting
+    protected ExecutorService createShardConsumersThreadPool(final String subtaskName) {
+        return Executors.newCachedThreadPool(
+                new ThreadFactory() {
+                    private final AtomicLong threadCount = new AtomicLong(0);
+
+                    @Override
+                    public Thread newThread(Runnable runnable) {
+                        Thread thread = new Thread(runnable);
+                        thread.setName(
+                                "shardConsumers-"
+                                        + subtaskName
+                                        + "-thread-"
+                                        + threadCount.getAndIncrement());
+                        thread.setDaemon(true);
+                        return thread;
+                    }
+                });
+    }
+
+    @VisibleForTesting
+    public List<KinesisStreamShardState> getSubscribedShardsState() {
+        return subscribedShardsState;
+    }
+
+    /**
+     * Utility function to create an initial map of the last discovered shard id of each subscribed
+     * stream, set to null. This is called in the constructor; correct values will be set later on
+     * by calling advanceLastDiscoveredShardOfStream().
+     *
+     * @param streams the list of subscribed streams
+     * @return the initial map for subscribedStreamsToLastDiscoveredShardIds
+     */
+    protected static HashMap<String, String>
+            createInitialSubscribedStreamsToLastDiscoveredShardsState(List<String> streams) {
+        HashMap<String, String> initial = new HashMap<>();
+        for (String stream : streams) {
+            initial.put(stream, null);
+        }
+        return initial;
+    }
+
+    /**
+     * Utility function to convert {@link StreamShardHandle} into {@link StreamShardMetadata}.
+     *
+     * @param streamShardHandle the {@link StreamShardHandle} to be converted
+     * @return a {@link StreamShardMetadata} object
+     */
+    public static StreamShardMetadata convertToStreamShardMetadata(
+            StreamShardHandle streamShardHandle) {
+        StreamShardMetadata streamShardMetadata = new StreamShardMetadata();
+
+        streamShardMetadata.setStreamName(streamShardHandle.getStreamName());
+        streamShardMetadata.setShardId(streamShardHandle.getShard().getShardId());
+        streamShardMetadata.setParentShardId(streamShardHandle.getShard().getParentShardId());
+        streamShardMetadata.setAdjacentParentShardId(
+                streamShardHandle.getShard().getAdjacentParentShardId());
+
+        if (streamShardHandle.getShard().getHashKeyRange() != null) {
+            streamShardMetadata.setStartingHashKey(
+                    streamShardHandle.getShard().getHashKeyRange().getStartingHashKey());
+            streamShardMetadata.setEndingHashKey(
+                    streamShardHandle.getShard().getHashKeyRange().getEndingHashKey());
+        }
+
+        if (streamShardHandle.getShard().getSequenceNumberRange() != null) {
+            streamShardMetadata.setStartingSequenceNumber(
+                    streamShardHandle
+                            .getShard()
+                            .getSequenceNumberRange()
+                            .getStartingSequenceNumber());
+            streamShardMetadata.setEndingSequenceNumber(
+                    streamShardHandle
+                            .getShard()
+                            .getSequenceNumberRange()
+                            .getEndingSequenceNumber());
+        }
+
+        return streamShardMetadata;
+    }
+
+    /**
+     * Utility function to convert {@link StreamShardMetadata} into {@link StreamShardHandle}.
+     *
+     * @param streamShardMetadata the {@link StreamShardMetadata} to be converted
+     * @return a {@link StreamShardHandle} object
+     */
+    public static StreamShardHandle convertToStreamShardHandle(
+            StreamShardMetadata streamShardMetadata) {
+        Shard shard = new Shard();
+        shard.withShardId(streamShardMetadata.getShardId());
+        shard.withParentShardId(streamShardMetadata.getParentShardId());
+        shard.withAdjacentParentShardId(streamShardMetadata.getAdjacentParentShardId());
+
+        HashKeyRange hashKeyRange = new HashKeyRange();
+        hashKeyRange.withStartingHashKey(streamShardMetadata.getStartingHashKey());
+        hashKeyRange.withEndingHashKey(streamShardMetadata.getEndingHashKey());
+        shard.withHashKeyRange(hashKeyRange);
+
+        SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
+        sequenceNumberRange.withStartingSequenceNumber(
+                streamShardMetadata.getStartingSequenceNumber());
+        sequenceNumberRange.withEndingSequenceNumber(streamShardMetadata.getEndingSequenceNumber());
+        shard.withSequenceNumberRange(sequenceNumberRange);
+
+        return new StreamShardHandle(streamShardMetadata.getStreamName(), shard);
+    }
+}
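
Editor's note (not part of this change): the shard-to-subtask assignment used by
discoverNewShardsToSubscribe() above reduces to a modulo over the hash returned by the shard
assigner. A minimal, self-contained sketch of that arithmetic; the class name and hash value are
illustrative only:

public class ShardAssignmentSketch {

    // Mirrors KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(...)
    static boolean isAssignedTo(int shardHash, int totalConsumerSubtasks, int subtaskIndex) {
        return Math.abs(shardHash % totalConsumerSubtasks) == subtaskIndex;
    }

    public static void main(String[] args) {
        // With 3 subtasks, a shard whose assigner returned hash 7 lands on subtask 1 (7 % 3 == 1).
        for (int subtask = 0; subtask < 3; subtask++) {
            System.out.printf("hash=7 -> subtask %d? %b%n", subtask, isAssignedTo(7, 3, subtask));
        }
    }
}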
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
new file mode 100644
index 0000000..85dc8f4
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.api.TimeCharacteristic;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
+
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import static java.util.Optional.ofNullable;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.CANCELLED;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
+import static org.apache.flink.util.Preconditions.checkArgument;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Thread that subscribes to the given {@link RecordPublisher}. Each thread is in charge of one
+ * Kinesis shard only.
+ *
+ * <p>A {@link ShardConsumer} is responsible for:
+ *
+ * <ul>
+ *   <li>Running the {@link RecordPublisher} to consume all records from the subscribed shard
+ *   <li>Deserializing and deaggregating incoming records from Kinesis
+ *   <li>Logging metrics
+ *   <li>Passing the records up to the {@link KinesisDataFetcher}
+ * </ul>
+ */
+@Internal
+public class ShardConsumer<T> implements Runnable {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ShardConsumer.class);
+
+    private final KinesisDeserializationSchema<T> deserializer;
+
+    private final int subscribedShardStateIndex;
+
+    private final KinesisDataFetcher<T> fetcherRef;
+
+    private final StreamShardHandle subscribedShard;
+
+    private final ShardConsumerMetricsReporter shardConsumerMetricsReporter;
+
+    private SequenceNumber lastSequenceNum;
+
+    private final RecordPublisher recordPublisher;
+
+    /**
+     * Creates a shard consumer.
+     *
+     * @param fetcherRef reference to the owning fetcher
+     * @param recordPublisher the record publisher used to read records from kinesis
+     * @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to
+     * @param subscribedShard the shard this consumer is subscribed to
+     * @param lastSequenceNum the sequence number in the shard to start consuming
+     * @param shardConsumerMetricsReporter the reporter to report metrics to
+     * @param shardDeserializer used to deserialize incoming records
+     */
+    public ShardConsumer(
+            KinesisDataFetcher<T> fetcherRef,
+            RecordPublisher recordPublisher,
+            Integer subscribedShardStateIndex,
+            StreamShardHandle subscribedShard,
+            SequenceNumber lastSequenceNum,
+            ShardConsumerMetricsReporter shardConsumerMetricsReporter,
+            KinesisDeserializationSchema<T> shardDeserializer) {
+        this.fetcherRef = checkNotNull(fetcherRef);
+        this.recordPublisher = checkNotNull(recordPublisher);
+        this.subscribedShardStateIndex = checkNotNull(subscribedShardStateIndex);
+        this.subscribedShard = checkNotNull(subscribedShard);
+        this.shardConsumerMetricsReporter = checkNotNull(shardConsumerMetricsReporter);
+        this.lastSequenceNum = checkNotNull(lastSequenceNum);
+
+        checkArgument(
+                !lastSequenceNum.equals(
+                        SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get()),
+                "Should not start a ShardConsumer if the shard has already been completely read.");
+
+        this.deserializer = shardDeserializer;
+    }
+
+    @Override
+    public void run() {
+        try {
+            while (isRunning()) {
+                final RecordPublisherRunResult result =
+                        recordPublisher.run(
+                                batch -> {
+                                    if (!batch.getDeaggregatedRecords().isEmpty()) {
+                                        LOG.debug(
+                                                "stream: {}, shard: {}, millis behind latest: {}, batch size: {}",
+                                                subscribedShard.getStreamName(),
+                                                subscribedShard.getShard().getShardId(),
+                                                batch.getMillisBehindLatest(),
+                                                batch.getDeaggregatedRecordSize());
+                                    }
+                                    for (UserRecord userRecord : batch.getDeaggregatedRecords()) {
+                                        if (filterDeaggregatedRecord(userRecord)) {
+                                            deserializeRecordForCollectionAndUpdateState(
+                                                    userRecord);
+                                        }
+                                    }
+
+                                    shardConsumerMetricsReporter.setAverageRecordSizeBytes(
+                                            batch.getAverageRecordSizeBytes());
+                                    shardConsumerMetricsReporter.setNumberOfAggregatedRecords(
+                                            batch.getAggregatedRecordSize());
+                                    shardConsumerMetricsReporter.setNumberOfDeaggregatedRecords(
+                                            batch.getDeaggregatedRecordSize());
+                                    ofNullable(batch.getMillisBehindLatest())
+                                            .ifPresent(
+                                                    shardConsumerMetricsReporter
+                                                            ::setMillisBehindLatest);
+
+                                    return lastSequenceNum;
+                                });
+
+                if (result == COMPLETE) {
+                    fetcherRef.updateState(
+                            subscribedShardStateIndex,
+                            SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
+                    // we can close this consumer thread once we've reached the end of the
+                    // subscribed shard
+                    break;
+                } else if (result == CANCELLED) {
+                    final String errorMessage =
+                            "Shard consumer cancelled: " + subscribedShard.getShard().getShardId();
+                    LOG.info(errorMessage);
+                    throw new ShardConsumerCancelledException(errorMessage);
+                }
+            }
+        } catch (Throwable t) {
+            fetcherRef.stopWithError(t);
+        } finally {
+            this.shardConsumerMetricsReporter.unregister();
+        }
+    }
+
+    /**
+     * The loop in run() checks this before fetching the next batch of records. Since this
+     * runnable will be executed by the ExecutorService {@code
+     * KinesisDataFetcher#shardConsumersExecutor}, this thread can be closed down by calling
+     * shutdownNow() on {@code KinesisDataFetcher#shardConsumersExecutor}, letting the executor
+     * service interrupt all currently running {@link ShardConsumer}s. The AWS SDK resources must
+     * be shut down prior to this thread in order to preserve the classpath for teardown; therefore
+     * we also check whether the fetcher is still running.
+     */
+    private boolean isRunning() {
+        return !Thread.interrupted() && fetcherRef.isRunning();
+    }
+
+    /**
+     * Deserializes a record for collection, and accordingly updates the shard state in the fetcher.
+     * The last successfully collected sequence number in this shard consumer is also updated so
+     * that a {@link RecordPublisher} may be able to use the correct sequence number to refresh
+     * shard iterators if necessary.
+     *
+     * <p>Note that the server-side Kinesis timestamp is attached to the record when collected. When
+     * the user program uses {@link TimeCharacteristic#EventTime}, this timestamp will be used by
+     * default.
+     *
+     * @param record record to deserialize and collect
+     */
+    private void deserializeRecordForCollectionAndUpdateState(final UserRecord record) {
+        ByteBuffer recordData = record.getData();
+
+        byte[] dataBytes = new byte[recordData.remaining()];
+        recordData.get(dataBytes);
+
+        final long approxArrivalTimestamp = record.getApproximateArrivalTimestamp().getTime();
+
+        final T value;
+        try {
+            value =
+                    deserializer.deserialize(
+                            dataBytes,
+                            record.getPartitionKey(),
+                            record.getSequenceNumber(),
+                            approxArrivalTimestamp,
+                            subscribedShard.getStreamName(),
+                            subscribedShard.getShard().getShardId());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+        SequenceNumber collectedSequenceNumber =
+                (record.isAggregated())
+                        ? new SequenceNumber(
+                                record.getSequenceNumber(), record.getSubSequenceNumber())
+                        : new SequenceNumber(record.getSequenceNumber());
+
+        fetcherRef.emitRecordAndUpdateState(
+                value, approxArrivalTimestamp, subscribedShardStateIndex, collectedSequenceNumber);
+
+        this.lastSequenceNum = collectedSequenceNumber;
+    }
+
+    /**
+     * Filters out aggregated records that have previously been processed. This method supports
+     * restarting from a partially consumed aggregated sequence number.
+     *
+     * @param record the record to filter
+     * @return true if the record should be retained
+     */
+    private boolean filterDeaggregatedRecord(final UserRecord record) {
+        if (!lastSequenceNum.isAggregated()) {
+            return true;
+        }
+
+        return !record.getSequenceNumber().equals(lastSequenceNum.getSequenceNumber())
+                || record.getSubSequenceNumber() > lastSequenceNum.getSubSequenceNumber();
+    }
+
+    /** An exception wrapper to indicate an error has been thrown from the shard consumer. */
+    abstract static class ShardConsumerException extends RuntimeException {
+        private static final long serialVersionUID = 7732343624482321663L;
+
+        public ShardConsumerException(final String message) {
+            super(message);
+        }
+    }
+
+    /** An exception to indicate the shard consumer has been cancelled. */
+    static class ShardConsumerCancelledException extends ShardConsumerException {
+        private static final long serialVersionUID = 2707399313569728649L;
+
+        public ShardConsumerCancelledException(final String message) {
+            super(message);
+        }
+    }
+}
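
Editor's note (not part of this change): the filterDeaggregatedRecord() predicate above only drops
sub-records of the same aggregated parent record up to and including the restored sub-sequence
number. A standalone sketch of that predicate using plain values; the sequence numbers are
illustrative only:

public class DeaggregationFilterSketch {

    // Mirrors ShardConsumer.filterDeaggregatedRecord(...) using plain values;
    // a null lastSubSeq stands for "last restored state was not aggregated".
    static boolean shouldRetain(
            String recordSeq, long recordSubSeq, String lastSeq, Long lastSubSeq) {
        if (lastSubSeq == null) {
            return true;
        }
        return !recordSeq.equals(lastSeq) || recordSubSeq > lastSubSeq;
    }

    public static void main(String[] args) {
        // Restored from an aggregated position: sequence "100", sub-sequence 2.
        System.out.println(shouldRetain("100", 1, "100", 2L)); // false - already processed
        System.out.println(shouldRetain("100", 3, "100", 2L)); // true  - not yet processed
        System.out.println(shouldRetain("101", 0, "100", 2L)); // true  - different parent record
    }
}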
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatch.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatch.java
new file mode 100644
index 0000000..cca1880
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatch.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.util.Preconditions;
+
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import com.amazonaws.services.kinesis.model.Record;
+
+import javax.annotation.Nullable;
+
+import java.math.BigInteger;
+import java.util.List;
+
+/**
+ * A batch of UserRecords received from Kinesis. Input records are de-aggregated using the KCL 1.x
+ * library. It is expected that AWS SDK v2.x messages are converted to KCL 1.x {@link UserRecord}.
+ */
+@Internal
+public class RecordBatch {
+
+    private final int aggregatedRecordSize;
+
+    private final List<UserRecord> deaggregatedRecords;
+
+    private final long totalSizeInBytes;
+
+    private final Long millisBehindLatest;
+
+    public RecordBatch(
+            final List<Record> records,
+            final StreamShardHandle subscribedShard,
+            @Nullable final Long millisBehindLatest) {
+        Preconditions.checkNotNull(subscribedShard);
+        this.aggregatedRecordSize = Preconditions.checkNotNull(records).size();
+        this.deaggregatedRecords = deaggregateRecords(records, subscribedShard);
+        this.totalSizeInBytes =
+                this.deaggregatedRecords.stream().mapToInt(r -> r.getData().remaining()).sum();
+        this.millisBehindLatest = millisBehindLatest;
+    }
+
+    public int getAggregatedRecordSize() {
+        return aggregatedRecordSize;
+    }
+
+    public int getDeaggregatedRecordSize() {
+        return deaggregatedRecords.size();
+    }
+
+    public List<UserRecord> getDeaggregatedRecords() {
+        return deaggregatedRecords;
+    }
+
+    public long getTotalSizeInBytes() {
+        return totalSizeInBytes;
+    }
+
+    public long getAverageRecordSizeBytes() {
+        return deaggregatedRecords.isEmpty()
+                ? 0
+                : getTotalSizeInBytes() / getDeaggregatedRecordSize();
+    }
+
+    @Nullable
+    public Long getMillisBehindLatest() {
+        return millisBehindLatest;
+    }
+
+    private List<UserRecord> deaggregateRecords(
+            final List<Record> records, final StreamShardHandle subscribedShard) {
+        BigInteger start =
+                new BigInteger(subscribedShard.getShard().getHashKeyRange().getStartingHashKey());
+        BigInteger end =
+                new BigInteger(subscribedShard.getShard().getHashKeyRange().getEndingHashKey());
+
+        return UserRecord.deaggregate(records, start, end);
+    }
+}
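
Editor's note (not part of this change): the size metrics above are plain aggregates over the
de-aggregated payloads; the average uses integer division and is 0 for an empty batch. A quick
back-of-the-envelope check with assumed payload sizes of 10, 20 and 31 bytes:

public class RecordBatchMetricsSketch {

    public static void main(String[] args) {
        int[] payloadSizes = {10, 20, 31};

        long totalSizeInBytes = 0;
        for (int size : payloadSizes) {
            totalSizeInBytes += size;
        }
        long averageRecordSizeBytes =
                payloadSizes.length == 0 ? 0 : totalSizeInBytes / payloadSizes.length;

        // Prints: total=61 bytes, average=20 bytes (integer division of 61 / 3)
        System.out.println(
                "total=" + totalSizeInBytes + " bytes, average=" + averageRecordSizeBytes + " bytes");
    }
}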
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisher.java
new file mode 100644
index 0000000..9da8794
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisher.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+
+/**
+ * A {@code RecordPublisher} will consume records from an external stream and deliver them to the
+ * registered subscriber.
+ */
+@Internal
+public interface RecordPublisher {
+
+    /**
+     * Run the record publisher. Records will be consumed from the stream and published to the
+     * consumer. The number of batches retrieved by a single invocation will vary based on
+     * implementation.
+     *
+     * @param recordBatchConsumer the record batch consumer to output records to
+     * @return a status enum to represent whether a shard has been fully consumed
+     * @throws InterruptedException
+     */
+    RecordPublisherRunResult run(RecordBatchConsumer recordBatchConsumer)
+            throws InterruptedException;
+
+    /** A status enum to represent whether a shard has been fully consumed. */
+    enum RecordPublisherRunResult {
+        /** There are no more records to consume from this shard. */
+        COMPLETE,
+
+        /** There are more records to consume from this shard. */
+        INCOMPLETE,
+
+        /** The record publisher has been cancelled. */
+        CANCELLED
+    }
+
+    /**
+     * An interface used to collect record batches, and reply with the latest consumed sequence
+     * number.
+     */
+    interface RecordBatchConsumer {
+
+        SequenceNumber accept(RecordBatch recordBatch);
+    }
+}
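
Editor's note (not part of this change): a caller drives this contract by handing run() a
RecordBatchConsumer and re-invoking run() while the result is INCOMPLETE. A hedged sketch under the
assumption that a RecordPublisher instance is already available and the connector classes are on
the classpath; the drain() helper and the lastSeen tracking are illustrative, not prescribed by the
interface:

import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult;
import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;

import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;

public class RecordPublisherLoopSketch {

    /** Drains a publisher until its shard reports COMPLETE or CANCELLED. */
    static void drain(RecordPublisher publisher, SequenceNumber startingSequenceNumber)
            throws InterruptedException {
        final SequenceNumber[] lastSeen = {startingSequenceNumber};

        RecordPublisherRunResult result;
        do {
            result =
                    publisher.run(
                            batch -> {
                                for (UserRecord record : batch.getDeaggregatedRecords()) {
                                    // a real consumer would deserialize and emit the record here
                                    lastSeen[0] = new SequenceNumber(record.getSequenceNumber());
                                }
                                // reply with the latest consumed sequence number
                                return lastSeen[0];
                            });
        } while (result == RecordPublisherRunResult.INCOMPLETE);
    }
}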
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
new file mode 100644
index 0000000..a4a5848
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+
+import java.util.Properties;
+
+/** A factory interface used to create instances of {@link RecordPublisher}. */
+@Internal
+public interface RecordPublisherFactory {
+
+    /**
+     * Create a {@link RecordPublisher}.
+     *
+     * @param startingPosition the position in the shard to start consuming records from
+     * @param consumerConfig the properties used to configure the {@link RecordPublisher}.
+     * @param metricGroup the {@link MetricGroup} used to report metrics to
+     * @param streamShardHandle the stream shard to consume from
+     * @return the constructed {@link RecordPublisher}
+     */
+    RecordPublisher create(
+            StartingPosition startingPosition,
+            Properties consumerConfig,
+            MetricGroup metricGroup,
+            StreamShardHandle streamShardHandle)
+            throws InterruptedException;
+
+    /** Destroy any open resources used by the factory. */
+    default void close() {
+        // Do nothing by default
+    }
+}
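
Editor's note (not part of this change): a factory implementation only needs to build a
RecordPublisher for a given shard and starting position; close() already has a no-op default. A
minimal hedged sketch, where NoOpRecordPublisherFactory is a hypothetical name and every created
publisher immediately reports its shard as complete:

import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;

import java.util.Properties;

/** Hypothetical factory sketch: every created publisher reports COMPLETE straight away. */
public class NoOpRecordPublisherFactory implements RecordPublisherFactory {

    @Override
    public RecordPublisher create(
            StartingPosition startingPosition,
            Properties consumerConfig,
            MetricGroup metricGroup,
            StreamShardHandle streamShardHandle) {
        // RecordPublisher has a single abstract method, so a lambda suffices here
        return recordBatchConsumer -> RecordPublisher.RecordPublisherRunResult.COMPLETE;
    }
}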
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
new file mode 100644
index 0000000..9df68bf
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutShardSubscriber.FanOutSubscriberException;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutShardSubscriber.FanOutSubscriberInterruptedException;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutShardSubscriber.RecoverableFanOutSubscriberException;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.model.EncryptionType;
+import software.amazon.awssdk.services.kinesis.model.Record;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+
+import javax.annotation.Nonnull;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.CANCELLED;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM;
+import static software.amazon.awssdk.services.kinesis.model.StartingPosition.builder;
+
+/**
+ * A {@link RecordPublisher} that reads records from Kinesis using Enhanced Fan-Out (EFO) and
+ * forwards them to the subscriber. Records are consumed via EFO subscriptions using the
+ * SubscribeToShard API.
+ */
+@Internal
+public class FanOutRecordPublisher implements RecordPublisher {
+
+    private static final Logger LOG = LoggerFactory.getLogger(FanOutRecordPublisher.class);
+
+    private final FullJitterBackoff backoff;
+
+    private final String consumerArn;
+
+    private final KinesisProxyV2Interface kinesisProxy;
+
+    private final StreamShardHandle subscribedShard;
+
+    private final FanOutRecordPublisherConfiguration configuration;
+
+    /** The current attempt in the case of subsequent recoverable errors. */
+    private int attempt = 0;
+
+    private StartingPosition nextStartingPosition;
+
+    /**
+     * Instantiate a new FanOutRecordPublisher. Consumes data from KDS using EFO SubscribeToShard
+     * over AWS SDK V2.x
+     *
+     * @param startingPosition the position in the shard to start consuming from
+     * @param consumerArn the consumer ARN of the stream consumer
+     * @param subscribedShard the shard to consume from
+     * @param kinesisProxy the proxy used to talk to Kinesis services
+     * @param configuration the record publisher configuration
+     */
+    public FanOutRecordPublisher(
+            final StartingPosition startingPosition,
+            final String consumerArn,
+            final StreamShardHandle subscribedShard,
+            final KinesisProxyV2Interface kinesisProxy,
+            final FanOutRecordPublisherConfiguration configuration,
+            final FullJitterBackoff backoff) {
+        this.nextStartingPosition = Preconditions.checkNotNull(startingPosition);
+        this.consumerArn = Preconditions.checkNotNull(consumerArn);
+        this.subscribedShard = Preconditions.checkNotNull(subscribedShard);
+        this.kinesisProxy = Preconditions.checkNotNull(kinesisProxy);
+        this.configuration = Preconditions.checkNotNull(configuration);
+        this.backoff = Preconditions.checkNotNull(backoff);
+    }
+
+    @Override
+    public RecordPublisherRunResult run(final RecordBatchConsumer recordConsumer)
+            throws InterruptedException {
+        LOG.info(
+                "Running fan out record publisher on {}::{} from {} - {}",
+                subscribedShard.getStreamName(),
+                subscribedShard.getShard().getShardId(),
+                nextStartingPosition.getShardIteratorType(),
+                nextStartingPosition.getStartingMarker());
+
+        Consumer<SubscribeToShardEvent> eventConsumer =
+                event -> {
+                    RecordBatch recordBatch =
+                            new RecordBatch(
+                                    toSdkV1Records(event.records()),
+                                    subscribedShard,
+                                    event.millisBehindLatest());
+                    SequenceNumber sequenceNumber = recordConsumer.accept(recordBatch);
+                    nextStartingPosition = getNextStartingPosition(sequenceNumber);
+                };
+
+        RecordPublisherRunResult result = runWithBackoff(eventConsumer);
+
+        LOG.info(
+                "Subscription expired {}::{}, with status {}",
+                subscribedShard.getStreamName(),
+                subscribedShard.getShard().getShardId(),
+                result);
+
+        return result;
+    }
+
+    private StartingPosition getNextStartingPosition(final SequenceNumber latestSequenceNumber) {
+        // When consuming from a timestamp sentinel (AT_TIMESTAMP ShardIteratorType), if the first
+        // RecordBatch has no deaggregated records, the latestSequenceNumber will still be the
+        // timestamp sentinel, because we have not yet received any real sequence numbers on this
+        // shard. In this condition we should retry from the previous starting position
+        // (AT_TIMESTAMP).
+        if (SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get().equals(latestSequenceNumber)) {
+            Preconditions.checkState(nextStartingPosition.getShardIteratorType() == AT_TIMESTAMP);
+            return nextStartingPosition;
+        } else {
+            return StartingPosition.continueFromSequenceNumber(latestSequenceNumber);
+        }
+    }
+
+    /**
+     * Runs the record publisher, sleeping for a full-jitter backoff period computed from the
+     * configuration when certain recoverable exceptions occur. Unrecoverable exceptions are thrown
+     * to terminate the application.
+     *
+     * @param eventConsumer the consumer to pass events to
+     * @return {@code COMPLETE} if the shard is complete and this shard consumer should exit
+     * @throws InterruptedException if the backoff sleep is interrupted
+     */
+    private RecordPublisherRunResult runWithBackoff(
+            final Consumer<SubscribeToShardEvent> eventConsumer) throws InterruptedException {
+        FanOutShardSubscriber fanOutShardSubscriber =
+                new FanOutShardSubscriber(
+                        consumerArn,
+                        subscribedShard.getShard().getShardId(),
+                        kinesisProxy,
+                        configuration.getSubscribeToShardTimeout());
+        boolean complete;
+
+        try {
+            complete =
+                    fanOutShardSubscriber.subscribeToShardAndConsumeRecords(
+                            toSdkV2StartingPosition(nextStartingPosition), eventConsumer);
+            attempt = 0;
+        } catch (FanOutSubscriberInterruptedException ex) {
+            LOG.info(
+                    "Thread interrupted, closing record publisher for shard {}.",
+                    subscribedShard.getShard().getShardId(),
+                    ex);
+            return CANCELLED;
+        } catch (RecoverableFanOutSubscriberException ex) {
+            // Recoverable errors should be reattempted without contributing to the retry policy
+            // A recoverable error would not result in the Flink job being cancelled
+            backoff(ex);
+            return INCOMPLETE;
+        } catch (FanOutSubscriberException ex) {
+            // We have received an error from the network layer
+            // This can be due to limits being exceeded, network timeouts, etc
+            // We should backoff, reacquire a subscription and try again
+            if (ex.getCause() instanceof ResourceNotFoundException) {
+                LOG.warn(
+                        "Received ResourceNotFoundException. Either the shard does not exist, or the stream subscriber has been deregistered."
+                                + "Marking this shard as complete {} ({})",
+                        subscribedShard.getShard().getShardId(),
+                        consumerArn);
+
+                return COMPLETE;
+            }
+
+            if (attempt == configuration.getSubscribeToShardMaxRetries()) {
+                final String errorMessage =
+                        "Maximum retries exceeded for SubscribeToShard. "
+                                + "Failed "
+                                + configuration.getSubscribeToShardMaxRetries()
+                                + " times.";
+                LOG.error(errorMessage, ex.getCause());
+                throw new RuntimeException(errorMessage, ex.getCause());
+            }
+
+            attempt++;
+            backoff(ex);
+            return INCOMPLETE;
+        }
+
+        return complete ? COMPLETE : INCOMPLETE;
+    }
+
+    private void backoff(final Throwable ex) throws InterruptedException {
+        long backoffMillis =
+                backoff.calculateFullJitterBackoff(
+                        configuration.getSubscribeToShardBaseBackoffMillis(),
+                        configuration.getSubscribeToShardMaxBackoffMillis(),
+                        configuration.getSubscribeToShardExpConstant(),
+                        attempt);
+
+        LOG.warn(
+                "Encountered recoverable error {}. Backing off for {} millis {} ({})",
+                ex.getCause().getClass().getSimpleName(),
+                backoffMillis,
+                subscribedShard.getShard().getShardId(),
+                consumerArn,
+                ex);
+
+        backoff.sleep(backoffMillis);
+    }
+
+    /**
+     * Records that come from KPL may be aggregated. Records must be deaggregated before they are
+     * processed by the application. Deaggregation is performed by KCL. In order to prevent having
+     * to import KCL 1.x and 2.x we convert the records to v1 format and use KCL v1.
+     *
+     * @param records the SDK v2 records
+     * @return records converted to SDK v1 format
+     */
+    private List<com.amazonaws.services.kinesis.model.Record> toSdkV1Records(
+            final List<Record> records) {
+        final List<com.amazonaws.services.kinesis.model.Record> sdkV1Records = new ArrayList<>();
+
+        for (Record record : records) {
+            sdkV1Records.add(toSdkV1Record(record));
+        }
+
+        return sdkV1Records;
+    }
+
+    private com.amazonaws.services.kinesis.model.Record toSdkV1Record(
+            @Nonnull final Record record) {
+        final com.amazonaws.services.kinesis.model.Record recordV1 =
+                new com.amazonaws.services.kinesis.model.Record()
+                        .withData(record.data().asByteBuffer())
+                        .withSequenceNumber(record.sequenceNumber())
+                        .withPartitionKey(record.partitionKey())
+                        .withApproximateArrivalTimestamp(
+                                new Date(record.approximateArrivalTimestamp().toEpochMilli()));
+
+        EncryptionType encryptionType = record.encryptionType();
+        if (encryptionType != null) {
+            recordV1.withEncryptionType(encryptionType.name());
+        }
+
+        return recordV1;
+    }
+
+    /**
+     * Converts a local {@link StartingPosition} to an AWS SDK V2 object representation.
+     *
+     * @param startingPosition the local {@link StartingPosition}
+     * @return an AWS SDK V2 representation
+     */
+    private software.amazon.awssdk.services.kinesis.model.StartingPosition toSdkV2StartingPosition(
+            StartingPosition startingPosition) {
+        software.amazon.awssdk.services.kinesis.model.StartingPosition.Builder builder =
+                builder().type(startingPosition.getShardIteratorType().toString());
+
+        Object marker = startingPosition.getStartingMarker();
+
+        switch (startingPosition.getShardIteratorType()) {
+            case AT_TIMESTAMP:
+                {
+                    Preconditions.checkNotNull(
+                            marker, "StartingPosition AT_TIMESTAMP date marker is null.");
+                    builder.timestamp(((Date) marker).toInstant());
+                    break;
+                }
+            case AT_SEQUENCE_NUMBER:
+            case AFTER_SEQUENCE_NUMBER:
+                {
+                    Preconditions.checkNotNull(
+                            marker, "StartingPosition *_SEQUENCE_NUMBER position is null.");
+                    builder.sequenceNumber(marker.toString());
+                    break;
+                }
+        }
+
+        return builder.build();
+    }
+}
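
For reference, the backoff() method above delegates the jitter computation to FullJitterBackoff,
which is defined elsewhere in this repository and not shown in this diff. Below is a minimal sketch
of what that computation typically looks like, assuming the standard AWS full-jitter formula; the
class and method are illustrative only, not part of this patch.

    import java.util.Random;

    /** Illustrative sketch of full-jitter backoff; not part of this patch. */
    class FullJitterBackoffSketch {

        private final Random random = new Random();

        /** Returns a random delay in [0, min(maxMillis, baseMillis * power^attempt)). */
        long calculateFullJitterBackoff(long baseMillis, long maxMillis, double power, int attempt) {
            // Cap the exponential growth at maxMillis, then apply full jitter.
            double exponential = Math.min(maxMillis, baseMillis * Math.pow(power, attempt));
            return (long) (random.nextDouble() * exponential);
        }
    }

Under this scheme, successive recoverable failures sleep for a random period whose upper bound grows
exponentially with the attempt counter, capped at the configured maximum.
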
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
new file mode 100644
index 0000000..cd46876
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.util.Preconditions;
+
+import javax.annotation.Nullable;
+
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn;
+
+/** This is a configuration class for enhanced fan-out components. */
+public class FanOutRecordPublisherConfiguration {
+
+    /** The EFO registration type for de-/registration of stream consumers. */
+    private final EFORegistrationType efoRegistrationType;
+
+    /**
+     * The EFO stream consumer name. Must not be null if the efoRegistrationType is either LAZY or
+     * EAGER.
+     */
+    @Nullable private String consumerName;
+
+    /** A map of stream to stream consumer ARN for EFO subscriptions. */
+    private final Map<String, String> streamConsumerArns = new HashMap<>();
+
+    /** Maximum retry attempts for the subscribe to shard operation. */
+    private final int subscribeToShardMaxRetries;
+
+    /** A timeout when waiting for a shard subscription to be established. */
+    private final Duration subscribeToShardTimeout;
+
+    /** Maximum backoff millis for the subscribe to shard operation. */
+    private final long subscribeToShardMaxBackoffMillis;
+
+    /** Base backoff millis for the subscribe to shard operation. */
+    private final long subscribeToShardBaseBackoffMillis;
+
+    /** Exponential backoff power constant for the subscribe to shard operation. */
+    private final double subscribeToShardExpConstant;
+
+    /** Base backoff millis for the register stream operation. */
+    private final long registerStreamBaseBackoffMillis;
+
+    /** Maximum backoff millis for the register stream operation. */
+    private final long registerStreamMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the register stream operation. */
+    private final double registerStreamExpConstant;
+
+    /** Maximum retry attempts for the register stream operation. */
+    private final int registerStreamMaxRetries;
+
+    /** Maximum time to wait for a stream consumer to become active before giving up. */
+    private final Duration registerStreamConsumerTimeout;
+
+    /** Base backoff millis for the deregister stream operation. */
+    private final long deregisterStreamBaseBackoffMillis;
+
+    /** Maximum backoff millis for the deregister stream operation. */
+    private final long deregisterStreamMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the deregister stream operation. */
+    private final double deregisterStreamExpConstant;
+
+    /** Maximum retry attempts for the deregister stream operation. */
+    private final int deregisterStreamMaxRetries;
+
+    /** Maximum time to wait for a stream consumer to deregister before giving up. */
+    private final Duration deregisterStreamConsumerTimeout;
+
+    /** Max retries for the describe stream operation. */
+    private final int describeStreamMaxRetries;
+
+    /** Base backoff millis for the describe stream operation. */
+    private final long describeStreamBaseBackoffMillis;
+
+    /** Maximum backoff millis for the describe stream operation. */
+    private final long describeStreamMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the describe stream operation. */
+    private final double describeStreamExpConstant;
+
+    /** Max retries for the describe stream consumer operation. */
+    private final int describeStreamConsumerMaxRetries;
+
+    /** Base backoff millis for the describe stream consumer operation. */
+    private final long describeStreamConsumerBaseBackoffMillis;
+
+    /** Maximum backoff millis for the describe stream consumer operation. */
+    private final long describeStreamConsumerMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the describe stream consumer operation. */
+    private final double describeStreamConsumerExpConstant;
+
+    /**
+     * Creates a FanOutRecordPublisherConfiguration.
+     *
+     * @param configProps the consumer configuration properties.
+     * @param streams the streams used to look up the configured EFO consumer ARNs when the EFO
+     *     registration type is set to `NONE`.
+     */
+    public FanOutRecordPublisherConfiguration(
+            final Properties configProps, final List<String> streams) {
+        Preconditions.checkArgument(
+                configProps
+                        .getProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE)
+                        .equals(RecordPublisherType.EFO.toString()),
+                "Only efo record publisher can register a FanOutProperties.");
+        KinesisConfigUtil.validateEfoConfiguration(configProps, streams);
+
+        efoRegistrationType =
+                EFORegistrationType.valueOf(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.EFO_REGISTRATION_TYPE,
+                                EFORegistrationType.EAGER.toString()));
+        // if efo registration type is EAGER|LAZY, then user should explicitly provide a consumer
+        // name for each stream.
+        if (efoRegistrationType == EFORegistrationType.EAGER
+                || efoRegistrationType == EFORegistrationType.LAZY) {
+            consumerName = configProps.getProperty(ConsumerConfigConstants.EFO_CONSUMER_NAME);
+        }
+
+        for (String stream : streams) {
+            String key = efoConsumerArn(stream);
+            if (configProps.containsKey(key)) {
+                streamConsumerArns.put(stream, configProps.getProperty(key));
+            }
+        }
+
+        this.subscribeToShardMaxRetries =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_RETRIES))
+                        .map(Integer::parseInt)
+                        .orElse(ConsumerConfigConstants.DEFAULT_SUBSCRIBE_TO_SHARD_RETRIES);
+        this.subscribeToShardTimeout =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_TIMEOUT_SECONDS))
+                        .map(Integer::parseInt)
+                        .map(Duration::ofSeconds)
+                        .orElse(ConsumerConfigConstants.DEFAULT_SUBSCRIBE_TO_SHARD_TIMEOUT);
+        this.subscribeToShardBaseBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_BASE))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_BASE);
+        this.subscribeToShardMaxBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_MAX))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_MAX);
+        this.subscribeToShardExpConstant =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT))
+                        .map(Double::parseDouble)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT);
+
+        this.registerStreamBaseBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_BASE))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_BACKOFF_BASE);
+        this.registerStreamMaxBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_MAX))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_BACKOFF_MAX);
+        this.registerStreamExpConstant =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT))
+                        .map(Double::parseDouble)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT);
+        this.registerStreamMaxRetries =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.REGISTER_STREAM_RETRIES))
+                        .map(Integer::parseInt)
+                        .orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_RETRIES);
+        this.registerStreamConsumerTimeout =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS))
+                        .map(Integer::parseInt)
+                        .map(Duration::ofSeconds)
+                        .orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_TIMEOUT);
+
+        this.deregisterStreamBaseBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_BASE))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_BACKOFF_BASE);
+        this.deregisterStreamMaxBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_MAX))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_BACKOFF_MAX);
+        this.deregisterStreamExpConstant =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT))
+                        .map(Double::parseDouble)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT);
+        this.deregisterStreamMaxRetries =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.DEREGISTER_STREAM_RETRIES))
+                        .map(Integer::parseInt)
+                        .orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_RETRIES);
+        this.deregisterStreamConsumerTimeout =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS))
+                        .map(Integer::parseInt)
+                        .map(Duration::ofSeconds)
+                        .orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_TIMEOUT);
+
+        this.describeStreamMaxRetries =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.STREAM_DESCRIBE_RETRIES))
+                        .map(Integer::parseInt)
+                        .orElse(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_RETRIES);
+        this.describeStreamBaseBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_BASE))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE);
+        this.describeStreamMaxBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_MAX))
+                        .map(Long::parseLong)
+                        .orElse(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX);
+        this.describeStreamExpConstant =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT))
+                        .map(Double::parseDouble)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT);
+        this.describeStreamConsumerMaxRetries =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants.DESCRIBE_STREAM_CONSUMER_RETRIES))
+                        .map(Integer::parseInt)
+                        .orElse(ConsumerConfigConstants.DEFAULT_DESCRIBE_STREAM_CONSUMER_RETRIES);
+        this.describeStreamConsumerBaseBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE))
+                        .map(Long::parseLong)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE);
+        this.describeStreamConsumerMaxBackoffMillis =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX))
+                        .map(Long::parseLong)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX);
+        this.describeStreamConsumerExpConstant =
+                Optional.ofNullable(
+                                configProps.getProperty(
+                                        ConsumerConfigConstants
+                                                .DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT))
+                        .map(Double::parseDouble)
+                        .orElse(
+                                ConsumerConfigConstants
+                                        .DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT);
+    }
+
+    // ------------------------------------------------------------------------
+    //  subscribeToShard() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Get maximum retry attempts for the subscribe to shard operation. */
+    public int getSubscribeToShardMaxRetries() {
+        return subscribeToShardMaxRetries;
+    }
+
+    /** Get timeout when waiting for a shard subscription to be established. */
+    public Duration getSubscribeToShardTimeout() {
+        return subscribeToShardTimeout;
+    }
+
+    /** Get maximum backoff millis for the subscribe to shard operation. */
+    public long getSubscribeToShardMaxBackoffMillis() {
+        return subscribeToShardMaxBackoffMillis;
+    }
+
+    /** Get base backoff millis for the subscribe to shard operation. */
+    public long getSubscribeToShardBaseBackoffMillis() {
+        return subscribeToShardBaseBackoffMillis;
+    }
+
+    /** Get exponential backoff power constant for the subscribe to shard operation. */
+    public double getSubscribeToShardExpConstant() {
+        return subscribeToShardExpConstant;
+    }
+
+    // ------------------------------------------------------------------------
+    //  registerStream() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Get base backoff millis for the register stream operation. */
+    public long getRegisterStreamBaseBackoffMillis() {
+        return registerStreamBaseBackoffMillis;
+    }
+
+    /** Get maximum backoff millis for the register stream operation. */
+    public long getRegisterStreamMaxBackoffMillis() {
+        return registerStreamMaxBackoffMillis;
+    }
+
+    /** Get exponential backoff power constant for the register stream operation. */
+    public double getRegisterStreamExpConstant() {
+        return registerStreamExpConstant;
+    }
+
+    /** Get maximum retry attempts for the register stream operation. */
+    public int getRegisterStreamMaxRetries() {
+        return registerStreamMaxRetries;
+    }
+
+    /** Get maximum duration to wait for a stream consumer to become active before giving up. */
+    public Duration getRegisterStreamConsumerTimeout() {
+        return registerStreamConsumerTimeout;
+    }
+
+    // ------------------------------------------------------------------------
+    //  deregisterStream() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Get base backoff millis for the deregister stream operation. */
+    public long getDeregisterStreamBaseBackoffMillis() {
+        return deregisterStreamBaseBackoffMillis;
+    }
+
+    /** Get maximum backoff millis for the deregister stream operation. */
+    public long getDeregisterStreamMaxBackoffMillis() {
+        return deregisterStreamMaxBackoffMillis;
+    }
+
+    /** Get exponential backoff power constant for the deregister stream operation. */
+    public double getDeregisterStreamExpConstant() {
+        return deregisterStreamExpConstant;
+    }
+
+    /** Get maximum retry attempts for the deregister stream operation. */
+    public int getDeregisterStreamMaxRetries() {
+        return deregisterStreamMaxRetries;
+    }
+
+    /** Get maximum duration to wait for a stream consumer to deregister before giving up. */
+    public Duration getDeregisterStreamConsumerTimeout() {
+        return deregisterStreamConsumerTimeout;
+    }
+
+    // ------------------------------------------------------------------------
+    //  describeStream() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Get maximum retry attempts for the describe stream operation. */
+    public int getDescribeStreamMaxRetries() {
+        return describeStreamMaxRetries;
+    }
+
+    /** Get base backoff millis for the describe stream operation. */
+    public long getDescribeStreamBaseBackoffMillis() {
+        return describeStreamBaseBackoffMillis;
+    }
+
+    /** Get maximum backoff millis for the describe stream operation. */
+    public long getDescribeStreamMaxBackoffMillis() {
+        return describeStreamMaxBackoffMillis;
+    }
+
+    /** Get exponential backoff power constant for the describe stream operation. */
+    public double getDescribeStreamExpConstant() {
+        return describeStreamExpConstant;
+    }
+
+    // ------------------------------------------------------------------------
+    //  describeStreamConsumer() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Get maximum retry attempts for the describe stream consumer operation. */
+    public int getDescribeStreamConsumerMaxRetries() {
+        return describeStreamConsumerMaxRetries;
+    }
+
+    /** Get base backoff millis for the describe stream consumer operation. */
+    public long getDescribeStreamConsumerBaseBackoffMillis() {
+        return describeStreamConsumerBaseBackoffMillis;
+    }
+
+    /** Get maximum backoff millis for the describe stream consumer operation. */
+    public long getDescribeStreamConsumerMaxBackoffMillis() {
+        return describeStreamConsumerMaxBackoffMillis;
+    }
+
+    /** Get exponential backoff power constant for the describe stream consumer operation. */
+    public double getDescribeStreamConsumerExpConstant() {
+        return describeStreamConsumerExpConstant;
+    }
+
+    /** Get efo registration type. */
+    public EFORegistrationType getEfoRegistrationType() {
+        return efoRegistrationType;
+    }
+
+    /** Get the consumer name; empty if the EFO registration type is 'NONE'. */
+    public Optional<String> getConsumerName() {
+        return Optional.ofNullable(consumerName);
+    }
+
+    /**
+     * Get the consumer ARN configured for the given stream; empty if the EFO registration type is
+     * 'LAZY' or 'EAGER'.
+     */
+    public Optional<String> getStreamConsumerArn(String stream) {
+        return Optional.ofNullable(streamConsumerArns.get(stream));
+    }
+}
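
To make the property mapping above concrete, here is a minimal sketch of how the EFO settings might
be assembled and passed to this configuration class. The stream name "my-stream" and consumer name
"my-efo-consumer" are hypothetical placeholders; any property left unset falls back to the
corresponding DEFAULT_* constant in ConsumerConfigConstants. This sketch is illustrative only, not
part of this patch.

    import java.util.Collections;
    import java.util.Properties;

    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;

    public class EfoConfigurationSketch {
        public static void main(String[] args) {
            Properties config = new Properties();
            // The EFO record publisher type is mandatory for this configuration class.
            config.setProperty(
                    ConsumerConfigConstants.RECORD_PUBLISHER_TYPE,
                    ConsumerConfigConstants.RecordPublisherType.EFO.toString());
            // LAZY/EAGER registration requires an explicit consumer name.
            config.setProperty(
                    ConsumerConfigConstants.EFO_REGISTRATION_TYPE,
                    ConsumerConfigConstants.EFORegistrationType.LAZY.toString());
            config.setProperty(ConsumerConfigConstants.EFO_CONSUMER_NAME, "my-efo-consumer");
            config.setProperty(ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_RETRIES, "5");

            FanOutRecordPublisherConfiguration configuration =
                    new FanOutRecordPublisherConfiguration(
                            config, Collections.singletonList("my-stream"));

            // Explicitly set values are parsed; everything else uses the documented defaults.
            System.out.println(configuration.getSubscribeToShardMaxRetries());
        }
    }
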
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
new file mode 100644
index 0000000..5752822
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+
+import java.util.Optional;
+import java.util.Properties;
+
+import static java.util.Collections.singletonList;
+
+/** A {@link RecordPublisher} factory used to create instances of {@link FanOutRecordPublisher}. */
+@Internal
+public class FanOutRecordPublisherFactory implements RecordPublisherFactory {
+
+    private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
+
+    /**
+     * A singleton {@link KinesisProxyV2} is used per Flink task. The {@link KinesisAsyncClient}
+     * uses an internal thread pool; using a single client reduces overhead.
+     */
+    private final KinesisProxyV2Interface kinesisProxy;
+
+    /**
+     * Instantiate a factory responsible for creating {@link FanOutRecordPublisher}.
+     *
+     * @param kinesisProxy the singleton proxy used by all record publishers created by this factory
+     */
+    public FanOutRecordPublisherFactory(final KinesisProxyV2Interface kinesisProxy) {
+        this.kinesisProxy = kinesisProxy;
+    }
+
+    /**
+     * Create a {@link FanOutRecordPublisher}.
+     *
+     * @param startingPosition the starting position in the shard to start consuming from
+     * @param consumerConfig the consumer configuration properties
+     * @param metricGroup the metric group to report metrics to
+     * @param streamShardHandle the shard this consumer is subscribed to
+     * @return a {@link FanOutRecordPublisher}
+     */
+    @Override
+    public FanOutRecordPublisher create(
+            final StartingPosition startingPosition,
+            final Properties consumerConfig,
+            final MetricGroup metricGroup,
+            final StreamShardHandle streamShardHandle) {
+        Preconditions.checkNotNull(startingPosition);
+        Preconditions.checkNotNull(consumerConfig);
+        Preconditions.checkNotNull(metricGroup);
+        Preconditions.checkNotNull(streamShardHandle);
+
+        String stream = streamShardHandle.getStreamName();
+        FanOutRecordPublisherConfiguration configuration =
+                new FanOutRecordPublisherConfiguration(consumerConfig, singletonList(stream));
+
+        Optional<String> streamConsumerArn = configuration.getStreamConsumerArn(stream);
+        Preconditions.checkState(streamConsumerArn.isPresent());
+
+        return new FanOutRecordPublisher(
+                startingPosition,
+                streamConsumerArn.get(),
+                streamShardHandle,
+                kinesisProxy,
+                configuration,
+                BACKOFF);
+    }
+
+    @Override
+    public void close() {
+        kinesisProxy.close();
+    }
+}
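
As a rough illustration of how the pieces above fit together: one factory (wrapping the shared
proxy) exists per task, and one publisher is created per subscribed shard. This wiring is performed
by the connector internals rather than user code, and the sketch below simply assumes its inputs are
supplied by the surrounding shard consumer; it is illustrative only, not part of this patch.

    import java.util.Properties;

    import org.apache.flink.metrics.MetricGroup;
    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherFactory;
    import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
    import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;

    /** Illustrative sketch only; not part of this patch. */
    class FanOutPublisherWiringSketch {

        /** One factory (and proxy) per task; one publisher per subscribed shard. */
        RecordPublisher publisherFor(
                FanOutRecordPublisherFactory factory,
                StartingPosition startingPosition,
                Properties consumerConfig,
                MetricGroup metricGroup,
                StreamShardHandle shardHandle) {
            // By the time create() is called, consumerConfig must contain the stream consumer ARN
            // for this shard's stream (see the checkState in the factory above).
            return factory.create(startingPosition, consumerConfig, metricGroup, shardHandle);
        }
    }
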
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
new file mode 100644
index 0000000..a280a8f
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
@@ -0,0 +1,609 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import io.netty.handler.timeout.ReadTimeoutException;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.StartingPosition;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.time.Duration;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * This class is responsible for acquiring an Enhanced Fan Out subscription and consuming records
+ * from a shard. A queue is used to buffer records between the Kinesis Proxy and Flink application.
+ * This allows processing to be separated from consumption; errors thrown in the consumption layer
+ * do not propagate up to the application.
+ *
+ * <pre>{@code [
+ * | ----------- Source Connector Thread ----------- |                      | --- KinesisAsyncClient Thread(s) -- |
+ * | FanOutRecordPublisher | FanOutShardSubscription | == blocking queue == | KinesisProxyV2 | KinesisAsyncClient |
+ * ]}</pre>
+ *
+ * <p>Three types of message are passed over the queue for inter-thread communication:
+ *
+ * <ul>
+ *   <li>{@link SubscriptionNextEvent} - passes data from the network to the consumer
+ *   <li>{@link SubscriptionCompleteEvent} - indicates a subscription has expired
+ *   <li>{@link SubscriptionErrorEvent} - passes an exception from the network to the consumer
+ * </ul>
+ *
+ * <p>The blocking queue has a maximum capacity of two. One slot is used for a record batch; the
+ * remaining slot is reserved for completion events. At maximum capacity we will have two {@link
+ * SubscribeToShardEvent}s in memory (per instance of this class):
+ *
+ * <ul>
+ *   <li>1 event being processed by the consumer
+ *   <li>1 event enqueued in the blocking queue
+ * </ul>
+ */
+@Internal
+public class FanOutShardSubscriber {
+
+    private static final Logger LOG = LoggerFactory.getLogger(FanOutShardSubscriber.class);
+
+    /**
+     * The maximum capacity of the queue between the network and consumption thread. The queue is
+     * mainly used to isolate networking from consumption such that errors do not bubble up. This
+     * queue also acts as a buffer resulting in a record prefetch and reduced latency. Capacity is 2
+     * to allow 1 pending record batch and leave room for a completion event to avoid any writer
+     * thread blocking on the queue.
+     */
+    private static final int QUEUE_CAPACITY = 2;
+
+    /**
+     * The SDK read timeout occurs after 30 seconds; this queue timeout is a sanity timeout to
+     * prevent lockup in unexpected error states. If the consumer does not receive a new event
+     * within this timeout it will back off and resubscribe.
+     */
+    private static final Duration DEFAULT_QUEUE_TIMEOUT = Duration.ofSeconds(35);
+
+    private final BlockingQueue<FanOutSubscriptionEvent> queue =
+            new LinkedBlockingQueue<>(QUEUE_CAPACITY);
+
+    private final AtomicReference<FanOutSubscriptionEvent> subscriptionErrorEvent =
+            new AtomicReference<>();
+
+    private final KinesisProxyV2Interface kinesis;
+
+    private final String consumerArn;
+
+    private final String shardId;
+
+    private final Duration subscribeToShardTimeout;
+
+    private final Duration queueWaitTimeout;
+
+    /**
+     * Create a new Fan Out Shard subscriber.
+     *
+     * @param consumerArn the stream consumer ARN
+     * @param shardId the shard ID to subscribe to
+     * @param kinesis the Kinesis Proxy used to communicate via AWS SDK v2
+     * @param subscribeToShardTimeout A timeout when waiting for a shard subscription to be
+     *     established
+     */
+    FanOutShardSubscriber(
+            final String consumerArn,
+            final String shardId,
+            final KinesisProxyV2Interface kinesis,
+            final Duration subscribeToShardTimeout) {
+        this(consumerArn, shardId, kinesis, subscribeToShardTimeout, DEFAULT_QUEUE_TIMEOUT);
+    }
+
+    /**
+     * Create a new Fan Out Shard Subscriber.
+     *
+     * @param consumerArn the stream consumer ARN
+     * @param shardId the shard ID to subscribe to
+     * @param kinesis the Kinesis Proxy used to communicate via AWS SDK v2
+     * @param subscribeToShardTimeout A timeout when waiting for a shard subscription to be
+     *     established
+     * @param queueWaitTimeout A timeout when enqueuing/de-queueing
+     */
+    @VisibleForTesting
+    FanOutShardSubscriber(
+            final String consumerArn,
+            final String shardId,
+            final KinesisProxyV2Interface kinesis,
+            final Duration subscribeToShardTimeout,
+            final Duration queueWaitTimeout) {
+        this.kinesis = Preconditions.checkNotNull(kinesis);
+        this.consumerArn = Preconditions.checkNotNull(consumerArn);
+        this.shardId = Preconditions.checkNotNull(shardId);
+        this.subscribeToShardTimeout = subscribeToShardTimeout;
+        this.queueWaitTimeout = queueWaitTimeout;
+    }
+
+    /**
+     * Obtains a subscription to the shard from the specified {@code startingPosition}. {@link
+     * SubscribeToShardEvent}s received from KDS are delivered to the given {@code eventConsumer}.
+     * Returns false if there are records left to consume from the shard.
+     *
+     * @param startingPosition the position in the stream in which to start receiving records
+     * @param eventConsumer the consumer to deliver received events to
+     * @return true if there are no more messages (complete), false if a subsequent subscription
+     *     should be obtained
+     * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+     * @throws InterruptedException when the thread is interrupted
+     */
+    boolean subscribeToShardAndConsumeRecords(
+            final StartingPosition startingPosition,
+            final Consumer<SubscribeToShardEvent> eventConsumer)
+            throws InterruptedException, FanOutSubscriberException {
+        LOG.debug("Subscribing to shard {} ({})", shardId, consumerArn);
+
+        final FanOutShardSubscription subscription;
+        try {
+            subscription = openSubscriptionToShard(startingPosition);
+        } catch (FanOutSubscriberException ex) {
+            // The only exception that should cause a failure is a ResourceNotFoundException
+            // Rethrow the exception to trigger the application to terminate
+            if (ex.getCause() instanceof ResourceNotFoundException) {
+                throw (ResourceNotFoundException) ex.getCause();
+            }
+
+            throw ex;
+        }
+
+        return consumeAllRecordsFromKinesisShard(eventConsumer, subscription);
+    }
+
+    /**
+     * Calls {@link KinesisProxyV2#subscribeToShard} and waits to acquire a subscription. In the
+     * event a non-recoverable error occurs this method will rethrow the exception. Once the
+     * subscription is acquired the client signals to the producer that we are ready to receive
+     * records.
+     *
+     * @param startingPosition the position in which to start consuming from
+     * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+     */
+    private FanOutShardSubscription openSubscriptionToShard(final StartingPosition startingPosition)
+            throws FanOutSubscriberException, InterruptedException {
+        SubscribeToShardRequest request =
+                SubscribeToShardRequest.builder()
+                        .consumerARN(consumerArn)
+                        .shardId(shardId)
+                        .startingPosition(startingPosition)
+                        .build();
+
+        AtomicReference<Throwable> exception = new AtomicReference<>();
+        CountDownLatch waitForSubscriptionLatch = new CountDownLatch(1);
+        FanOutShardSubscription subscription =
+                new FanOutShardSubscription(waitForSubscriptionLatch);
+
+        SubscribeToShardResponseHandler responseHandler =
+                SubscribeToShardResponseHandler.builder()
+                        .onError(
+                                e -> {
+                                    // Errors that occur while trying to acquire a subscription are
+                                    // only thrown from here
+                                    // Errors that occur during the subscription are surfaced here
+                                    // and to the FanOutShardSubscription
+                                    // (errors are ignored here once the subscription is open)
+                                    if (waitForSubscriptionLatch.getCount() > 0) {
+                                        exception.set(e);
+                                        waitForSubscriptionLatch.countDown();
+                                    }
+                                })
+                        .subscriber(() -> subscription)
+                        .build();
+
+        kinesis.subscribeToShard(request, responseHandler);
+
+        boolean subscriptionEstablished =
+                waitForSubscriptionLatch.await(
+                        subscribeToShardTimeout.toMillis(), TimeUnit.MILLISECONDS);
+
+        if (!subscriptionEstablished) {
+            final String errorMessage =
+                    "Timed out acquiring subscription - " + shardId + " (" + consumerArn + ")";
+            LOG.error(errorMessage);
+            subscription.cancelSubscription();
+            handleError(
+                    new RecoverableFanOutSubscriberException(new TimeoutException(errorMessage)));
+        }
+
+        Throwable throwable = exception.get();
+        if (throwable != null) {
+            handleError(throwable);
+        }
+
+        LOG.debug("Acquired subscription - {} ({})", shardId, consumerArn);
+
+        // Request the first record to kick off consumption
+        // Following requests are made by the FanOutShardSubscription on the netty thread
+        subscription.requestRecord();
+
+        return subscription;
+    }
+
+    /**
+     * Update the reference to the latest networking error in this object. Parent caller can
+     * interrogate to decide how to handle error.
+     *
+     * @param throwable the exception that has occurred
+     */
+    private void handleError(final Throwable throwable) throws FanOutSubscriberException {
+        Throwable cause;
+        if (throwable instanceof CompletionException || throwable instanceof ExecutionException) {
+            cause = throwable.getCause();
+        } else {
+            cause = throwable;
+        }
+
+        LOG.warn(
+                "Error occurred on EFO subscription: {} - ({}).  {} ({})",
+                throwable.getClass().getName(),
+                throwable.getMessage(),
+                shardId,
+                consumerArn,
+                cause);
+
+        if (isInterrupted(throwable)) {
+            throw new FanOutSubscriberInterruptedException(throwable);
+        } else if (cause instanceof FanOutSubscriberException) {
+            throw (FanOutSubscriberException) cause;
+        } else if (cause instanceof ReadTimeoutException) {
+            // ReadTimeoutException occurs naturally under backpressure scenarios when full batches
+            // take longer to process than the standard read timeout (default 30s). Recoverable
+            // exceptions are intended to be retried indefinitely to avoid system degradation under
+            // backpressure. The EFO connection (subscription) to Kinesis is closed, and reacquired
+            // once the queue of records has been processed.
+            throw new RecoverableFanOutSubscriberException(cause);
+        } else {
+            throw new RetryableFanOutSubscriberException(cause);
+        }
+    }
+
+    private boolean isInterrupted(final Throwable throwable) {
+        Throwable cause = throwable;
+        while (cause != null) {
+            if (cause instanceof InterruptedException) {
+                return true;
+            }
+
+            cause = cause.getCause();
+        }
+
+        return false;
+    }
+
+    /**
+     * Once the subscription is open, records will be delivered to the {@link BlockingQueue}. The
+     * queue has a small fixed capacity ({@code QUEUE_CAPACITY}) and is used solely to separate
+     * consumption from processing; as a side effect it pre-fetches records and reduces latency.
+     * This method will poll the queue and exit under any of these conditions:
+     *
+     * <ul>
+     *   <li>{@code continuationSequenceNumber} is {@code null}, indicating the shard is complete
+     *   <li>The subscription expires, indicated by a {@link SubscriptionCompleteEvent}
+     *   <li>There is an error while consuming records, indicated by a {@link SubscriptionErrorEvent}
+     * </ul>
+     *
+     * @param eventConsumer the event consumer to deliver records to
+     * @param subscription the subscription we are subscribed to
+     * @return true if there are no more messages (complete), false if a subsequent subscription
+     *     should be obtained
+     * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+     * @throws InterruptedException when the thread is interrupted
+     */
+    private boolean consumeAllRecordsFromKinesisShard(
+            final Consumer<SubscribeToShardEvent> eventConsumer,
+            final FanOutShardSubscription subscription)
+            throws InterruptedException, FanOutSubscriberException {
+        String continuationSequenceNumber;
+        boolean result = true;
+
+        do {
+            FanOutSubscriptionEvent subscriptionEvent;
+            if (subscriptionErrorEvent.get() != null) {
+                subscriptionEvent = subscriptionErrorEvent.get();
+            } else {
+                // Read timeout occurs after 30 seconds, add a sanity timeout to prevent lockup
+                subscriptionEvent = queue.poll(queueWaitTimeout.toMillis(), MILLISECONDS);
+            }
+
+            if (subscriptionEvent == null) {
+                LOG.info(
+                        "Timed out polling events from network, reacquiring subscription - {} ({})",
+                        shardId,
+                        consumerArn);
+                result = false;
+                break;
+            } else if (subscriptionEvent.isSubscribeToShardEvent()) {
+                // Request for KDS to send the next record batch
+                subscription.requestRecord();
+
+                SubscribeToShardEvent event = subscriptionEvent.getSubscribeToShardEvent();
+                continuationSequenceNumber = event.continuationSequenceNumber();
+                if (!event.records().isEmpty()) {
+                    eventConsumer.accept(event);
+                }
+            } else if (subscriptionEvent.isSubscriptionComplete()) {
+                // The subscription is complete, but the shard might not be, so we return incomplete
+                return false;
+            } else {
+                handleError(subscriptionEvent.getThrowable());
+                result = false;
+                break;
+            }
+        } while (continuationSequenceNumber != null);
+
+        subscription.cancelSubscription();
+        return result;
+    }
+
+    /**
+     * The {@link FanOutShardSubscription} subscribes to the events coming from KDS and adds them to
+     * the {@link BlockingQueue}. Backpressure is applied based on the maximum capacity of the
+     * queue. The {@link Subscriber} methods of this class are invoked by a thread from the {@link
+     * KinesisAsyncClient}.
+     */
+    private class FanOutShardSubscription implements Subscriber<SubscribeToShardEventStream> {
+
+        private Subscription subscription;
+
+        private volatile boolean cancelled = false;
+
+        private final CountDownLatch waitForSubscriptionLatch;
+
+        private FanOutShardSubscription(final CountDownLatch waitForSubscriptionLatch) {
+            this.waitForSubscriptionLatch = waitForSubscriptionLatch;
+        }
+
+        /** Flag to the producer that we are ready to receive more events. */
+        void requestRecord() {
+            if (!cancelled) {
+                LOG.debug(
+                        "Requesting more records from EFO subscription - {} ({})",
+                        shardId,
+                        consumerArn);
+                subscription.request(1);
+            }
+        }
+
+        @Override
+        public void onSubscribe(Subscription subscription) {
+            this.subscription = subscription;
+            waitForSubscriptionLatch.countDown();
+        }
+
+        @Override
+        public void onNext(SubscribeToShardEventStream subscribeToShardEventStream) {
+            subscribeToShardEventStream.accept(
+                    new SubscribeToShardResponseHandler.Visitor() {
+                        @Override
+                        public void visit(SubscribeToShardEvent event) {
+                            enqueueEvent(new SubscriptionNextEvent(event));
+                        }
+                    });
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            LOG.debug(
+                    "Error occurred on EFO subscription: {} - ({}).  {} ({})",
+                    throwable.getClass().getName(),
+                    throwable.getMessage(),
+                    shardId,
+                    consumerArn,
+                    throwable);
+
+            SubscriptionErrorEvent subscriptionErrorEvent = new SubscriptionErrorEvent(throwable);
+            if (FanOutShardSubscriber.this.subscriptionErrorEvent.get() == null) {
+                FanOutShardSubscriber.this.subscriptionErrorEvent.set(subscriptionErrorEvent);
+            } else {
+                LOG.warn("Error already queued. Ignoring subsequent exception.", throwable);
+            }
+
+            // Cancel the subscription to signal the onNext to stop requesting data
+            cancelSubscription();
+
+            // If there is space in the queue, insert the error to wake up blocked thread
+            if (queue.isEmpty()) {
+                queue.offer(subscriptionErrorEvent);
+            }
+        }
+
+        @Override
+        public void onComplete() {
+            LOG.debug("EFO subscription complete - {} ({})", shardId, consumerArn);
+            enqueueEvent(new SubscriptionCompleteEvent());
+        }
+
+        private void cancelSubscription() {
+            if (cancelled) {
+                return;
+            }
+            cancelled = true;
+
+            if (subscription != null) {
+                subscription.cancel();
+            }
+        }
+
+        /**
+         * Adds the event to the queue, blocking until it is accepted or the wait timeout expires.
+         *
+         * @param event the event to enqueue
+         */
+        private void enqueueEvent(final FanOutSubscriptionEvent event) {
+            if (cancelled) {
+                return;
+            }
+
+            try {
+                if (!queue.offer(event, queueWaitTimeout.toMillis(), TimeUnit.MILLISECONDS)) {
+                    final String errorMessage =
+                            "Timed out enqueuing event "
+                                    + event.getClass().getSimpleName()
+                                    + " - "
+                                    + shardId
+                                    + " ("
+                                    + consumerArn
+                                    + ")";
+                    LOG.error(errorMessage);
+                    onError(
+                            new RecoverableFanOutSubscriberException(
+                                    new TimeoutException(errorMessage)));
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    /** An exception wrapper to indicate an error has been thrown from the networking stack. */
+    abstract static class FanOutSubscriberException extends Exception {
+
+        private static final long serialVersionUID = -3899472233945299730L;
+
+        public FanOutSubscriberException(Throwable cause) {
+            super(cause);
+        }
+    }
+
+    /**
+     * An exception wrapper to indicate a retryable error has been thrown from the networking stack.
+ * Retryable errors are subject to the Subscribe to Shard retry policy. If the configured number
+ * of retries is exceeded, the application will terminate.
+     */
+    static class RetryableFanOutSubscriberException extends FanOutSubscriberException {
+
+        private static final long serialVersionUID = -2967281117554404883L;
+
+        public RetryableFanOutSubscriberException(Throwable cause) {
+            super(cause);
+        }
+    }
+
+    /**
+     * An exception wrapper to indicate a recoverable error has been thrown from the networking
+     * stack. Recoverable errors are not counted in the retry policy.
+     */
+    static class RecoverableFanOutSubscriberException extends FanOutSubscriberException {
+
+        private static final long serialVersionUID = -3223347557038294482L;
+
+        public RecoverableFanOutSubscriberException(Throwable cause) {
+            super(cause);
+        }
+    }
+
+    /** An exception wrapper to indicate the subscriber has been interrupted. */
+    static class FanOutSubscriberInterruptedException extends FanOutSubscriberException {
+
+        private static final long serialVersionUID = -2783477408630427189L;
+
+        public FanOutSubscriberInterruptedException(Throwable cause) {
+            super(cause);
+        }
+    }
+
+    /**
+     * An interface used to pass messages between {@link FanOutShardSubscription} and {@link
+     * FanOutShardSubscriber} via the {@link BlockingQueue}.
+     */
+    private interface FanOutSubscriptionEvent {
+
+        default boolean isSubscribeToShardEvent() {
+            return false;
+        }
+
+        default boolean isSubscriptionComplete() {
+            return false;
+        }
+
+        default SubscribeToShardEvent getSubscribeToShardEvent() {
+            throw new UnsupportedOperationException(
+                    "This event does not support getSubscribeToShardEvent()");
+        }
+
+        default Throwable getThrowable() {
+            throw new UnsupportedOperationException("This event does not support getThrowable()");
+        }
+    }
+
+    /** Indicates that an EFO subscription has completed/expired. */
+    private static class SubscriptionCompleteEvent implements FanOutSubscriptionEvent {
+
+        @Override
+        public boolean isSubscriptionComplete() {
+            return true;
+        }
+    }
+
+    /** Poison pill, indicates that an error occurred while consuming from KDS. */
+    private static class SubscriptionErrorEvent implements FanOutSubscriptionEvent {
+        private final Throwable throwable;
+
+        private SubscriptionErrorEvent(Throwable throwable) {
+            this.throwable = throwable;
+        }
+
+        @Override
+        public Throwable getThrowable() {
+            return throwable;
+        }
+    }
+
+    /** A wrapper to pass the next {@link SubscribeToShardEvent} between threads. */
+    private static class SubscriptionNextEvent implements FanOutSubscriptionEvent {
+        private final SubscribeToShardEvent subscribeToShardEvent;
+
+        private SubscriptionNextEvent(SubscribeToShardEvent subscribeToShardEvent) {
+            this.subscribeToShardEvent = subscribeToShardEvent;
+        }
+
+        @Override
+        public boolean isSubscribeToShardEvent() {
+            return true;
+        }
+
+        @Override
+        public SubscribeToShardEvent getSubscribeToShardEvent() {
+            return subscribeToShardEvent;
+        }
+    }
+}
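The backpressure scheme described in the FanOutShardSubscription javadoc above (a bounded queue plus one-record-at-a-time demand) can be illustrated with a minimal, self-contained sketch. The class below is not part of the connector; its name and its capacity/timeout parameters are made up for illustration, and it uses java.util.concurrent.Flow rather than the SDK's reactive types:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.Flow.Subscriber;
    import java.util.concurrent.Flow.Subscription;
    import java.util.concurrent.TimeUnit;

    /** Illustrative only: bounded queue + one-at-a-time demand = backpressure. */
    final class BoundedEventSubscriber<T> implements Subscriber<T> {

        private final BlockingQueue<T> queue;
        private final long offerTimeoutMillis;
        private volatile Subscription subscription;

        BoundedEventSubscriber(int capacity, long offerTimeoutMillis) {
            this.queue = new ArrayBlockingQueue<>(capacity);
            this.offerTimeoutMillis = offerTimeoutMillis;
        }

        @Override
        public void onSubscribe(Subscription subscription) {
            this.subscription = subscription;
            subscription.request(1); // ask for exactly one element up front
        }

        @Override
        public void onNext(T item) {
            try {
                // Block (up to a timeout) until the consuming thread has made room in the queue.
                if (!queue.offer(item, offerTimeoutMillis, TimeUnit.MILLISECONDS)) {
                    subscription.cancel(); // consumer too slow; stop rather than buffer unboundedly
                    return;
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                subscription.cancel();
                return;
            }
            subscription.request(1); // only now signal demand for the next element
        }

        @Override
        public void onError(Throwable throwable) {
            // A real implementation would hand the error over to the consuming thread.
        }

        @Override
        public void onComplete() {
            // A real implementation would enqueue a completion marker.
        }

        /** Called by the consuming thread to drain events. */
        T take() throws InterruptedException {
            return queue.take();
        }
    }

Because demand is only re-signalled after the queue accepts an element, a slow consumer automatically throttles the producing thread.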
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
new file mode 100644
index 0000000..9bf5e18
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisException.FlinkKinesisTimeoutException;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse;
+import software.amazon.awssdk.services.kinesis.model.ResourceInUseException;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+
+import javax.annotation.Nullable;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.LAZY;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.ACTIVE;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.DELETING;
+
+/**
+ * Responsible for registering and deregistering EFO stream consumers. Will block until consumers
+ * are ready.
+ */
+@Internal
+public class StreamConsumerRegistrar {
+
+    private static final Logger LOG = LoggerFactory.getLogger(StreamConsumerRegistrar.class);
+
+    private final KinesisProxyV2Interface kinesisProxyV2Interface;
+
+    private final FanOutRecordPublisherConfiguration configuration;
+
+    private final FullJitterBackoff backoff;
+
+    public StreamConsumerRegistrar(
+            final KinesisProxyV2Interface kinesisProxyV2Interface,
+            final FanOutRecordPublisherConfiguration configuration,
+            final FullJitterBackoff backoff) {
+        this.kinesisProxyV2Interface = Preconditions.checkNotNull(kinesisProxyV2Interface);
+        this.configuration = Preconditions.checkNotNull(configuration);
+        this.backoff = Preconditions.checkNotNull(backoff);
+    }
+
+    /**
+     * Register a stream consumer with the given name against the given stream. Blocks until the
+     * consumer becomes active. If the stream consumer already exists, the ARN is returned.
+     *
+     * @param stream the stream to register the stream consumer against
+     * @param streamConsumerName the name of the new stream consumer
+     * @return the stream consumer ARN
+     * @throws ExecutionException
+     * @throws InterruptedException
+     */
+    public String registerStreamConsumer(final String stream, final String streamConsumerName)
+            throws ExecutionException, InterruptedException {
+        LOG.debug("Registering stream consumer - {}::{}", stream, streamConsumerName);
+
+        int attempt = 1;
+
+        if (configuration.getEfoRegistrationType() == LAZY) {
+            registrationBackoff(configuration, backoff, attempt++);
+        }
+
+        DescribeStreamSummaryResponse describeStreamSummaryResponse =
+                kinesisProxyV2Interface.describeStreamSummary(stream);
+        String streamArn = describeStreamSummaryResponse.streamDescriptionSummary().streamARN();
+
+        LOG.debug("Found stream ARN - {}", streamArn);
+
+        Optional<DescribeStreamConsumerResponse> describeStreamConsumerResponse =
+                describeStreamConsumer(streamArn, streamConsumerName);
+
+        if (!describeStreamConsumerResponse.isPresent()) {
+            invokeIgnoringResourceInUse(
+                    () ->
+                            kinesisProxyV2Interface.registerStreamConsumer(
+                                    streamArn, streamConsumerName));
+        }
+
+        String streamConsumerArn =
+                waitForConsumerToBecomeActive(
+                        describeStreamConsumerResponse.orElse(null),
+                        streamArn,
+                        streamConsumerName,
+                        attempt);
+
+        LOG.debug("Using stream consumer - {}", streamConsumerArn);
+
+        return streamConsumerArn;
+    }
+
+    /**
+     * Deregister the stream consumer with the given ARN. Blocks until the consumer is deleted.
+     *
+     * @param stream the stream in which to deregister the consumer
+     * @throws ExecutionException
+     * @throws InterruptedException
+     */
+    public void deregisterStreamConsumer(final String stream)
+            throws InterruptedException, ExecutionException {
+        LOG.debug("Deregistering stream consumer - {}", stream);
+
+        int attempt = 1;
+        String streamConsumerArn = getStreamConsumerArn(stream);
+
+        deregistrationBackoff(configuration, backoff, attempt++);
+
+        Optional<DescribeStreamConsumerResponse> response =
+                describeStreamConsumer(streamConsumerArn);
+        if (response.isPresent()
+                && response.get().consumerDescription().consumerStatus() != DELETING) {
+            invokeIgnoringResourceInUse(
+                    () -> kinesisProxyV2Interface.deregisterStreamConsumer(streamConsumerArn));
+        }
+
+        waitForConsumerToDeregister(response.orElse(null), streamConsumerArn, attempt);
+
+        LOG.debug("Deregistered stream consumer - {}", streamConsumerArn);
+    }
+
+    /** Destroy any open resources used by the factory. */
+    public void close() {
+        kinesisProxyV2Interface.close();
+    }
+
+    @VisibleForTesting
+    void registrationBackoff(
+            final FanOutRecordPublisherConfiguration configuration,
+            final FullJitterBackoff backoff,
+            int attempt)
+            throws InterruptedException {
+        long backoffMillis =
+                backoff.calculateFullJitterBackoff(
+                        configuration.getRegisterStreamBaseBackoffMillis(),
+                        configuration.getRegisterStreamMaxBackoffMillis(),
+                        configuration.getRegisterStreamExpConstant(),
+                        attempt);
+
+        backoff.sleep(backoffMillis);
+    }
+
+    @VisibleForTesting
+    void deregistrationBackoff(
+            final FanOutRecordPublisherConfiguration configuration,
+            final FullJitterBackoff backoff,
+            int attempt)
+            throws InterruptedException {
+        long backoffMillis =
+                backoff.calculateFullJitterBackoff(
+                        configuration.getDeregisterStreamBaseBackoffMillis(),
+                        configuration.getDeregisterStreamMaxBackoffMillis(),
+                        configuration.getDeregisterStreamExpConstant(),
+                        attempt);
+
+        backoff.sleep(backoffMillis);
+    }
+
+    private String waitForConsumerToBecomeActive(
+            @Nullable final DescribeStreamConsumerResponse describeStreamConsumerResponse,
+            final String streamArn,
+            final String streamConsumerName,
+            final int initialAttempt)
+            throws InterruptedException, ExecutionException {
+        int attempt = initialAttempt;
+
+        Instant start = Instant.now();
+        Duration timeout = configuration.getRegisterStreamConsumerTimeout();
+
+        DescribeStreamConsumerResponse response = describeStreamConsumerResponse;
+        while (response == null || response.consumerDescription().consumerStatus() != ACTIVE) {
+            LOG.debug(
+                    "Waiting for stream consumer to become active, attempt {} - {} on {}",
+                    attempt,
+                    streamConsumerName,
+                    streamArn);
+            registrationBackoff(configuration, backoff, attempt++);
+            response =
+                    kinesisProxyV2Interface.describeStreamConsumer(streamArn, streamConsumerName);
+
+            if (Duration.between(start, Instant.now()).compareTo(timeout) > 0) {
+                throw new FlinkKinesisTimeoutException(
+                        "Timeout waiting for stream consumer to become active: "
+                                + streamConsumerName
+                                + " on "
+                                + streamArn);
+            }
+        }
+
+        return response.consumerDescription().consumerARN();
+    }
+
+    private void waitForConsumerToDeregister(
+            @Nullable final DescribeStreamConsumerResponse describeStreamConsumerResponse,
+            final String streamConsumerArn,
+            final int initialAttempt)
+            throws InterruptedException, ExecutionException {
+        int attempt = initialAttempt;
+
+        Instant start = Instant.now();
+        Duration timeout = configuration.getDeregisterStreamConsumerTimeout();
+
+        Optional<DescribeStreamConsumerResponse> response =
+                Optional.ofNullable(describeStreamConsumerResponse);
+        while (response.isPresent()
+                && response.get().consumerDescription().consumerStatus() != DELETING) {
+            LOG.debug(
+                    "Waiting for stream consumer to deregister, attempt {} - {}",
+                    attempt,
+                    streamConsumerArn);
+            deregistrationBackoff(configuration, backoff, attempt++);
+            response = describeStreamConsumer(streamConsumerArn);
+
+            if (Duration.between(start, Instant.now()).compareTo(timeout) > 0) {
+                throw new FlinkKinesisTimeoutException(
+                        "Timeout waiting for stream consumer to deregister: " + streamConsumerArn);
+            }
+        }
+    }
+
+    private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(
+            final String streamArn, final String streamConsumerName)
+            throws InterruptedException, ExecutionException {
+        return describeStreamConsumer(
+                () ->
+                        kinesisProxyV2Interface.describeStreamConsumer(
+                                streamArn, streamConsumerName));
+    }
+
+    private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(
+            final String streamConsumerArn) throws InterruptedException, ExecutionException {
+        return describeStreamConsumer(
+                () -> kinesisProxyV2Interface.describeStreamConsumer(streamConsumerArn));
+    }
+
+    private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(
+            final ResponseSupplier<DescribeStreamConsumerResponse> responseSupplier)
+            throws InterruptedException, ExecutionException {
+        DescribeStreamConsumerResponse response;
+
+        try {
+            response = responseSupplier.get();
+        } catch (ExecutionException ex) {
+            if (isResourceNotFound(ex)) {
+                return Optional.empty();
+            }
+
+            throw ex;
+        }
+
+        return Optional.ofNullable(response);
+    }
+
+    private <T> void invokeIgnoringResourceInUse(final ResponseSupplier<T> responseSupplier)
+            throws InterruptedException, ExecutionException {
+        try {
+            responseSupplier.get();
+        } catch (ExecutionException ex) {
+            if (isResourceInUse(ex)) {
+                // The stream consumer may have been created since we performed the describe
+                return;
+            }
+
+            throw ex;
+        }
+    }
+
+    private boolean isResourceNotFound(final ExecutionException ex) {
+        return ex.getCause() instanceof ResourceNotFoundException;
+    }
+
+    private boolean isResourceInUse(final ExecutionException ex) {
+        return ex.getCause() instanceof ResourceInUseException;
+    }
+
+    private String getStreamConsumerArn(final String stream) {
+        Optional<String> streamConsumerArn = configuration.getStreamConsumerArn(stream);
+        if (!streamConsumerArn.isPresent()) {
+            throw new IllegalArgumentException(
+                    "Stream consumer ARN not found for stream: " + stream);
+        }
+
+        return streamConsumerArn.get();
+    }
+
+    private interface ResponseSupplier<T> {
+        T get() throws ExecutionException, InterruptedException;
+    }
+}
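As a rough sketch of how a caller might drive the registrar above: the proxy and configuration construction are elided, the class and method names below are hypothetical, and the no-argument FullJitterBackoff construction is an assumption; only StreamConsumerRegistrar, FullJitterBackoff, and the register call itself come from this change.

    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.StreamConsumerRegistrar;
    import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
    import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;

    import java.util.concurrent.ExecutionException;

    /** Hypothetical caller; the real wiring lives in the fetcher and record publisher factory. */
    final class EfoConsumerLifecycleSketch {

        static String ensureStreamConsumer(
                final KinesisProxyV2Interface proxy,
                final FanOutRecordPublisherConfiguration configuration,
                final String stream,
                final String consumerName)
                throws ExecutionException, InterruptedException {
            // Assumption: FullJitterBackoff can be constructed without arguments.
            StreamConsumerRegistrar registrar =
                    new StreamConsumerRegistrar(proxy, configuration, new FullJitterBackoff());

            // Blocks until the consumer is ACTIVE; if it already exists, its ARN is simply returned.
            return registrar.registerStreamConsumer(stream, consumerName);
        }
    }

Deregistration follows the same pattern via deregisterStreamConsumer(stream), which looks up the consumer ARN from the configuration and blocks until the consumer is being deleted or no longer exists.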
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/AdaptivePollingRecordPublisher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/AdaptivePollingRecordPublisher.java
new file mode 100644
index 0000000..2de4655
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/AdaptivePollingRecordPublisher.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.metrics.PollingRecordPublisherMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+
+/**
+ * An adaptive record publisher that adds a dynamic batch read size to {@link PollingRecordPublisher}.
+ * Kinesis streams have quotas on both transactions per second and throughput. This class attempts
+ * to stay within those quotas and mitigate backoff errors.
+ */
+@Internal
+public class AdaptivePollingRecordPublisher extends PollingRecordPublisher {
+    // AWS Kinesis has a read limit of 2 MB/sec
+    // https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html
+    private static final long KINESIS_SHARD_BYTES_PER_SECOND_LIMIT = 2 * 1024L * 1024L;
+
+    private int lastRecordBatchSize = 0;
+
+    private long lastRecordBatchSizeInBytes = 0;
+
+    private long processingStartTimeNanos = System.nanoTime();
+
+    private int maxNumberOfRecordsPerFetch;
+
+    private final PollingRecordPublisherMetricsReporter metricsReporter;
+
+    AdaptivePollingRecordPublisher(
+            final StartingPosition startingPosition,
+            final StreamShardHandle subscribedShard,
+            final PollingRecordPublisherMetricsReporter metricsReporter,
+            final KinesisProxyInterface kinesisProxy,
+            final int maxNumberOfRecordsPerFetch,
+            final long fetchIntervalMillis)
+            throws InterruptedException {
+        super(
+                startingPosition,
+                subscribedShard,
+                metricsReporter,
+                kinesisProxy,
+                maxNumberOfRecordsPerFetch,
+                fetchIntervalMillis);
+        this.maxNumberOfRecordsPerFetch = maxNumberOfRecordsPerFetch;
+        this.metricsReporter = metricsReporter;
+    }
+
+    @Override
+    public RecordPublisherRunResult run(final RecordBatchConsumer consumer)
+            throws InterruptedException {
+        final RecordPublisherRunResult result =
+                super.run(
+                        batch -> {
+                            SequenceNumber latestSequenceNumber = consumer.accept(batch);
+                            lastRecordBatchSize = batch.getDeaggregatedRecordSize();
+                            lastRecordBatchSizeInBytes = batch.getTotalSizeInBytes();
+                            return latestSequenceNumber;
+                        },
+                        maxNumberOfRecordsPerFetch);
+
+        long endTimeNanos = System.nanoTime();
+        long runLoopTimeNanos = endTimeNanos - processingStartTimeNanos;
+
+        maxNumberOfRecordsPerFetch =
+                adaptRecordsToRead(
+                        runLoopTimeNanos,
+                        lastRecordBatchSize,
+                        lastRecordBatchSizeInBytes,
+                        maxNumberOfRecordsPerFetch);
+
+        processingStartTimeNanos = endTimeNanos;
+
+        return result;
+    }
+
+    /**
+     * Calculates how many records to read each time through the loop based on a target throughput
+     * and the measured frequency of the loop.
+     *
+     * @param runLoopTimeNanos The total time of one pass through the loop
+     * @param numRecords The number of records of the last read operation
+     * @param recordBatchSizeBytes The total batch size of the last read operation
+     * @param maxNumberOfRecordsPerFetch The current maxNumberOfRecordsPerFetch
+     */
+    private int adaptRecordsToRead(
+            long runLoopTimeNanos,
+            int numRecords,
+            long recordBatchSizeBytes,
+            int maxNumberOfRecordsPerFetch) {
+        if (numRecords != 0 && runLoopTimeNanos != 0) {
+            long averageRecordSizeBytes = recordBatchSizeBytes / numRecords;
+            // Adjust the number of records to fetch from the shard based on the current average
+            // record size, to make the most of the 2 MB/sec shard read limit
+            double loopFrequencyHz = 1000000000.0d / runLoopTimeNanos;
+            double bytesPerRead = KINESIS_SHARD_BYTES_PER_SECOND_LIMIT / loopFrequencyHz;
+            maxNumberOfRecordsPerFetch = (int) (bytesPerRead / averageRecordSizeBytes);
+            // Ensure the value is greater than 0 and not more than 10000L
+            maxNumberOfRecordsPerFetch =
+                    Math.max(
+                            1,
+                            Math.min(
+                                    maxNumberOfRecordsPerFetch,
+                                    ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_MAX));
+
+            // Set metrics
+            metricsReporter.setLoopFrequencyHz(loopFrequencyHz);
+            metricsReporter.setBytesPerRead(bytesPerRead);
+        }
+        return maxNumberOfRecordsPerFetch;
+    }
+}
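To make the adaptation above concrete, the snippet below restates the same arithmetic with made-up inputs (a 200 ms loop and ~1 KiB records); it is a standalone illustration, not connector code:

    /** Standalone restatement of adaptRecordsToRead with example inputs; illustrative only. */
    final class AdaptiveFetchSizeExample {
        private static final long SHARD_BYTES_PER_SECOND_LIMIT = 2 * 1024L * 1024L; // 2 MB/sec

        public static void main(String[] args) {
            long runLoopTimeNanos = 200_000_000L; // the last loop took 200 ms
            int numRecords = 500;                 // records returned by the last fetch
            long batchSizeBytes = 500 * 1024L;    // 1 KiB per record on average

            long averageRecordSizeBytes = batchSizeBytes / numRecords;            // 1024
            double loopFrequencyHz = 1_000_000_000.0d / runLoopTimeNanos;         // 5 Hz
            double bytesPerRead = SHARD_BYTES_PER_SECOND_LIMIT / loopFrequencyHz; // ~419,430 bytes
            int nextFetchSize = (int) (bytesPerRead / averageRecordSizeBytes);    // ~409 records

            nextFetchSize = Math.max(1, Math.min(nextFetchSize, 10_000)); // clamp as the publisher does

            System.out.println("Next maxNumberOfRecordsPerFetch: " + nextFetchSize); // prints 409
        }
    }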
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
new file mode 100644
index 0000000..70e48a4
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.metrics.PollingRecordPublisherMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.util.Preconditions;
+
+import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM;
+
+/**
+ * A {@link RecordPublisher} that will read records from Kinesis and forward them to the subscriber.
+ * Records are consumed by polling the GetRecords KDS API using a ShardIterator.
+ */
+@Internal
+public class PollingRecordPublisher implements RecordPublisher {
+
+    private static final Logger LOG = LoggerFactory.getLogger(PollingRecordPublisher.class);
+
+    private final PollingRecordPublisherMetricsReporter metricsReporter;
+
+    private final KinesisProxyInterface kinesisProxy;
+
+    private final StreamShardHandle subscribedShard;
+
+    private String nextShardItr;
+
+    private StartingPosition nextStartingPosition;
+
+    private final int maxNumberOfRecordsPerFetch;
+
+    private final long fetchIntervalMillis;
+
+    private long processingStartTimeNanos = System.nanoTime();
+
+    /**
+     * A polling implementation of {@link RecordPublisher} that polls Kinesis for records. The
+     * following KDS APIs are used: GetRecords and GetShardIterator.
+     *
+     * @param startingPosition the position in the stream to start consuming from
+     * @param subscribedShard the shard in which to consume from
+     * @param metricsReporter a metric reporter used to output metrics
+     * @param kinesisProxy the proxy used to communicate with kinesis
+     * @param maxNumberOfRecordsPerFetch the maximum number of records to retrieve per batch
+     * @param fetchIntervalMillis the target interval between each GetRecords invocation
+     */
+    PollingRecordPublisher(
+            final StartingPosition startingPosition,
+            final StreamShardHandle subscribedShard,
+            final PollingRecordPublisherMetricsReporter metricsReporter,
+            final KinesisProxyInterface kinesisProxy,
+            final int maxNumberOfRecordsPerFetch,
+            final long fetchIntervalMillis)
+            throws InterruptedException {
+        this.nextStartingPosition = Preconditions.checkNotNull(startingPosition);
+        this.subscribedShard = Preconditions.checkNotNull(subscribedShard);
+        this.metricsReporter = Preconditions.checkNotNull(metricsReporter);
+        this.kinesisProxy = Preconditions.checkNotNull(kinesisProxy);
+        this.maxNumberOfRecordsPerFetch = maxNumberOfRecordsPerFetch;
+        this.fetchIntervalMillis = fetchIntervalMillis;
+
+        Preconditions.checkArgument(fetchIntervalMillis >= 0);
+        Preconditions.checkArgument(maxNumberOfRecordsPerFetch > 0);
+
+        this.nextShardItr = getShardIterator();
+    }
+
+    @Override
+    public RecordPublisherRunResult run(final RecordBatchConsumer consumer)
+            throws InterruptedException {
+        return run(consumer, maxNumberOfRecordsPerFetch);
+    }
+
+    public RecordPublisherRunResult run(final RecordBatchConsumer consumer, int maxNumberOfRecords)
+            throws InterruptedException {
+        if (nextShardItr == null) {
+            return COMPLETE;
+        }
+
+        metricsReporter.setMaxNumberOfRecordsPerFetch(maxNumberOfRecords);
+
+        GetRecordsResult result = getRecords(nextShardItr, maxNumberOfRecords);
+
+        RecordBatch recordBatch =
+                new RecordBatch(
+                        result.getRecords(), subscribedShard, result.getMillisBehindLatest());
+        SequenceNumber latestSequenceNumber = consumer.accept(recordBatch);
+
+        nextStartingPosition = getNextStartingPosition(latestSequenceNumber);
+        nextShardItr = result.getNextShardIterator();
+
+        long adjustmentEndTimeNanos =
+                adjustRunLoopFrequency(processingStartTimeNanos, System.nanoTime());
+        long runLoopTimeNanos = adjustmentEndTimeNanos - processingStartTimeNanos;
+
+        processingStartTimeNanos = adjustmentEndTimeNanos;
+        metricsReporter.setRunLoopTimeNanos(runLoopTimeNanos);
+
+        return nextShardItr == null ? COMPLETE : INCOMPLETE;
+    }
+
+    private StartingPosition getNextStartingPosition(final SequenceNumber latestSequenceNumber) {
+        // When consuming from a timestamp sentinel (AT_TIMESTAMP ShardIteratorType), an empty
+        // first RecordBatch leaves the latestSequenceNumber at the timestamp sentinel, because no
+        // real sequence numbers have been received on this shard yet. In that case we retry from
+        // the previous starting position (AT_TIMESTAMP).
+        if (SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get().equals(latestSequenceNumber)) {
+            Preconditions.checkState(nextStartingPosition.getShardIteratorType() == AT_TIMESTAMP);
+            return nextStartingPosition;
+        } else {
+            return StartingPosition.continueFromSequenceNumber(latestSequenceNumber);
+        }
+    }
+
+    /**
+     * Calls {@link KinesisProxyInterface#getRecords(String, int)}, while also handling unexpected
+     * AWS {@link ExpiredIteratorException}s to ensure that we get results and don't just fail on
+     * such occasions. The returned shard iterator within the successful {@link GetRecordsResult}
+     * should be used for the next call to this method.
+     *
+     * <p>Note: it is important that this method is not called again before all the records from the
+     * last result have been fully collected with {@code
+     * ShardConsumer#deserializeRecordForCollectionAndUpdateState(UserRecord)}, otherwise {@code
+     * ShardConsumer#lastSequenceNum} may refer to a sub-record in the middle of an aggregated
+     * record, leading to incorrect shard iteration if the iterator had to be refreshed.
+     *
+     * @param shardItr shard iterator to use
+     * @param maxNumberOfRecords the maximum number of records to fetch for this getRecords attempt
+     * @return get records result
+     */
+    private GetRecordsResult getRecords(String shardItr, int maxNumberOfRecords)
+            throws InterruptedException {
+        GetRecordsResult getRecordsResult = null;
+        while (getRecordsResult == null) {
+            try {
+                getRecordsResult = kinesisProxy.getRecords(shardItr, maxNumberOfRecords);
+            } catch (ExpiredIteratorException eiEx) {
+                LOG.warn(
+                        "Encountered an unexpected expired iterator {} for shard {};"
+                                + " refreshing the iterator ...",
+                        shardItr,
+                        subscribedShard);
+
+                shardItr = getShardIterator();
+
+                // sleep for the fetch interval before the next getRecords attempt with the
+                // refreshed iterator
+                if (fetchIntervalMillis != 0) {
+                    Thread.sleep(fetchIntervalMillis);
+                }
+            }
+        }
+        return getRecordsResult;
+    }
+
+    /**
+     * Returns a shard iterator for the given {@link SequenceNumber}.
+     *
+     * @return shard iterator
+     */
+    @Nullable
+    private String getShardIterator() throws InterruptedException {
+        return kinesisProxy.getShardIterator(
+                subscribedShard,
+                nextStartingPosition.getShardIteratorType().toString(),
+                nextStartingPosition.getStartingMarker());
+    }
+
+    /**
+     * Adjusts loop timing to match target frequency if specified.
+     *
+     * @param processingStartTimeNanos The start time of the run loop "work"
+     * @param processingEndTimeNanos The end time of the run loop "work"
+     * @return The System.nanoTime() after the sleep (if any)
+     * @throws InterruptedException
+     */
+    private long adjustRunLoopFrequency(long processingStartTimeNanos, long processingEndTimeNanos)
+            throws InterruptedException {
+        long endTimeNanos = processingEndTimeNanos;
+        if (fetchIntervalMillis != 0) {
+            long processingTimeNanos = processingEndTimeNanos - processingStartTimeNanos;
+            long sleepTimeMillis = fetchIntervalMillis - (processingTimeNanos / 1_000_000);
+            if (sleepTimeMillis > 0) {
+                Thread.sleep(sleepTimeMillis);
+                endTimeNanos = System.nanoTime();
+                metricsReporter.setSleepTimeMillis(sleepTimeMillis);
+            }
+        }
+        return endTimeNanos;
+    }
+}
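The throttling at the end of the class is simple enough to show with concrete (made-up) numbers: with fetchIntervalMillis = 200 and a GetRecords pass that needed 120 ms of work, the publisher sleeps the remaining 80 ms so that successive fetches stay roughly 200 ms apart. A standalone restatement:

    /** Illustrative only: the same arithmetic as adjustRunLoopFrequency above. */
    final class FetchIntervalExample {
        public static void main(String[] args) throws InterruptedException {
            long fetchIntervalMillis = 200L;         // configured target interval
            long processingTimeNanos = 120_000_000L; // the GetRecords pass took 120 ms
            long sleepTimeMillis = fetchIntervalMillis - (processingTimeNanos / 1_000_000);
            if (sleepTimeMillis > 0) {
                Thread.sleep(sleepTimeMillis);       // sleeps ~80 ms, keeping fetches ~200 ms apart
            }
        }
    }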
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfiguration.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfiguration.java
new file mode 100644
index 0000000..871aaa2
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfiguration.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+
+import java.util.Properties;
+
+/** A configuration class for {@link PollingRecordPublisher} instantiated from a properties map. */
+@Internal
+public class PollingRecordPublisherConfiguration {
+
+    private final boolean adaptiveReads;
+
+    private final int maxNumberOfRecordsPerFetch;
+
+    private final long fetchIntervalMillis;
+
+    public PollingRecordPublisherConfiguration(final Properties consumerConfig) {
+        this.maxNumberOfRecordsPerFetch =
+                Integer.parseInt(
+                        consumerConfig.getProperty(
+                                ConsumerConfigConstants.SHARD_GETRECORDS_MAX,
+                                Integer.toString(
+                                        ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_MAX)));
+
+        this.fetchIntervalMillis =
+                Long.parseLong(
+                        consumerConfig.getProperty(
+                                ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETRECORDS_INTERVAL_MILLIS)));
+
+        this.adaptiveReads =
+                Boolean.parseBoolean(
+                        consumerConfig.getProperty(
+                                ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS,
+                                Boolean.toString(
+                                        ConsumerConfigConstants.DEFAULT_SHARD_USE_ADAPTIVE_READS)));
+    }
+
+    public boolean isAdaptiveReads() {
+        return adaptiveReads;
+    }
+
+    public int getMaxNumberOfRecordsPerFetch() {
+        return maxNumberOfRecordsPerFetch;
+    }
+
+    public long getFetchIntervalMillis() {
+        return fetchIntervalMillis;
+    }
+}
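For reference, this is how a consumer configuration exercising the three properties read above might look (the values are arbitrary examples, not recommendations):

    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisherConfiguration;

    import java.util.Properties;

    final class PollingConfigExample {
        public static void main(String[] args) {
            Properties consumerConfig = new Properties();
            consumerConfig.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_MAX, "5000");
            consumerConfig.setProperty(
                    ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, "200");
            consumerConfig.setProperty(ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS, "true");

            PollingRecordPublisherConfiguration configuration =
                    new PollingRecordPublisherConfiguration(consumerConfig);
            System.out.println(configuration.isAdaptiveReads()); // true
        }
    }

With SHARD_USE_ADAPTIVE_READS set to true, the factory in the next file hands out an AdaptivePollingRecordPublisher instead of the plain PollingRecordPublisher.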
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
new file mode 100644
index 0000000..4880d35
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher.FlinkKinesisProxyFactory;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.metrics.PollingRecordPublisherMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.util.Preconditions;
+
+import java.util.Properties;
+
+/** A {@link RecordPublisher} factory used to create instances of {@link PollingRecordPublisher}. */
+@Internal
+public class PollingRecordPublisherFactory implements RecordPublisherFactory {
+
+    private final FlinkKinesisProxyFactory kinesisProxyFactory;
+
+    public PollingRecordPublisherFactory(final FlinkKinesisProxyFactory kinesisProxyFactory) {
+        this.kinesisProxyFactory = kinesisProxyFactory;
+    }
+
+    /**
+     * Create a {@link PollingRecordPublisher}. An {@link AdaptivePollingRecordPublisher} will be
+     * created should adaptive reads be enabled in the configuration.
+     *
+     * @param startingPosition the position in the shard to start consuming records from
+     * @param consumerConfig the consumer configuration properties
+     * @param metricGroup the metric group to report metrics to
+     * @param streamShardHandle the shard this consumer is subscribed to
+     * @return a {@link PollingRecordPublisher}
+     */
+    @Override
+    public PollingRecordPublisher create(
+            final StartingPosition startingPosition,
+            final Properties consumerConfig,
+            final MetricGroup metricGroup,
+            final StreamShardHandle streamShardHandle)
+            throws InterruptedException {
+        Preconditions.checkNotNull(startingPosition);
+        Preconditions.checkNotNull(consumerConfig);
+        Preconditions.checkNotNull(metricGroup);
+        Preconditions.checkNotNull(streamShardHandle);
+
+        final PollingRecordPublisherConfiguration configuration =
+                new PollingRecordPublisherConfiguration(consumerConfig);
+        final PollingRecordPublisherMetricsReporter metricsReporter =
+                new PollingRecordPublisherMetricsReporter(metricGroup);
+        final KinesisProxyInterface kinesisProxy = kinesisProxyFactory.create(consumerConfig);
+
+        if (configuration.isAdaptiveReads()) {
+            return new AdaptivePollingRecordPublisher(
+                    startingPosition,
+                    streamShardHandle,
+                    metricsReporter,
+                    kinesisProxy,
+                    configuration.getMaxNumberOfRecordsPerFetch(),
+                    configuration.getFetchIntervalMillis());
+        } else {
+            return new PollingRecordPublisher(
+                    startingPosition,
+                    streamShardHandle,
+                    metricsReporter,
+                    kinesisProxy,
+                    configuration.getMaxNumberOfRecordsPerFetch(),
+                    configuration.getFetchIntervalMillis());
+        }
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
new file mode 100644
index 0000000..5b7135a
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/KinesisConsumerMetricConstants.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.metrics;
+
+import org.apache.flink.annotation.Internal;
+
+/**
+ * A collection of consumer metric related constant names.
+ *
+ * <p>The names must not be changed, as that would break backwards compatibility for the consumer
+ * metrics.
+ */
+@Internal
+public class KinesisConsumerMetricConstants {
+
+    public static final String KINESIS_CONSUMER_METRICS_GROUP = "KinesisConsumer";
+
+    public static final String STREAM_METRICS_GROUP = "stream";
+    public static final String SHARD_METRICS_GROUP = "shardId";
+
+    public static final String MILLIS_BEHIND_LATEST_GAUGE = "millisBehindLatest";
+    public static final String SLEEP_TIME_MILLIS = "sleepTimeMillis";
+    public static final String MAX_RECORDS_PER_FETCH = "maxNumberOfRecordsPerFetch";
+    public static final String NUM_AGGREGATED_RECORDS_PER_FETCH =
+            "numberOfAggregatedRecordsPerFetch";
+    public static final String NUM_DEAGGREGATED_RECORDS_PER_FETCH =
+            "numberOfDeaggregatedRecordsPerFetch";
+    public static final String AVG_RECORD_SIZE_BYTES = "averageRecordSizeBytes";
+    public static final String RUNTIME_LOOP_NANOS = "runLoopTimeNanos";
+    public static final String LOOP_FREQUENCY_HZ = "loopFrequencyHz";
+    public static final String BYTES_PER_READ = "bytesRequestedPerFetch";
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporter.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporter.java
new file mode 100644
index 0000000..c03534b
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporter.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.metrics;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisher;
+
+/** A container for {@link PollingRecordPublisher}s to report metric values. */
+@Internal
+public class PollingRecordPublisherMetricsReporter {
+
+    private volatile double loopFrequencyHz = 0.0;
+    private volatile double bytesPerRead = 0.0;
+    private volatile long runLoopTimeNanos = 0L;
+    private volatile long sleepTimeMillis = 0L;
+    private volatile int maxNumberOfRecordsPerFetch = 0;
+
+    public PollingRecordPublisherMetricsReporter(final MetricGroup metricGroup) {
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.MAX_RECORDS_PER_FETCH,
+                this::getMaxNumberOfRecordsPerFetch);
+        metricGroup.gauge(KinesisConsumerMetricConstants.BYTES_PER_READ, this::getBytesPerRead);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.RUNTIME_LOOP_NANOS, this::getRunLoopTimeNanos);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.LOOP_FREQUENCY_HZ, this::getLoopFrequencyHz);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.SLEEP_TIME_MILLIS, this::getSleepTimeMillis);
+    }
+
+    public double getLoopFrequencyHz() {
+        return loopFrequencyHz;
+    }
+
+    public void setLoopFrequencyHz(double loopFrequencyHz) {
+        this.loopFrequencyHz = loopFrequencyHz;
+    }
+
+    public double getBytesPerRead() {
+        return bytesPerRead;
+    }
+
+    public void setBytesPerRead(double bytesPerRead) {
+        this.bytesPerRead = bytesPerRead;
+    }
+
+    public long getRunLoopTimeNanos() {
+        return runLoopTimeNanos;
+    }
+
+    public void setRunLoopTimeNanos(long runLoopTimeNanos) {
+        this.runLoopTimeNanos = runLoopTimeNanos;
+    }
+
+    public long getSleepTimeMillis() {
+        return sleepTimeMillis;
+    }
+
+    public void setSleepTimeMillis(long sleepTimeMillis) {
+        this.sleepTimeMillis = sleepTimeMillis;
+    }
+
+    public int getMaxNumberOfRecordsPerFetch() {
+        return maxNumberOfRecordsPerFetch;
+    }
+
+    public void setMaxNumberOfRecordsPerFetch(int maxNumberOfRecordsPerFetch) {
+        this.maxNumberOfRecordsPerFetch = maxNumberOfRecordsPerFetch;
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporter.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporter.java
new file mode 100644
index 0000000..d8fdf99
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporter.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.metrics;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.runtime.metrics.groups.AbstractMetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumer;
+
+/** A container for {@link ShardConsumer}s to report metric values. */
+@Internal
+public class ShardConsumerMetricsReporter {
+
+    private final MetricGroup metricGroup;
+
+    private volatile long millisBehindLatest = -1;
+    private volatile long averageRecordSizeBytes = 0L;
+    private volatile int numberOfAggregatedRecords = 0;
+    private volatile int numberOfDeaggregatedRecords = 0;
+
+    public ShardConsumerMetricsReporter(final MetricGroup metricGroup) {
+        this.metricGroup = metricGroup;
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE,
+                this::getMillisBehindLatest);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.NUM_AGGREGATED_RECORDS_PER_FETCH,
+                this::getNumberOfAggregatedRecords);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.NUM_DEAGGREGATED_RECORDS_PER_FETCH,
+                this::getNumberOfDeaggregatedRecords);
+        metricGroup.gauge(
+                KinesisConsumerMetricConstants.AVG_RECORD_SIZE_BYTES,
+                this::getAverageRecordSizeBytes);
+    }
+
+    public long getMillisBehindLatest() {
+        return millisBehindLatest;
+    }
+
+    public void setMillisBehindLatest(long millisBehindLatest) {
+        this.millisBehindLatest = millisBehindLatest;
+    }
+
+    public long getAverageRecordSizeBytes() {
+        return averageRecordSizeBytes;
+    }
+
+    public void setAverageRecordSizeBytes(long averageRecordSizeBytes) {
+        this.averageRecordSizeBytes = averageRecordSizeBytes;
+    }
+
+    public int getNumberOfAggregatedRecords() {
+        return numberOfAggregatedRecords;
+    }
+
+    public void setNumberOfAggregatedRecords(int numberOfAggregatedRecords) {
+        this.numberOfAggregatedRecords = numberOfAggregatedRecords;
+    }
+
+    public int getNumberOfDeaggregatedRecords() {
+        return numberOfDeaggregatedRecords;
+    }
+
+    public void setNumberOfDeaggregatedRecords(int numberOfDeaggregatedRecords) {
+        this.numberOfDeaggregatedRecords = numberOfDeaggregatedRecords;
+    }
+
+    public void unregister() {
+        if (this.metricGroup instanceof AbstractMetricGroup) {
+            ((AbstractMetricGroup) this.metricGroup).close();
+        }
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandle.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandle.java
new file mode 100644
index 0000000..ba6e99d
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandle.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import com.amazonaws.services.kinesis.model.Shard;
+
+/** DynamoDB streams shard handle format and utilities. */
+public class DynamoDBStreamsShardHandle extends StreamShardHandle {
+    public static final String SHARDID_PREFIX = "shardId-";
+    public static final int SHARDID_PREFIX_LEN = SHARDID_PREFIX.length();
+
+    public DynamoDBStreamsShardHandle(String streamName, Shard shard) {
+        super(streamName, shard);
+    }
+
+    public static int compareShardIds(String firstShardId, String secondShardId) {
+        if (!isValidShardId(firstShardId)) {
+            throw new IllegalArgumentException(
+                    String.format("The first shard id %s has invalid format.", firstShardId));
+        } else if (!isValidShardId(secondShardId)) {
+            throw new IllegalArgumentException(
+                    String.format("The second shard id %s has invalid format.", secondShardId));
+        }
+
+        return firstShardId
+                .substring(SHARDID_PREFIX_LEN)
+                .compareTo(secondShardId.substring(SHARDID_PREFIX_LEN));
+    }
+
+    /**
+     * A DynamoDB Streams shard id is a string of 28 to 65 characters. (See
+     * https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_Shard.html)
+     *
+     * <p>The shard id usually takes the format "shardId-00000001536805703746-69688cb1", where
+     * "shardId-" is a prefix, followed by a 20-digit timestamp string and up to 36 further
+     * characters, separated by '-'. Following this format, child shards created during a
+     * re-sharding event are expected to have shard ids greater than their parents.
+     *
+     * @param shardId shard Id
+     * @return boolean indicating whether the given shard id is valid
+     */
+    public static boolean isValidShardId(String shardId) {
+        return shardId == null ? false : shardId.matches("^shardId-\\d{20}-{0,1}\\w{0,36}");
+    }
+}
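
For reference (not part of the patch; class name and shard ids are illustrative values following the documented format), a minimal sketch of how the validation and ordering utilities above behave:

import org.apache.flink.streaming.connectors.kinesis.model.DynamoDBStreamsShardHandle;

public class ShardIdFormatExample {
    public static void main(String[] args) {
        String parent = "shardId-00000001536805703746-69688cb1";
        String child = "shardId-00000001536805703747-2f64a22d";

        // Both ids match the "shardId-" + 20-digit timestamp + optional suffix pattern.
        System.out.println(DynamoDBStreamsShardHandle.isValidShardId(parent)); // true
        System.out.println(DynamoDBStreamsShardHandle.isValidShardId("not-a-shard")); // false

        // The child carries a later timestamp, so it compares greater than its parent.
        System.out.println(DynamoDBStreamsShardHandle.compareShardIds(parent, child) < 0); // true
    }
}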
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
new file mode 100644
index 0000000..65197af
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShard.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+
+import com.amazonaws.services.kinesis.model.Shard;
+
+import java.io.Serializable;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A legacy serializable representation of an AWS Kinesis Stream shard. It is basically a wrapper
+ * class around the information provided along with {@link
+ * com.amazonaws.services.kinesis.model.Shard}.
+ *
+ * @deprecated Will be removed in a future version in favor of {@link StreamShardHandle}.
+ */
+@Deprecated
+@Internal
+public class KinesisStreamShard implements Serializable {
+
+    private static final long serialVersionUID = -6004217801761077536L;
+
+    private final String streamName;
+    private final Shard shard;
+
+    private final int cachedHash;
+
+    /**
+     * Create a new KinesisStreamShard.
+     *
+     * @param streamName the name of the Kinesis stream that this shard belongs to
+     * @param shard the actual AWS Shard instance that will be wrapped within this
+     *     KinesisStreamShard
+     */
+    public KinesisStreamShard(String streamName, Shard shard) {
+        this.streamName = checkNotNull(streamName);
+        this.shard = checkNotNull(shard);
+
+        // Since our description of a Kinesis Streams shard is fully defined by the stream name
+        // and shard id, the hash doesn't need to include the hash code of Amazon's Shard
+        // description, which uses other information for its calculation.
+        int hash = 17;
+        hash = 37 * hash + streamName.hashCode();
+        hash = 37 * hash + shard.getShardId().hashCode();
+        this.cachedHash = hash;
+    }
+
+    public String getStreamName() {
+        return streamName;
+    }
+
+    public boolean isClosed() {
+        return (shard.getSequenceNumberRange().getEndingSequenceNumber() != null);
+    }
+
+    public Shard getShard() {
+        return shard;
+    }
+
+    @Override
+    public String toString() {
+        return "KinesisStreamShard{"
+                + "streamName='"
+                + streamName
+                + "'"
+                + ", shard='"
+                + shard.toString()
+                + "'}";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof KinesisStreamShard)) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        KinesisStreamShard other = (KinesisStreamShard) obj;
+
+        return streamName.equals(other.getStreamName()) && shard.equals(other.getShard());
+    }
+
+    @Override
+    public int hashCode() {
+        return cachedHash;
+    }
+
+    /**
+     * Utility function to convert {@link KinesisStreamShard} into the new {@link
+     * StreamShardMetadata} model.
+     *
+     * @param kinesisStreamShard the {@link KinesisStreamShard} to be converted
+     * @return the converted {@link StreamShardMetadata}
+     */
+    public static StreamShardMetadata convertToStreamShardMetadata(
+            KinesisStreamShard kinesisStreamShard) {
+        StreamShardMetadata streamShardMetadata = new StreamShardMetadata();
+
+        streamShardMetadata.setStreamName(kinesisStreamShard.getStreamName());
+        streamShardMetadata.setShardId(kinesisStreamShard.getShard().getShardId());
+        streamShardMetadata.setParentShardId(kinesisStreamShard.getShard().getParentShardId());
+        streamShardMetadata.setAdjacentParentShardId(
+                kinesisStreamShard.getShard().getAdjacentParentShardId());
+
+        if (kinesisStreamShard.getShard().getHashKeyRange() != null) {
+            streamShardMetadata.setStartingHashKey(
+                    kinesisStreamShard.getShard().getHashKeyRange().getStartingHashKey());
+            streamShardMetadata.setEndingHashKey(
+                    kinesisStreamShard.getShard().getHashKeyRange().getEndingHashKey());
+        }
+
+        if (kinesisStreamShard.getShard().getSequenceNumberRange() != null) {
+            streamShardMetadata.setStartingSequenceNumber(
+                    kinesisStreamShard
+                            .getShard()
+                            .getSequenceNumberRange()
+                            .getStartingSequenceNumber());
+            streamShardMetadata.setEndingSequenceNumber(
+                    kinesisStreamShard
+                            .getShard()
+                            .getSequenceNumberRange()
+                            .getEndingSequenceNumber());
+        }
+
+        return streamShardMetadata;
+    }
+}
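
As an illustration only (the class name, stream name, shard id and sequence number are made-up values), converting the legacy wrapper into the SDK-independent StreamShardMetadata could look like this sketch:

import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;
import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShard;
import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;

public class ConvertShardExample {
    public static void main(String[] args) {
        Shard shard =
                new Shard()
                        .withShardId("shardId-000000000000")
                        .withSequenceNumberRange(
                                new SequenceNumberRange().withStartingSequenceNumber("4959123456"));

        KinesisStreamShard legacy = new KinesisStreamShard("example-stream", shard);
        StreamShardMetadata metadata = KinesisStreamShard.convertToStreamShardMetadata(legacy);

        // The metadata holds only plain strings, so it stays serializable across SDK upgrades.
        System.out.println(metadata.getShardId()); // shardId-000000000000
        System.out.println(metadata.getStartingSequenceNumber()); // 4959123456
    }
}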
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
new file mode 100644
index 0000000..e4731fc
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/KinesisStreamShardState.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.util.Preconditions;
+
+import com.amazonaws.services.kinesis.model.Shard;
+
+/**
+ * A wrapper class that bundles a {@link StreamShardHandle} with its last processed sequence number.
+ */
+@Internal
+public class KinesisStreamShardState {
+
+    /** A handle object that wraps the actual {@link Shard} instance and stream name. */
+    private StreamShardHandle streamShardHandle;
+
+    /** The checkpointed state for each Kinesis stream shard. */
+    private StreamShardMetadata streamShardMetadata;
+
+    private SequenceNumber lastProcessedSequenceNum;
+
+    public KinesisStreamShardState(
+            StreamShardMetadata streamShardMetadata,
+            StreamShardHandle streamShardHandle,
+            SequenceNumber lastProcessedSequenceNum) {
+
+        this.streamShardMetadata = Preconditions.checkNotNull(streamShardMetadata);
+        this.streamShardHandle = Preconditions.checkNotNull(streamShardHandle);
+        this.lastProcessedSequenceNum = Preconditions.checkNotNull(lastProcessedSequenceNum);
+    }
+
+    public StreamShardMetadata getStreamShardMetadata() {
+        return this.streamShardMetadata;
+    }
+
+    public StreamShardHandle getStreamShardHandle() {
+        return this.streamShardHandle;
+    }
+
+    public SequenceNumber getLastProcessedSequenceNum() {
+        return this.lastProcessedSequenceNum;
+    }
+
+    public void setLastProcessedSequenceNum(SequenceNumber update) {
+        this.lastProcessedSequenceNum = update;
+    }
+
+    @Override
+    public String toString() {
+        return "KinesisStreamShardState{"
+                + "streamShardMetadata='"
+                + streamShardMetadata.toString()
+                + "'"
+                + ", streamShardHandle='"
+                + streamShardHandle.toString()
+                + "'"
+                + ", lastProcessedSequenceNumber='"
+                + lastProcessedSequenceNum.toString()
+                + "'}";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof KinesisStreamShardState)) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        KinesisStreamShardState other = (KinesisStreamShardState) obj;
+
+        return streamShardMetadata.equals(other.getStreamShardMetadata())
+                && streamShardHandle.equals(other.getStreamShardHandle())
+                && lastProcessedSequenceNum.equals(other.getLastProcessedSequenceNum());
+    }
+
+    @Override
+    public int hashCode() {
+        return 37
+                * (streamShardMetadata.hashCode()
+                        + streamShardHandle.hashCode()
+                        + lastProcessedSequenceNum.hashCode());
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
new file mode 100644
index 0000000..f86d838
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumber.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
+import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
+
+/**
+ * Special flag values for sequence numbers in shards to indicate special positions. The value is
+ * initially set by {@link FlinkKinesisConsumer} when {@link KinesisDataFetcher}s are created. The
+ * KinesisDataFetchers will use this value to determine how to retrieve the starting shard iterator
+ * from AWS Kinesis.
+ */
+@Internal
+public enum SentinelSequenceNumber {
+
+    /**
+     * Flag value for shard's sequence numbers to indicate that the shard should start to be read
+     * from the latest incoming records.
+     */
+    SENTINEL_LATEST_SEQUENCE_NUM(new SequenceNumber("LATEST_SEQUENCE_NUM")),
+
+    /**
+     * Flag value for shard's sequence numbers to indicate that the shard should start to be read
+     * from the earliest records that haven't expired yet.
+     */
+    SENTINEL_EARLIEST_SEQUENCE_NUM(new SequenceNumber("EARLIEST_SEQUENCE_NUM")),
+
+    /**
+     * Flag value for shard's sequence numbers to indicate that the shard should start to be read
+     * from the specified timestamp.
+     */
+    SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM(new SequenceNumber("AT_TIMESTAMP_SEQUENCE_NUM")),
+
+    /**
+     * Flag value to indicate that we have already read the last record of this shard (Note: Kinesis
+     * shards that have been closed due to a split or merge will have an ending data record).
+     */
+    SENTINEL_SHARD_ENDING_SEQUENCE_NUM(new SequenceNumber("SHARD_ENDING_SEQUENCE_NUM"));
+
+    private SequenceNumber sentinel;
+
+    SentinelSequenceNumber(SequenceNumber sentinel) {
+        this.sentinel = sentinel;
+    }
+
+    public SequenceNumber get() {
+        return sentinel;
+    }
+
+    /** Returns {@code true} if the given {@link SequenceNumber} is a sentinel. */
+    public static boolean isSentinelSequenceNumber(SequenceNumber candidateSequenceNumber) {
+        for (SentinelSequenceNumber sentinel : values()) {
+            if (candidateSequenceNumber.equals(sentinel.get())) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
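
A small sketch (illustrative only; class name and values are made up) of how a restored shard position might be interpreted: sentinels map to shard iterator types, while any other value is a concrete record position.

import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;

public class SentinelExample {
    public static void main(String[] args) {
        SequenceNumber startFresh = SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get();
        SequenceNumber restored = new SequenceNumber("4959123456"); // made-up record position

        // Sentinels tell the fetcher which iterator type to request (e.g. TRIM_HORIZON);
        // real sequence numbers mean consumption resumes relative to a concrete record.
        System.out.println(SentinelSequenceNumber.isSentinelSequenceNumber(startFresh)); // true
        System.out.println(SentinelSequenceNumber.isSentinelSequenceNumber(restored)); // false
    }
}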
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
new file mode 100644
index 0000000..16f6185
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/SequenceNumber.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+
+import java.io.Serializable;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A serializable representation of a Kinesis record's sequence number. It has two fields: the main
+ * sequence number, and also a subsequence number. If this {@link SequenceNumber} is referring to an
+ * aggregated Kinesis record, the subsequence number will be a non-negative value representing the
+ * order of the sub-record within the aggregation.
+ */
+@Internal
+public class SequenceNumber implements Serializable {
+
+    private static final long serialVersionUID = 876972197938972667L;
+
+    private static final String DELIMITER = "-";
+
+    private final String sequenceNumber;
+    private final long subSequenceNumber;
+
+    private final int cachedHash;
+
+    /**
+     * Create a new instance for a non-aggregated Kinesis record without a subsequence number.
+     *
+     * @param sequenceNumber the sequence number
+     */
+    public SequenceNumber(String sequenceNumber) {
+        this(sequenceNumber, -1);
+    }
+
+    /**
+     * Create a new instance with the specified sequence number and subsequence number. To
+     * represent the sequence number for a non-aggregated Kinesis record, the subsequence number
+     * should be -1. Otherwise, give a non-negative subsequence number to represent an aggregated
+     * Kinesis record.
+     *
+     * @param sequenceNumber the sequence number
+     * @param subSequenceNumber the subsequence number (-1 to represent non-aggregated Kinesis
+     *     records)
+     */
+    public SequenceNumber(String sequenceNumber, long subSequenceNumber) {
+        this.sequenceNumber = checkNotNull(sequenceNumber);
+        this.subSequenceNumber = subSequenceNumber;
+
+        this.cachedHash =
+                37 * (sequenceNumber.hashCode() + Long.valueOf(subSequenceNumber).hashCode());
+    }
+
+    public boolean isAggregated() {
+        return subSequenceNumber >= 0;
+    }
+
+    public String getSequenceNumber() {
+        return sequenceNumber;
+    }
+
+    public long getSubSequenceNumber() {
+        return subSequenceNumber;
+    }
+
+    @Override
+    public String toString() {
+        if (isAggregated()) {
+            return sequenceNumber + DELIMITER + subSequenceNumber;
+        } else {
+            return sequenceNumber;
+        }
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof SequenceNumber)) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        SequenceNumber other = (SequenceNumber) obj;
+
+        return sequenceNumber.equals(other.getSequenceNumber())
+                && (subSequenceNumber == other.getSubSequenceNumber());
+    }
+
+    @Override
+    public int hashCode() {
+        return cachedHash;
+    }
+}
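
As a rough example of the two-part representation (class name and values are made up): a non-aggregated record keeps the subsequence number at -1, while a sub-record of an aggregated record carries its position within the aggregate.

import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;

public class SequenceNumberExample {
    public static void main(String[] args) {
        SequenceNumber plain = new SequenceNumber("4959123456");
        SequenceNumber subRecord = new SequenceNumber("4959123456", 2);

        System.out.println(plain.isAggregated()); // false
        System.out.println(subRecord.isAggregated()); // true
        System.out.println(subRecord); // "4959123456-2": sequence number plus sub-record index
    }
}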
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPosition.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPosition.java
new file mode 100644
index 0000000..2cb9b26
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPosition.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+
+import javax.annotation.Nullable;
+
+import java.util.Date;
+
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.AFTER_SEQUENCE_NUMBER;
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.AT_SEQUENCE_NUMBER;
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.LATEST;
+import static com.amazonaws.services.kinesis.model.ShardIteratorType.TRIM_HORIZON;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.isSentinelSequenceNumber;
+
+/** The position in which to start consuming from a stream. */
+@Internal
+public class StartingPosition {
+
+    private final ShardIteratorType shardIteratorType;
+
+    private final Object startingMarker;
+
+    private StartingPosition(
+            final ShardIteratorType shardIteratorType, @Nullable final Object startingMarker) {
+        this.shardIteratorType = shardIteratorType;
+        this.startingMarker = startingMarker;
+    }
+
+    public ShardIteratorType getShardIteratorType() {
+        return shardIteratorType;
+    }
+
+    @Nullable
+    public Object getStartingMarker() {
+        return startingMarker;
+    }
+
+    public static StartingPosition fromTimestamp(final Date date) {
+        return new StartingPosition(AT_TIMESTAMP, date);
+    }
+
+    /**
+     * Returns the starting position for the next record to consume from the given sequence number.
+     * The difference between {@code restartFromSequenceNumber()} and {@code
+     * continueFromSequenceNumber()} is that {@code restartFromSequenceNumber()} re-reads
+     * aggregated records so that recovery can resume at the failed sub-record.
+     *
+     * @param sequenceNumber the last successful sequence number, or sentinel marker
+     * @return the starting position to consume from
+     */
+    public static StartingPosition continueFromSequenceNumber(final SequenceNumber sequenceNumber) {
+        return fromSequenceNumber(sequenceNumber, false);
+    }
+
+    /**
+     * Returns the starting position to restart record consumption from the given sequence number
+     * after failure. The difference between {@code restartFromSequenceNumber()} and {@code
+     * continueFromSequenceNumber()} is that {@code restartFromSequenceNumber()} re-reads
+     * aggregated records so that recovery can resume at the failed sub-record.
+     *
+     * @param sequenceNumber the last successful sequence number, or sentinel marker
+     * @return the starting position to consume from
+     */
+    public static StartingPosition restartFromSequenceNumber(final SequenceNumber sequenceNumber) {
+        return fromSequenceNumber(sequenceNumber, true);
+    }
+
+    private static StartingPosition fromSequenceNumber(
+            final SequenceNumber sequenceNumber, final boolean restart) {
+        if (isSentinelSequenceNumber(sequenceNumber)) {
+            return new StartingPosition(fromSentinelSequenceNumber(sequenceNumber), null);
+        } else {
+            // we will be starting from an actual sequence number (due to restore from failure).
+            return new StartingPosition(
+                    getShardIteratorType(sequenceNumber, restart),
+                    sequenceNumber.getSequenceNumber());
+        }
+    }
+
+    private static ShardIteratorType getShardIteratorType(
+            final SequenceNumber sequenceNumber, final boolean restart) {
+        return restart && sequenceNumber.isAggregated()
+                ? AT_SEQUENCE_NUMBER
+                : AFTER_SEQUENCE_NUMBER;
+    }
+
+    private static ShardIteratorType fromSentinelSequenceNumber(
+            final SequenceNumber sequenceNumber) {
+        if (sequenceNumber.equals(SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get())) {
+            return LATEST;
+        } else if (sequenceNumber.equals(
+                SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())) {
+            return TRIM_HORIZON;
+        } else {
+            throw new IllegalArgumentException("Unexpected sentinel type: " + sequenceNumber);
+        }
+    }
+}
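
To make the continue/restart distinction concrete, a sketch with made-up sequence numbers (class name is illustrative): continuing moves past the last record, restarting an aggregated position re-reads the whole aggregate, and sentinels translate directly to iterator types.

import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber;
import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;

public class StartingPositionExample {
    public static void main(String[] args) {
        SequenceNumber aggregated = new SequenceNumber("4959123456", 3);

        System.out.println(
                StartingPosition.continueFromSequenceNumber(aggregated).getShardIteratorType());
        // AFTER_SEQUENCE_NUMBER - keep going behind the last emitted record

        System.out.println(
                StartingPosition.restartFromSequenceNumber(aggregated).getShardIteratorType());
        // AT_SEQUENCE_NUMBER - re-read the aggregate to recover remaining sub-records

        System.out.println(
                StartingPosition.restartFromSequenceNumber(
                                SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get())
                        .getShardIteratorType());
        // TRIM_HORIZON
    }
}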
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
new file mode 100644
index 0000000..92997e6
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandle.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+
+import com.amazonaws.services.kinesis.model.Shard;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A wrapper class around the stream name and the {@link
+ * com.amazonaws.services.kinesis.model.Shard} it refers to, with some extra utility methods to
+ * determine whether a shard is closed and whether it is the result of parent shard splits or
+ * merges.
+ */
+@Internal
+public class StreamShardHandle {
+
+    private final String streamName;
+    private final Shard shard;
+
+    private final int cachedHash;
+
+    /**
+     * Create a new StreamShardHandle.
+     *
+     * @param streamName the name of the Kinesis stream that this shard belongs to
+     * @param shard the actual AWS Shard instance that will be wrapped within this StreamShardHandle
+     */
+    public StreamShardHandle(String streamName, Shard shard) {
+        this.streamName = checkNotNull(streamName);
+        this.shard = checkNotNull(shard);
+
+        // Since our description of a Kinesis Streams shard is fully defined by the stream name
+        // and shard id, the hash doesn't need to include the hash code of Amazon's Shard
+        // description, which uses other information for its calculation.
+        int hash = 17;
+        hash = 37 * hash + streamName.hashCode();
+        hash = 37 * hash + shard.getShardId().hashCode();
+        this.cachedHash = hash;
+    }
+
+    public String getStreamName() {
+        return streamName;
+    }
+
+    public boolean isClosed() {
+        return (shard.getSequenceNumberRange().getEndingSequenceNumber() != null);
+    }
+
+    public Shard getShard() {
+        return shard;
+    }
+
+    @Override
+    public String toString() {
+        return "StreamShardHandle{"
+                + "streamName='"
+                + streamName
+                + "'"
+                + ", shard='"
+                + shard.toString()
+                + "'}";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof StreamShardHandle)) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        StreamShardHandle other = (StreamShardHandle) obj;
+
+        return streamName.equals(other.getStreamName()) && shard.equals(other.getShard());
+    }
+
+    @Override
+    public int hashCode() {
+        return cachedHash;
+    }
+
+    /**
+     * Utility function to compare two shard ids.
+     *
+     * @param firstShardId first shard id to compare
+     * @param secondShardId second shard id to compare
+     * @return a value less than 0 if the first shard id is smaller than the second shard id, a
+     *     value larger than 0 if the first shard id is larger than the second shard id, or 0 if
+     *     they are equal
+     */
+    public static int compareShardIds(String firstShardId, String secondShardId) {
+        return firstShardId.compareTo(secondShardId);
+    }
+}
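
A brief sketch (not part of the patch; class name and values are made up) of the closed-shard check: a shard whose sequence number range has an ending sequence number is considered closed.

import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;
import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;

public class ClosedShardExample {
    public static void main(String[] args) {
        Shard closedShard =
                new Shard()
                        .withShardId("shardId-000000000000")
                        .withSequenceNumberRange(
                                new SequenceNumberRange()
                                        .withStartingSequenceNumber("1")
                                        .withEndingSequenceNumber("100")); // closed by split/merge

        System.out.println(new StreamShardHandle("example-stream", closedShard).isClosed()); // true
    }
}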
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
new file mode 100644
index 0000000..23aa89a
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardMetadata.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.model;
+
+import org.apache.flink.annotation.Internal;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A serializable representation of an AWS Kinesis Stream shard. It is basically a wrapper class
+ * around the information extracted from {@link com.amazonaws.services.kinesis.model.Shard} and
+ * its nested classes. The extraction is required to avoid being locked in to a specific AWS SDK
+ * version and to keep the consumer's state backwards compatible.
+ */
+@Internal
+public class StreamShardMetadata implements Serializable {
+
+    private static final long serialVersionUID = 5134869582298563604L;
+
+    private String streamName;
+    private String shardId;
+    private String parentShardId;
+    private String adjacentParentShardId;
+    private String startingHashKey;
+    private String endingHashKey;
+    private String startingSequenceNumber;
+    private String endingSequenceNumber;
+
+    public void setStreamName(String streamName) {
+        this.streamName = streamName;
+    }
+
+    public void setShardId(String shardId) {
+        this.shardId = shardId;
+    }
+
+    public void setParentShardId(String parentShardId) {
+        this.parentShardId = parentShardId;
+    }
+
+    public void setAdjacentParentShardId(String adjacentParentShardId) {
+        this.adjacentParentShardId = adjacentParentShardId;
+    }
+
+    public void setStartingHashKey(String startingHashKey) {
+        this.startingHashKey = startingHashKey;
+    }
+
+    public void setEndingHashKey(String endingHashKey) {
+        this.endingHashKey = endingHashKey;
+    }
+
+    public void setStartingSequenceNumber(String startingSequenceNumber) {
+        this.startingSequenceNumber = startingSequenceNumber;
+    }
+
+    public void setEndingSequenceNumber(String endingSequenceNumber) {
+        this.endingSequenceNumber = endingSequenceNumber;
+    }
+
+    public String getStreamName() {
+        return this.streamName;
+    }
+
+    public String getShardId() {
+        return this.shardId;
+    }
+
+    public String getParentShardId() {
+        return this.parentShardId;
+    }
+
+    public String getAdjacentParentShardId() {
+        return this.adjacentParentShardId;
+    }
+
+    public String getStartingHashKey() {
+        return this.startingHashKey;
+    }
+
+    public String getEndingHashKey() {
+        return this.endingHashKey;
+    }
+
+    public String getStartingSequenceNumber() {
+        return this.startingSequenceNumber;
+    }
+
+    public String getEndingSequenceNumber() {
+        return this.endingSequenceNumber;
+    }
+
+    @Override
+    public String toString() {
+        return "StreamShardMetadata{"
+                + "streamName='"
+                + streamName
+                + "'"
+                + ", shardId='"
+                + shardId
+                + "'"
+                + ", parentShardId='"
+                + parentShardId
+                + "'"
+                + ", adjacentParentShardId='"
+                + adjacentParentShardId
+                + "'"
+                + ", startingHashKey='"
+                + startingHashKey
+                + "'"
+                + ", endingHashKey='"
+                + endingHashKey
+                + "'"
+                + ", startingSequenceNumber='"
+                + startingSequenceNumber
+                + "'"
+                + ", endingSequenceNumber='"
+                + endingSequenceNumber
+                + "'}";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof StreamShardMetadata)) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        StreamShardMetadata other = (StreamShardMetadata) obj;
+
+        return streamName.equals(other.getStreamName())
+                && shardId.equals(other.getShardId())
+                && Objects.equals(parentShardId, other.getParentShardId())
+                && Objects.equals(adjacentParentShardId, other.getAdjacentParentShardId())
+                && Objects.equals(startingHashKey, other.getStartingHashKey())
+                && Objects.equals(endingHashKey, other.getEndingHashKey())
+                && Objects.equals(startingSequenceNumber, other.getStartingSequenceNumber())
+                && Objects.equals(endingSequenceNumber, other.getEndingSequenceNumber());
+    }
+
+    @Override
+    public int hashCode() {
+        int hash = 17;
+
+        if (streamName != null) {
+            hash = 37 * hash + streamName.hashCode();
+        }
+        if (shardId != null) {
+            hash = 37 * hash + shardId.hashCode();
+        }
+        if (parentShardId != null) {
+            hash = 37 * hash + parentShardId.hashCode();
+        }
+        if (adjacentParentShardId != null) {
+            hash = 37 * hash + adjacentParentShardId.hashCode();
+        }
+        if (startingHashKey != null) {
+            hash = 37 * hash + startingHashKey.hashCode();
+        }
+        if (endingHashKey != null) {
+            hash = 37 * hash + endingHashKey.hashCode();
+        }
+        if (startingSequenceNumber != null) {
+            hash = 37 * hash + startingSequenceNumber.hashCode();
+        }
+        if (endingSequenceNumber != null) {
+            hash = 37 * hash + endingSequenceNumber.hashCode();
+        }
+
+        return hash;
+    }
+
+    /** An equivalence wrapper that only compares the stream name and shard id for equality. */
+    public static class EquivalenceWrapper {
+
+        private final StreamShardMetadata shardMetadata;
+
+        public EquivalenceWrapper(StreamShardMetadata shardMetadata) {
+            this.shardMetadata = checkNotNull(shardMetadata);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (!(obj instanceof EquivalenceWrapper)) {
+                return false;
+            }
+
+            if (obj == this) {
+                return true;
+            }
+
+            EquivalenceWrapper other = (EquivalenceWrapper) obj;
+
+            return shardMetadata.getStreamName().equals(other.shardMetadata.getStreamName())
+                    && shardMetadata.getShardId().equals(other.shardMetadata.getShardId());
+        }
+
+        @Override
+        public int hashCode() {
+            int hash = 17;
+
+            if (shardMetadata.getStreamName() != null) {
+                hash = 37 * hash + shardMetadata.getStreamName().hashCode();
+            }
+            if (shardMetadata.getShardId() != null) {
+                hash = 37 * hash + shardMetadata.getShardId().hashCode();
+            }
+            return hash;
+        }
+
+        public StreamShardMetadata getShardMetadata() {
+            return shardMetadata;
+        }
+    }
+}
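
A sketch of why the EquivalenceWrapper exists (class name and values are illustrative): two snapshots of the same shard can differ in mutable fields such as the ending sequence number, yet should still be treated as the same shard when reconciling state.

import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;

public class EquivalenceExample {
    public static void main(String[] args) {
        StreamShardMetadata beforeClose = new StreamShardMetadata();
        beforeClose.setStreamName("example-stream");
        beforeClose.setShardId("shardId-000000000000");

        StreamShardMetadata afterClose = new StreamShardMetadata();
        afterClose.setStreamName("example-stream");
        afterClose.setShardId("shardId-000000000000");
        afterClose.setEndingSequenceNumber("100"); // the shard was closed in the meantime

        System.out.println(beforeClose.equals(afterClose)); // false - full field comparison
        System.out.println(
                new StreamShardMetadata.EquivalenceWrapper(beforeClose)
                        .equals(new StreamShardMetadata.EquivalenceWrapper(afterClose))); // true
    }
}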
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
new file mode 100644
index 0000000..65c4035
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.runtime.util.EnvironmentInformation;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.ClientConfigurationFactory;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.regions.RegionUtils;
+import com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.Shard;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.AWS_ENDPOINT;
+import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.streaming.connectors.kinesis.util.AWSUtil.getCredentialsProvider;
+import static org.apache.flink.streaming.connectors.kinesis.util.AWSUtil.setAwsClientConfigProperties;
+
+/** DynamoDB Streams proxy: implementation for interacting with DynamoDB Streams. */
+public class DynamoDBStreamsProxy extends KinesisProxy {
+    private static final Logger LOG = LoggerFactory.getLogger(DynamoDBStreamsProxy.class);
+
+    /** Used for formatting Flink-specific user agent string when creating Kinesis client. */
+    private static final String USER_AGENT_FORMAT =
+            "Apache Flink %s (%s) DynamoDB Streams Connector";
+
+    protected DynamoDBStreamsProxy(Properties configProps) {
+        super(configProps);
+    }
+
+    /**
+     * Creates a DynamoDB streams proxy.
+     *
+     * @param configProps configuration properties
+     * @return the created DynamoDB streams proxy
+     */
+    public static KinesisProxyInterface create(Properties configProps) {
+        return new DynamoDBStreamsProxy(configProps);
+    }
+
+    /**
+     * Creates an AmazonDynamoDBStreamsAdapterClient that is used as the internal client for
+     * interacting with DynamoDB Streams.
+     *
+     * @param configProps configuration properties
+     * @return an AWS DynamoDB streams adapter client
+     */
+    @Override
+    protected AmazonKinesis createKinesisClient(Properties configProps) {
+        ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
+        setAwsClientConfigProperties(awsClientConfig, configProps);
+
+        AWSCredentialsProvider credentials = getCredentialsProvider(configProps);
+        awsClientConfig.setUserAgentPrefix(
+                String.format(
+                        USER_AGENT_FORMAT,
+                        EnvironmentInformation.getVersion(),
+                        EnvironmentInformation.getRevisionInformation().commitId));
+
+        AmazonDynamoDBStreamsAdapterClient adapterClient =
+                new AmazonDynamoDBStreamsAdapterClient(credentials, awsClientConfig);
+
+        if (configProps.containsKey(AWS_ENDPOINT)) {
+            adapterClient.setEndpoint(configProps.getProperty(AWS_ENDPOINT));
+        } else {
+            adapterClient.setRegion(RegionUtils.getRegion(configProps.getProperty(AWS_REGION)));
+        }
+
+        return adapterClient;
+    }
+
+    @Override
+    public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds)
+            throws InterruptedException {
+        GetShardListResult result = new GetShardListResult();
+
+        for (Map.Entry<String, String> streamNameWithLastSeenShardId :
+                streamNamesWithLastSeenShardIds.entrySet()) {
+            String stream = streamNameWithLastSeenShardId.getKey();
+            String lastSeenShardId = streamNameWithLastSeenShardId.getValue();
+            result.addRetrievedShardsToStream(stream, getShardsOfStream(stream, lastSeenShardId));
+        }
+        return result;
+    }
+
+    private List<StreamShardHandle> getShardsOfStream(
+            String streamName, @Nullable String lastSeenShardId) throws InterruptedException {
+        List<StreamShardHandle> shardsOfStream = new ArrayList<>();
+
+        DescribeStreamResult describeStreamResult;
+        do {
+            describeStreamResult = describeStream(streamName, lastSeenShardId);
+            List<Shard> shards = describeStreamResult.getStreamDescription().getShards();
+            for (Shard shard : shards) {
+                shardsOfStream.add(new StreamShardHandle(streamName, shard));
+            }
+
+            if (shards.size() != 0) {
+                lastSeenShardId = shards.get(shards.size() - 1).getShardId();
+            }
+        } while (describeStreamResult.getStreamDescription().isHasMoreShards());
+
+        return shardsOfStream;
+    }
+}
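
As a rough configuration sketch (not part of the patch; class name, region and values are placeholders, and credentials are resolved later via the default provider chain unless configured explicitly), creating the proxy only needs the consumer properties:

import java.util.Properties;

import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
import org.apache.flink.streaming.connectors.kinesis.proxy.DynamoDBStreamsProxy;
import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;

public class DynamoDBStreamsProxyExample {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // placeholder region

        // The proxy wraps an AmazonDynamoDBStreamsAdapterClient so the Kinesis-oriented
        // consumer code can read DynamoDB streams through the same interface.
        KinesisProxyInterface proxy = DynamoDBStreamsProxy.create(config);
        System.out.println(proxy != null);
    }
}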
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
new file mode 100644
index 0000000..553e862
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+
+import java.util.Random;
+
+/**
+ * Used to calculate full jitter backoff sleep durations.
+ *
+ * @see <a href="https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/">
+ *     Exponential Backoff and Jitter </a>
+ */
+@Internal
+public class FullJitterBackoff {
+
+    /** Random number generator used to calculate backoff jitter for Kinesis operations. */
+    private final Random seed = new Random();
+
+    /**
+     * Calculates the sleep time for full jitter based on the given parameters.
+     *
+     * @param baseMillis the base backoff time in milliseconds
+     * @param maxMillis the maximum backoff time in milliseconds
+     * @param power the power constant for exponential backoff
+     * @param attempt the attempt number
+     * @return the time to wait before trying again
+     */
+    public long calculateFullJitterBackoff(
+            long baseMillis, long maxMillis, double power, int attempt) {
+        long exponentialBackoff = (long) Math.min(maxMillis, baseMillis * Math.pow(power, attempt));
+        return (long) (seed.nextDouble() * exponentialBackoff);
+    }
+
+    /**
+     * Puts the current thread to sleep for the specified number of millis. Simply delegates to
+     * {@link Thread#sleep}.
+     *
+     * @param millisToSleep the number of milliseconds to sleep for
+     * @throws InterruptedException if the current thread is interrupted while sleeping
+     */
+    public void sleep(long millisToSleep) throws InterruptedException {
+        Thread.sleep(millisToSleep);
+    }
+}
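
A minimal retry-loop sketch (class name and constants are illustrative, mirroring how the proxy classes below use the helper): each attempt sleeps for a random duration between zero and the capped exponential bound.

import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;

public class BackoffExample {
    public static void main(String[] args) throws InterruptedException {
        FullJitterBackoff backoff = new FullJitterBackoff();
        long baseMillis = 1_000L; // illustrative values
        long maxMillis = 30_000L;
        double power = 1.5;

        for (int attempt = 0; attempt < 5; attempt++) {
            long sleepMillis =
                    backoff.calculateFullJitterBackoff(baseMillis, maxMillis, power, attempt);
            // sleepMillis is drawn uniformly from [0, min(maxMillis, baseMillis * power^attempt)).
            System.out.printf("attempt %d: backing off %d ms%n", attempt, sleepMillis);
            backoff.sleep(sleepMillis);
        }
    }
}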
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
new file mode 100644
index 0000000..82f0536
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/GetShardListResult.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Basic model class to bundle the shards retrieved from Kinesis on a {@link
+ * KinesisProxyInterface#getShardList(Map)} call.
+ */
+@Internal
+public class GetShardListResult {
+
+    private final Map<String, LinkedList<StreamShardHandle>> streamsToRetrievedShardList =
+            new HashMap<>();
+
+    public void addRetrievedShardToStream(String stream, StreamShardHandle retrievedShard) {
+        if (!streamsToRetrievedShardList.containsKey(stream)) {
+            streamsToRetrievedShardList.put(stream, new LinkedList<StreamShardHandle>());
+        }
+        streamsToRetrievedShardList.get(stream).add(retrievedShard);
+    }
+
+    public void addRetrievedShardsToStream(String stream, List<StreamShardHandle> retrievedShards) {
+        if (retrievedShards.size() != 0) {
+            if (!streamsToRetrievedShardList.containsKey(stream)) {
+                streamsToRetrievedShardList.put(stream, new LinkedList<StreamShardHandle>());
+            }
+            streamsToRetrievedShardList.get(stream).addAll(retrievedShards);
+        }
+    }
+
+    public List<StreamShardHandle> getRetrievedShardListOfStream(String stream) {
+        if (!streamsToRetrievedShardList.containsKey(stream)) {
+            return null;
+        } else {
+            return streamsToRetrievedShardList.get(stream);
+        }
+    }
+
+    public StreamShardHandle getLastSeenShardOfStream(String stream) {
+        if (!streamsToRetrievedShardList.containsKey(stream)) {
+            return null;
+        } else {
+            return streamsToRetrievedShardList.get(stream).getLast();
+        }
+    }
+
+    public boolean hasRetrievedShards() {
+        return !streamsToRetrievedShardList.isEmpty();
+    }
+
+    public Set<String> getStreamsWithRetrievedShards() {
+        return streamsToRetrievedShardList.keySet();
+    }
+}
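
A short sketch of how discovered shards are bundled per stream (class name, stream names and shard id are made up):

import java.util.Collections;

import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;

import com.amazonaws.services.kinesis.model.Shard;

public class ShardListResultExample {
    public static void main(String[] args) {
        GetShardListResult result = new GetShardListResult();
        result.addRetrievedShardsToStream(
                "example-stream",
                Collections.singletonList(
                        new StreamShardHandle(
                                "example-stream", new Shard().withShardId("shardId-000000000000"))));

        System.out.println(result.hasRetrievedShards()); // true
        System.out.println(result.getLastSeenShardOfStream("example-stream").getShard().getShardId());
        System.out.println(result.getRetrievedShardListOfStream("other-stream")); // null
    }
}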
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
new file mode 100644
index 0000000..458c029
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -0,0 +1,654 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.util.ExceptionUtils;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.ClientConfigurationFactory;
+import com.amazonaws.SdkClientException;
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.ExpiredNextTokenException;
+import com.amazonaws.services.kinesis.model.GetRecordsRequest;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
+import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
+import com.amazonaws.services.kinesis.model.InvalidArgumentException;
+import com.amazonaws.services.kinesis.model.LimitExceededException;
+import com.amazonaws.services.kinesis.model.ListShardsRequest;
+import com.amazonaws.services.kinesis.model.ListShardsResult;
+import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
+import com.amazonaws.services.kinesis.model.ResourceInUseException;
+import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
+import com.amazonaws.services.kinesis.model.Shard;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+import com.amazonaws.services.kinesis.model.StreamStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+
+import java.io.EOFException;
+import java.net.SocketTimeoutException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Kinesis proxy implementation - a utility class that is used as a proxy to make calls to AWS
+ * Kinesis for several functions, such as getting a list of shards and fetching a batch of data
+ * records starting from a specified record sequence number.
+ *
+ * <p>NOTE: In the AWS KCL library, there is a similar implementation - {@link
+ * com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy}. This implementation differs
+ * mainly in that we can perform operations on arbitrary Kinesis streams, which the Flink Kinesis
+ * Connector needs since the consumer may read from multiple Kinesis streams simultaneously.
+ */
+@Internal
+public class KinesisProxy implements KinesisProxyInterface {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
+
+    /** Calculates full jitter backoff delays. */
+    private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
+
+    /** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
+    private final AmazonKinesis kinesisClient;
+
+    // ------------------------------------------------------------------------
+    //  listShards() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Base backoff millis for the list shards operation. */
+    private final long listShardsBaseBackoffMillis;
+
+    /** Maximum backoff millis for the list shards operation. */
+    private final long listShardsMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the list shards operation. */
+    private final double listShardsExpConstant;
+
+    /** Maximum retry attempts for the list shards operation. */
+    private final int listShardsMaxRetries;
+
+    // ------------------------------------------------------------------------
+    //  getRecords() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Base backoff millis for the get records operation. */
+    private final long getRecordsBaseBackoffMillis;
+
+    /** Maximum backoff millis for the get records operation. */
+    private final long getRecordsMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the get records operation. */
+    private final double getRecordsExpConstant;
+
+    /** Maximum retry attempts for the get records operation. */
+    private final int getRecordsMaxRetries;
+
+    // ------------------------------------------------------------------------
+    //  getShardIterator() related performance settings
+    // ------------------------------------------------------------------------
+
+    /** Base backoff millis for the get shard iterator operation. */
+    private final long getShardIteratorBaseBackoffMillis;
+
+    /** Maximum backoff millis for the get shard iterator operation. */
+    private final long getShardIteratorMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the get shard iterator operation. */
+    private final double getShardIteratorExpConstant;
+
+    /** Maximum retry attempts for the get shard iterator operation. */
+    private final int getShardIteratorMaxRetries;
+
+    /** Base backoff millis for the describe stream operation. */
+    private final long describeStreamBaseBackoffMillis;
+
+    /** Maximum backoff millis for the describe stream operation. */
+    private final long describeStreamMaxBackoffMillis;
+
+    /** Exponential backoff power constant for the describe stream operation. */
+    private final double describeStreamExpConstant;
+
+    /**
+     * Create a new KinesisProxy based on the supplied configuration properties.
+     *
+     * @param configProps configuration properties containing AWS credential and AWS region info
+     */
+    protected KinesisProxy(Properties configProps) {
+        checkNotNull(configProps);
+        KinesisConfigUtil.backfillConsumerKeys(configProps);
+
+        this.kinesisClient = createKinesisClient(configProps);
+
+        this.listShardsBaseBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.LIST_SHARDS_BACKOFF_BASE,
+                                Long.toString(
+                                        ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_BASE)));
+        this.listShardsMaxBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.LIST_SHARDS_BACKOFF_MAX,
+                                Long.toString(
+                                        ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_MAX)));
+        this.listShardsExpConstant =
+                Double.parseDouble(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT,
+                                Double.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT)));
+        this.listShardsMaxRetries =
+                Integer.parseInt(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.LIST_SHARDS_RETRIES,
+                                Long.toString(
+                                        ConsumerConfigConstants.DEFAULT_LIST_SHARDS_RETRIES)));
+        this.describeStreamBaseBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_BASE,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE)));
+        this.describeStreamMaxBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_MAX,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX)));
+        this.describeStreamExpConstant =
+                Double.parseDouble(
+                        configProps.getProperty(
+                                ConsumerConfigConstants
+                                        .STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT,
+                                Double.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT)));
+        this.getRecordsBaseBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_BASE,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETRECORDS_BACKOFF_BASE)));
+        this.getRecordsMaxBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_MAX,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETRECORDS_BACKOFF_MAX)));
+        this.getRecordsExpConstant =
+                Double.parseDouble(
+                        configProps.getProperty(
+                                ConsumerConfigConstants
+                                        .SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT,
+                                Double.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT)));
+        this.getRecordsMaxRetries =
+                Integer.parseInt(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETRECORDS_RETRIES,
+                                Long.toString(
+                                        ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_RETRIES)));
+
+        this.getShardIteratorBaseBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_BASE,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETITERATOR_BACKOFF_BASE)));
+        this.getShardIteratorMaxBackoffMillis =
+                Long.parseLong(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_MAX,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETITERATOR_BACKOFF_MAX)));
+        this.getShardIteratorExpConstant =
+                Double.parseDouble(
+                        configProps.getProperty(
+                                ConsumerConfigConstants
+                                        .SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT,
+                                Double.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT)));
+        this.getShardIteratorMaxRetries =
+                Integer.parseInt(
+                        configProps.getProperty(
+                                ConsumerConfigConstants.SHARD_GETITERATOR_RETRIES,
+                                Long.toString(
+                                        ConsumerConfigConstants
+                                                .DEFAULT_SHARD_GETITERATOR_RETRIES)));
+    }
+
+    /**
+     * Create the Kinesis client, using the provided configuration properties and default {@link
+     * ClientConfiguration}. Derived classes can override this method to customize the client
+     * configuration.
+     */
+    protected AmazonKinesis createKinesisClient(Properties configProps) {
+
+        ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
+        AWSUtil.setAwsClientConfigProperties(awsClientConfig, configProps);
+        return AWSUtil.createKinesisClient(configProps, awsClientConfig);
+    }
+
+    /**
+     * Creates a Kinesis proxy.
+     *
+     * @param configProps configuration properties
+     * @return the created kinesis proxy
+     */
+    public static KinesisProxyInterface create(Properties configProps) {
+        return new KinesisProxy(configProps);
+    }
+
+    @Override
+    public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet)
+            throws InterruptedException {
+        final GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
+        getRecordsRequest.setShardIterator(shardIterator);
+        getRecordsRequest.setLimit(maxRecordsToGet);
+
+        GetRecordsResult getRecordsResult = null;
+
+        int retryCount = 0;
+        while (retryCount <= getRecordsMaxRetries && getRecordsResult == null) {
+            try {
+                getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
+            } catch (SdkClientException ex) {
+                if (isRecoverableSdkClientException(ex)) {
+                    long backoffMillis =
+                            BACKOFF.calculateFullJitterBackoff(
+                                    getRecordsBaseBackoffMillis,
+                                    getRecordsMaxBackoffMillis,
+                                    getRecordsExpConstant,
+                                    retryCount++);
+                    LOG.warn(
+                            "Got recoverable SdkClientException. Backing off for "
+                                    + backoffMillis
+                                    + " millis ("
+                                    + ex.getClass().getName()
+                                    + ": "
+                                    + ex.getMessage()
+                                    + ")");
+                    BACKOFF.sleep(backoffMillis);
+                } else {
+                    throw ex;
+                }
+            }
+        }
+
+        if (getRecordsResult == null) {
+            throw new RuntimeException(
+                    "Retries exceeded for getRecords operation - all "
+                            + getRecordsMaxRetries
+                            + " retry attempts failed.");
+        }
+
+        return getRecordsResult;
+    }
+
+    @Override
+    public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds)
+            throws InterruptedException {
+        GetShardListResult result = new GetShardListResult();
+
+        for (Map.Entry<String, String> streamNameWithLastSeenShardId :
+                streamNamesWithLastSeenShardIds.entrySet()) {
+            String stream = streamNameWithLastSeenShardId.getKey();
+            String lastSeenShardId = streamNameWithLastSeenShardId.getValue();
+            result.addRetrievedShardsToStream(stream, getShardsOfStream(stream, lastSeenShardId));
+        }
+        return result;
+    }
+
+    @Override
+    public String getShardIterator(
+            StreamShardHandle shard, String shardIteratorType, @Nullable Object startingMarker)
+            throws InterruptedException {
+        GetShardIteratorRequest getShardIteratorRequest =
+                new GetShardIteratorRequest()
+                        .withStreamName(shard.getStreamName())
+                        .withShardId(shard.getShard().getShardId())
+                        .withShardIteratorType(shardIteratorType);
+
+        switch (ShardIteratorType.fromValue(shardIteratorType)) {
+            case TRIM_HORIZON:
+            case LATEST:
+                break;
+            case AT_TIMESTAMP:
+                if (startingMarker instanceof Date) {
+                    getShardIteratorRequest.setTimestamp((Date) startingMarker);
+                } else {
+                    throw new IllegalArgumentException(
+                            "Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_TIMESTAMP. Must be a Date object.");
+                }
+                break;
+            case AT_SEQUENCE_NUMBER:
+            case AFTER_SEQUENCE_NUMBER:
+                if (startingMarker instanceof String) {
+                    getShardIteratorRequest.setStartingSequenceNumber((String) startingMarker);
+                } else {
+                    throw new IllegalArgumentException(
+                            "Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. Must be a String.");
+                }
+        }
+        return getShardIterator(getShardIteratorRequest);
+    }
+
+    private String getShardIterator(GetShardIteratorRequest getShardIteratorRequest)
+            throws InterruptedException {
+        GetShardIteratorResult getShardIteratorResult = null;
+
+        int retryCount = 0;
+        while (retryCount <= getShardIteratorMaxRetries && getShardIteratorResult == null) {
+            try {
+                getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
+            } catch (AmazonServiceException ex) {
+                if (isRecoverableException(ex)) {
+                    long backoffMillis =
+                            BACKOFF.calculateFullJitterBackoff(
+                                    getShardIteratorBaseBackoffMillis,
+                                    getShardIteratorMaxBackoffMillis,
+                                    getShardIteratorExpConstant,
+                                    retryCount++);
+                    LOG.warn(
+                            "Got recoverable AmazonServiceException. Backing off for "
+                                    + backoffMillis
+                                    + " millis ("
+                                    + ex.getClass().getName()
+                                    + ": "
+                                    + ex.getMessage()
+                                    + ")");
+                    BACKOFF.sleep(backoffMillis);
+                } else {
+                    throw ex;
+                }
+            }
+        }
+
+        if (getShardIteratorResult == null) {
+            throw new RuntimeException(
+                    "Retries exceeded for getShardIterator operation - all "
+                            + getShardIteratorMaxRetries
+                            + " retry attempts failed.");
+        }
+        return getShardIteratorResult.getShardIterator();
+    }
+
+    /**
+     * Determines whether the exception is recoverable using exponential-backoff.
+     *
+     * @param ex Exception to inspect
+     * @return <code>true</code> if the exception can be recovered from, else <code>false</code>
+     */
+    protected boolean isRecoverableSdkClientException(SdkClientException ex) {
+        if (ex instanceof AmazonServiceException) {
+            return KinesisProxy.isRecoverableException((AmazonServiceException) ex);
+        } else if (isRecoverableConnectionException(ex)) {
+            return true;
+        }
+        // customizations may decide to retry other errors, such as read timeouts
+        return false;
+    }
+
+    private boolean isRecoverableConnectionException(SdkClientException ex) {
+        return ExceptionUtils.findThrowable(ex, SocketTimeoutException.class).isPresent()
+                || ExceptionUtils.findThrowable(ex, EOFException.class).isPresent();
+    }
+
+    /**
+     * Determines whether the exception is recoverable using exponential-backoff.
+     *
+     * @param ex Exception to inspect
+     * @return <code>true</code> if the exception can be recovered from, else <code>false</code>
+     */
+    protected static boolean isRecoverableException(AmazonServiceException ex) {
+        if (ex.getErrorType() == null) {
+            return false;
+        }
+
+        switch (ex.getErrorType()) {
+            case Client:
+                return ex instanceof ProvisionedThroughputExceededException;
+            case Service:
+            case Unknown:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    private List<StreamShardHandle> getShardsOfStream(
+            String streamName, @Nullable String lastSeenShardId) throws InterruptedException {
+        List<StreamShardHandle> shardsOfStream = new ArrayList<>();
+
+        // List Shards returns just the first 1000 shard entries. In order to read the entire
+        // stream, we need to use the returned nextToken to get additional shards.
+        ListShardsResult listShardsResult;
+        String startShardToken = null;
+        do {
+            listShardsResult = listShards(streamName, lastSeenShardId, startShardToken);
+            if (listShardsResult == null) {
+                // In case we hit exceptions while retrieving shards, ensure that an incomplete
+                // shard list is not returned; clear the partially collected shards before
+                // returning.
+                shardsOfStream.clear();
+                return shardsOfStream;
+            }
+            List<Shard> shards = listShardsResult.getShards();
+            for (Shard shard : shards) {
+                shardsOfStream.add(new StreamShardHandle(streamName, shard));
+            }
+            startShardToken = listShardsResult.getNextToken();
+        } while (startShardToken != null);
+
+        return shardsOfStream;
+    }
+
+    /**
+     * Get the shard list of a Kinesis stream, i.e. information about the shards this Kinesis
+     * stream possesses.
+     *
+     * <p>This method uses the "full jitter" approach described in AWS's article, <a
+     * href="https://www.awsarchitectureblog.com/2015/03/backoff.html">"Exponential Backoff and
+     * Jitter"</a>. This is necessary because concurrent calls will be made by all parallel
+     * subtasks' fetchers. The jittered backoff helps distribute calls across the fetchers over
+     * time.
+     *
+     * @param streamName the stream to list shards for
+     * @param startShardId which shard to start with for this list shards operation (earlier
+     *     shards' info will not appear in the result)
+     * @return the result of the list shards operation
+     */
+    private ListShardsResult listShards(
+            String streamName, @Nullable String startShardId, @Nullable String startNextToken)
+            throws InterruptedException {
+        final ListShardsRequest listShardsRequest = new ListShardsRequest();
+        if (startNextToken == null) {
+            listShardsRequest.setExclusiveStartShardId(startShardId);
+            listShardsRequest.setStreamName(streamName);
+        } else {
+            // Note the nextToken returned by AWS expires within 300 sec.
+            listShardsRequest.setNextToken(startNextToken);
+        }
+
+        ListShardsResult listShardsResults = null;
+
+        // Call ListShards, with full-jitter backoff (if we get LimitExceededException).
+        int retryCount = 0;
+        // List Shards returns just the first 1000 shard entries. Make sure that all entries
+        // are taken up.
+        while (retryCount <= listShardsMaxRetries
+                && listShardsResults == null) { // retry until we get a result
+            try {
+
+                listShardsResults = kinesisClient.listShards(listShardsRequest);
+            } catch (LimitExceededException le) {
+                long backoffMillis =
+                        BACKOFF.calculateFullJitterBackoff(
+                                listShardsBaseBackoffMillis,
+                                listShardsMaxBackoffMillis,
+                                listShardsExpConstant,
+                                retryCount++);
+                LOG.warn(
+                        "Got LimitExceededException when listing shards from stream "
+                                + streamName
+                                + ". Backing off for "
+                                + backoffMillis
+                                + " millis.");
+                BACKOFF.sleep(backoffMillis);
+            } catch (ResourceInUseException reInUse) {
+                // List Shards will throw an exception if the stream is not in an active state.
+                // Return and re-use the previously available state.
+                LOG.info(
+                        "The stream is currently not in active state. Reusing the older state "
+                                + "for the time being");
+                break;
+            } catch (ResourceNotFoundException reNotFound) {
+                throw new RuntimeException(
+                        "Stream not found. Error while getting shard list.", reNotFound);
+            } catch (InvalidArgumentException inArg) {
+                throw new RuntimeException("Invalid Arguments to listShards.", inArg);
+            } catch (ExpiredNextTokenException expiredToken) {
+                LOG.warn("List Shards has an expired token. Reusing the previous state.");
+                break;
+            } catch (SdkClientException ex) {
+                if (retryCount < listShardsMaxRetries && isRecoverableSdkClientException(ex)) {
+                    long backoffMillis =
+                            BACKOFF.calculateFullJitterBackoff(
+                                    listShardsBaseBackoffMillis,
+                                    listShardsMaxBackoffMillis,
+                                    listShardsExpConstant,
+                                    retryCount++);
+                    LOG.warn(
+                            "Got SdkClientException when listing shards from stream {}. Backing off for {} millis.",
+                            streamName,
+                            backoffMillis);
+                    BACKOFF.sleep(backoffMillis);
+                } else {
+                    // propagate if retries exceeded or not recoverable
+                    // (otherwise would return null result and keep trying forever)
+                    throw ex;
+                }
+            }
+        }
+
+        // Kinesalite (mock implementation of Kinesis) does not correctly exclude shards before
+        // the exclusive start shard id in the returned shards list; check if we need to remove
+        // these erroneously returned shards.
+        // Related issues:
+        // 	https://github.com/mhart/kinesalite/pull/77
+        // 	https://github.com/lyft/kinesalite/pull/4
+        if (startShardId != null && listShardsResults != null) {
+            List<Shard> shards = listShardsResults.getShards();
+            shards.removeIf(
+                    shard ->
+                            StreamShardHandle.compareShardIds(shard.getShardId(), startShardId)
+                                    <= 0);
+        }
+
+        return listShardsResults;
+    }
+
+    /**
+     * Get metainfo for a Kinesis stream, which contains information about the shards this Kinesis
+     * stream possesses.
+     *
+     * <p>This method uses the "full jitter" approach described in AWS's article, <a
+     * href="https://www.awsarchitectureblog.com/2015/03/backoff.html">"Exponential Backoff and
+     * Jitter"</a>. This is necessary because concurrent calls will be made by all parallel
+     * subtasks' fetchers. The jittered backoff helps distribute calls across the fetchers over
+     * time.
+     *
+     * @param streamName the stream to describe
+     * @param startShardId which shard to start with for this describe operation
+     * @return the result of the describe stream operation
+     */
+    protected DescribeStreamResult describeStream(String streamName, @Nullable String startShardId)
+            throws InterruptedException {
+        final DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
+        describeStreamRequest.setStreamName(streamName);
+        describeStreamRequest.setExclusiveStartShardId(startShardId);
+
+        DescribeStreamResult describeStreamResult = null;
+
+        // Call DescribeStream, with full-jitter backoff (if we get LimitExceededException).
+        int attemptCount = 0;
+        while (describeStreamResult == null) { // retry until we get a result
+            try {
+                describeStreamResult = kinesisClient.describeStream(describeStreamRequest);
+            } catch (LimitExceededException le) {
+                long backoffMillis =
+                        BACKOFF.calculateFullJitterBackoff(
+                                describeStreamBaseBackoffMillis,
+                                describeStreamMaxBackoffMillis,
+                                describeStreamExpConstant,
+                                attemptCount++);
+                LOG.warn(
+                        String.format(
+                                "Got LimitExceededException when describing stream %s. "
+                                        + "Backing off for %d millis.",
+                                streamName, backoffMillis));
+                BACKOFF.sleep(backoffMillis);
+            } catch (ResourceNotFoundException re) {
+                throw new RuntimeException("Error while getting stream details", re);
+            }
+        }
+
+        String streamStatus = describeStreamResult.getStreamDescription().getStreamStatus();
+        if (!(streamStatus.equals(StreamStatus.ACTIVE.toString())
+                || streamStatus.equals(StreamStatus.UPDATING.toString()))) {
+            if (LOG.isWarnEnabled()) {
+                LOG.warn(
+                        String.format(
+                                "The status of stream %s is %s ; result of the current "
+                                        + "describeStream operation will not contain any shard information.",
+                                streamName, streamStatus));
+            }
+        }
+
+        return describeStreamResult;
+    }
+}
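
For illustration only: every backoff and retry knob read in the KinesisProxy constructor above is a plain string property. A minimal sketch of a consumer configuration tuning the ListShards backoff follows; the property key constants are the ones referenced in the constructor, while the region value is a placeholder and credentials are assumed to come from the default provider chain.

    import java.util.Properties;

    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;

    public class KinesisProxyBackoffConfig {
        public static Properties consumerProperties() {
            Properties config = new Properties();
            config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // placeholder region
            // Full-jitter backoff for ListShards: base delay, cap, exponent, and retry budget.
            config.setProperty(ConsumerConfigConstants.LIST_SHARDS_BACKOFF_BASE, "1000");
            config.setProperty(ConsumerConfigConstants.LIST_SHARDS_BACKOFF_MAX, "5000");
            config.setProperty(ConsumerConfigConstants.LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT, "1.5");
            config.setProperty(ConsumerConfigConstants.LIST_SHARDS_RETRIES, "10");
            // SHARD_GETRECORDS_* and SHARD_GETITERATOR_* keys follow the same pattern.
            return config;
        }
    }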
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
new file mode 100644
index 0000000..3728d72
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+
+import java.util.Map;
+
+/**
+ * Interface for a Kinesis proxy that operates on multiple Kinesis streams within the same AWS
+ * service region.
+ */
+@Internal
+public interface KinesisProxyInterface {
+
+    /**
+     * Get a shard iterator from the specified position in a shard. The retrieved shard iterator can
+     * be used in {@link KinesisProxyInterface#getRecords(String, int)} to read data from the
+     * Kinesis shard.
+     *
+     * @param shard the shard to get the iterator
+     * @param shardIteratorType the iterator type, defining how the shard is to be iterated (one of:
+     *     TRIM_HORIZON, LATEST, AT_TIMESTAMP, AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER)
+     * @param startingMarker should be {@code null} if shardIteratorType is TRIM_HORIZON or LATEST,
+     *     should be a {@code Date} value if shardIteratorType is AT_TIMESTAMP, should be a {@code
+     *     String} representing the sequence number if shardIteratorType is AT_SEQUENCE_NUMBER or
+     *     AFTER_SEQUENCE_NUMBER
+     * @return shard iterator which can be used to read data from Kinesis
+     * @throws InterruptedException this method will retry with backoff if AWS Kinesis complains
+     *     that the operation has exceeded the rate limit; this exception will be thrown if the
+     *     backoff is interrupted.
+     */
+    String getShardIterator(
+            StreamShardHandle shard, String shardIteratorType, Object startingMarker)
+            throws InterruptedException;
+
+    /**
+     * Get the next batch of data records using a specific shard iterator.
+     *
+     * @param shardIterator a shard iterator that encodes info about which shard to read and where
+     *     to start reading
+     * @param maxRecordsToGet the maximum amount of records to retrieve for this batch
+     * @return the batch of retrieved records, also with a shard iterator that can be used to get
+     *     the next batch
+     * @throws InterruptedException this method will retry with backoff if AWS Kinesis complains
+     *     that the operation has exceeded the rate limit; this exception will be thrown if the
+     *     backoff is interrupted.
+     */
+    GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet)
+            throws InterruptedException;
+
+    /**
+     * Get shard list of multiple Kinesis streams, ignoring the shards of each stream before a
+     * specified last seen shard id.
+     *
+     * @param streamNamesWithLastSeenShardIds a map with stream as key, and last seen shard id as
+     *     value
+     * @return result of the shard list query
+     * @throws InterruptedException this method will retry with backoff if AWS Kinesis complains
+     *     that the operation has exceeded the rate limit; this exception will be thrown if the
+     *     backoff is interrupted.
+     */
+    GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds)
+            throws InterruptedException;
+}
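
To make the contract above concrete, here is a minimal, purely illustrative polling loop over a single shard. The stream name and shard id are placeholders, credentials are assumed to come from the default provider chain, and a real caller would discover shards via getShardList(...) rather than hard-coding one.

    import java.util.Properties;

    import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
    import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
    import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
    import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;

    import com.amazonaws.services.kinesis.model.GetRecordsResult;
    import com.amazonaws.services.kinesis.model.Record;
    import com.amazonaws.services.kinesis.model.Shard;

    public class ShardPollingSketch {
        public static void main(String[] args) throws InterruptedException {
            Properties config = new Properties();
            config.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // placeholder region

            KinesisProxyInterface proxy = KinesisProxy.create(config);

            // Placeholder shard; normally obtained through getShardList(...).
            StreamShardHandle shard =
                    new StreamShardHandle(
                            "example-stream", new Shard().withShardId("shardId-000000000000"));

            // TRIM_HORIZON and LATEST take no starting marker, hence the null argument.
            String iterator = proxy.getShardIterator(shard, "TRIM_HORIZON", null);
            while (iterator != null) {
                GetRecordsResult batch = proxy.getRecords(iterator, 100);
                for (Record record : batch.getRecords()) {
                    System.out.println(record.getSequenceNumber());
                }
                // Becomes null only once the shard is closed and fully consumed.
                iterator = batch.getNextShardIterator();
            }
        }
    }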
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
new file mode 100644
index 0000000..0677765
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Kinesis proxy implementation using AWS SDK v2.x - a utility class that is used as a proxy to make
+ * calls to AWS Kinesis for several EFO (Enhanced Fan Out) functions, such as de-/registering stream
+ * consumers, subscribing to a shard and receiving records from a shard.
+ */
+@Internal
+public class KinesisProxyV2 implements KinesisProxyV2Interface {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisProxyV2.class);
+
+    /** An Asynchronous client used to communicate with AWS services. */
+    private final KinesisAsyncClient kinesisAsyncClient;
+
+    private final SdkAsyncHttpClient httpClient;
+
+    private final FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration;
+
+    private final FullJitterBackoff backoff;
+
+    /**
+     * Create a new KinesisProxyV2.
+     *
+     * @param kinesisAsyncClient AWS SDK v2 Kinesis client used to communicate with AWS services
+     * @param httpClient the underlying HTTP client, reference required for close only
+     * @param fanOutRecordPublisherConfiguration the configuration for Fan Out features
+     * @param backoff the backoff utility used to introduce Full Jitter delays
+     */
+    public KinesisProxyV2(
+            final KinesisAsyncClient kinesisAsyncClient,
+            final SdkAsyncHttpClient httpClient,
+            final FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration,
+            final FullJitterBackoff backoff) {
+        this.kinesisAsyncClient = Preconditions.checkNotNull(kinesisAsyncClient);
+        this.httpClient = httpClient;
+        this.fanOutRecordPublisherConfiguration = fanOutRecordPublisherConfiguration;
+        this.backoff = backoff;
+    }
+
+    @Override
+    public CompletableFuture<Void> subscribeToShard(
+            final SubscribeToShardRequest request,
+            final SubscribeToShardResponseHandler responseHandler) {
+        return kinesisAsyncClient.subscribeToShard(request, responseHandler);
+    }
+
+    @Override
+    public void close() {
+        kinesisAsyncClient.close();
+        httpClient.close();
+    }
+
+    @Override
+    public DescribeStreamSummaryResponse describeStreamSummary(String stream)
+            throws InterruptedException, ExecutionException {
+        DescribeStreamSummaryRequest describeStreamRequest =
+                DescribeStreamSummaryRequest.builder().streamName(stream).build();
+
+        return invokeWithRetryAndBackoff(
+                () -> kinesisAsyncClient.describeStreamSummary(describeStreamRequest).get(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamBaseBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamMaxBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamExpConstant(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamMaxRetries());
+    }
+
+    @Override
+    public DescribeStreamConsumerResponse describeStreamConsumer(
+            final String streamArn, final String consumerName)
+            throws InterruptedException, ExecutionException {
+        DescribeStreamConsumerRequest describeStreamConsumerRequest =
+                DescribeStreamConsumerRequest.builder()
+                        .streamARN(streamArn)
+                        .consumerName(consumerName)
+                        .build();
+
+        return describeStreamConsumer(describeStreamConsumerRequest);
+    }
+
+    @Override
+    public DescribeStreamConsumerResponse describeStreamConsumer(final String streamConsumerArn)
+            throws InterruptedException, ExecutionException {
+        DescribeStreamConsumerRequest describeStreamConsumerRequest =
+                DescribeStreamConsumerRequest.builder().consumerARN(streamConsumerArn).build();
+
+        return describeStreamConsumer(describeStreamConsumerRequest);
+    }
+
+    private DescribeStreamConsumerResponse describeStreamConsumer(
+            final DescribeStreamConsumerRequest request)
+            throws InterruptedException, ExecutionException {
+        return invokeWithRetryAndBackoff(
+                () -> kinesisAsyncClient.describeStreamConsumer(request).get(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamConsumerBaseBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamConsumerMaxBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamConsumerExpConstant(),
+                fanOutRecordPublisherConfiguration.getDescribeStreamConsumerMaxRetries());
+    }
+
+    @Override
+    public RegisterStreamConsumerResponse registerStreamConsumer(
+            final String streamArn, final String consumerName)
+            throws InterruptedException, ExecutionException {
+        RegisterStreamConsumerRequest registerStreamConsumerRequest =
+                RegisterStreamConsumerRequest.builder()
+                        .streamARN(streamArn)
+                        .consumerName(consumerName)
+                        .build();
+
+        return invokeWithRetryAndBackoff(
+                () ->
+                        kinesisAsyncClient
+                                .registerStreamConsumer(registerStreamConsumerRequest)
+                                .get(),
+                fanOutRecordPublisherConfiguration.getRegisterStreamBaseBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getRegisterStreamMaxBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getRegisterStreamExpConstant(),
+                fanOutRecordPublisherConfiguration.getRegisterStreamMaxRetries());
+    }
+
+    @Override
+    public DeregisterStreamConsumerResponse deregisterStreamConsumer(final String consumerArn)
+            throws InterruptedException, ExecutionException {
+        DeregisterStreamConsumerRequest deregisterStreamConsumerRequest =
+                DeregisterStreamConsumerRequest.builder().consumerARN(consumerArn).build();
+
+        return invokeWithRetryAndBackoff(
+                () ->
+                        kinesisAsyncClient
+                                .deregisterStreamConsumer(deregisterStreamConsumerRequest)
+                                .get(),
+                fanOutRecordPublisherConfiguration.getDeregisterStreamBaseBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDeregisterStreamMaxBackoffMillis(),
+                fanOutRecordPublisherConfiguration.getDeregisterStreamExpConstant(),
+                fanOutRecordPublisherConfiguration.getDeregisterStreamMaxRetries());
+    }
+
+    private <T> T invokeWithRetryAndBackoff(
+            final ResponseSupplier<T> responseSupplier,
+            final long jitterBase,
+            final long jitterMax,
+            final double jitterExponent,
+            final int maximumNumberOfRetries)
+            throws InterruptedException, ExecutionException {
+        T response = null;
+        int attempt = 0;
+
+        while (attempt < maximumNumberOfRetries && response == null) {
+            try {
+                response = responseSupplier.get();
+            } catch (Exception ex) {
+                if (AwsV2Util.isRecoverableException(ex)) {
+                    long backoffMillis =
+                            backoff.calculateFullJitterBackoff(
+                                    jitterBase, jitterMax, jitterExponent, ++attempt);
+                    LOG.warn(
+                            "Encountered recoverable error: {}. Backing off for {} millis.",
+                            ex.getClass().getSimpleName(),
+                            backoffMillis,
+                            ex);
+
+                    backoff.sleep(backoffMillis);
+                } else {
+                    throw ex;
+                }
+            }
+        }
+
+        if (response == null) {
+            throw new RuntimeException(
+                    "Retries exceeded - all " + maximumNumberOfRetries + " retry attempts failed.");
+        }
+
+        return response;
+    }
+
+    private interface ResponseSupplier<T> {
+        T get() throws ExecutionException, InterruptedException;
+    }
+}
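
The retry helper above delegates the delay calculation to FullJitterBackoff. The "full jitter" strategy from the AWS article referenced elsewhere in this change amounts to drawing a uniformly random delay below an exponentially growing cap. A standalone sketch of that idea (not necessarily the connector's exact implementation) is:

    import java.util.concurrent.ThreadLocalRandom;

    public final class FullJitterBackoffSketch {

        /** Returns a random delay in [0, min(maxMillis, baseMillis * power^attempt)). */
        public static long calculateFullJitterBackoff(
                long baseMillis, long maxMillis, double power, int attempt) {
            double exponentialCap = Math.min(maxMillis, baseMillis * Math.pow(power, attempt));
            return (long) (ThreadLocalRandom.current().nextDouble() * exponentialCap);
        }

        public static void main(String[] args) {
            // With base=100ms, max=2000ms, power=1.5, the cap grows 100, 150, 225, ... up to 2000.
            for (int attempt = 0; attempt < 5; attempt++) {
                System.out.println(calculateFullJitterBackoff(100, 2000, 1.5, attempt));
            }
        }
    }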
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
new file mode 100644
index 0000000..24c3cb3
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.aws.util.AWSAsyncSinkUtil;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import java.util.Properties;
+
+import static java.util.Collections.emptyList;
+import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TCP_KEEPALIVE;
+
+/** Creates instances of {@link KinesisProxyV2}. */
+@Internal
+public class KinesisProxyV2Factory {
+
+    private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
+
+    /**
+     * Uses the given properties to instantiate a new instance of {@link KinesisProxyV2}.
+     *
+     * @param configProps the properties used to parse configuration
+     * @return the Kinesis proxy
+     */
+    public static KinesisProxyV2Interface createKinesisProxyV2(final Properties configProps) {
+        Preconditions.checkNotNull(configProps);
+
+        final AttributeMap convertedProperties = AwsV2Util.convertProperties(configProps);
+        final AttributeMap.Builder clientConfiguration = AttributeMap.builder();
+        populateDefaultValues(clientConfiguration);
+
+        final SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(
+                        convertedProperties.merge(clientConfiguration.build()),
+                        NettyNioAsyncHttpClient.builder());
+        final FanOutRecordPublisherConfiguration configuration =
+                new FanOutRecordPublisherConfiguration(configProps, emptyList());
+
+        Properties asyncClientProperties =
+                KinesisConfigUtil.getV2ConsumerAsyncClientProperties(configProps);
+
+        final KinesisAsyncClient client =
+                AWSAsyncSinkUtil.createAwsAsyncClient(
+                        asyncClientProperties,
+                        httpClient,
+                        KinesisAsyncClient.builder(),
+                        KinesisStreamsConfigConstants.BASE_KINESIS_USER_AGENT_PREFIX_FORMAT,
+                        KinesisStreamsConfigConstants.KINESIS_CLIENT_USER_AGENT_PREFIX);
+
+        return new KinesisProxyV2(client, httpClient, configuration, BACKOFF);
+    }
+
+    private static void populateDefaultValues(final AttributeMap.Builder clientConfiguration) {
+        clientConfiguration.put(TCP_KEEPALIVE, true);
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
new file mode 100644
index 0000000..8c30db4
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Interface for a Kinesis proxy using AWS SDK v2.x operating on multiple Kinesis streams within the
+ * same AWS service region.
+ */
+@Internal
+public interface KinesisProxyV2Interface {
+
+    DescribeStreamSummaryResponse describeStreamSummary(String stream)
+            throws InterruptedException, ExecutionException;
+
+    DescribeStreamConsumerResponse describeStreamConsumer(final String streamConsumerArn)
+            throws InterruptedException, ExecutionException;
+
+    DescribeStreamConsumerResponse describeStreamConsumer(
+            final String streamArn, final String consumerName)
+            throws InterruptedException, ExecutionException;
+
+    RegisterStreamConsumerResponse registerStreamConsumer(
+            final String streamArn, final String consumerName)
+            throws InterruptedException, ExecutionException;
+
+    DeregisterStreamConsumerResponse deregisterStreamConsumer(final String consumerArn)
+            throws InterruptedException, ExecutionException;
+
+    CompletableFuture<Void> subscribeToShard(
+            SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler);
+
+    /** Destroy any open resources used by the proxy. */
+    default void close() {
+        // Do nothing by default
+    }
+}
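
As an illustration of how the EFO-related calls above compose, a registration helper might be sketched as follows. The proxy instance would typically come from KinesisProxyV2Factory.createKinesisProxyV2, and the stream and consumer names are caller-supplied placeholders; this is a sketch, not part of the connector.

    import java.util.concurrent.ExecutionException;

    import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;

    import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse;

    public final class EfoRegistrationSketch {

        /** Registers an EFO consumer against the given stream and returns its consumer ARN. */
        public static String registerEfoConsumer(
                KinesisProxyV2Interface proxy, String streamName, String consumerName)
                throws InterruptedException, ExecutionException {
            // Resolve the stream ARN first, then register the enhanced fan-out consumer on it.
            DescribeStreamSummaryResponse summary = proxy.describeStreamSummary(streamName);
            String streamArn = summary.streamDescriptionSummary().streamARN();
            return proxy.registerStreamConsumer(streamArn, consumerName).consumer().consumerARN();
        }
    }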
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/DynamoDBStreamsSchema.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/DynamoDBStreamsSchema.java
new file mode 100644
index 0000000..e34a91c
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/DynamoDBStreamsSchema.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.serialization;
+
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+
+import com.amazonaws.services.dynamodbv2.model.Record;
+import com.amazonaws.services.dynamodbv2.streamsadapter.model.RecordObjectMapper;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.io.IOException;
+
+/** Schema used for deserializing DynamoDB streams records. */
+public class DynamoDBStreamsSchema implements KinesisDeserializationSchema<Record> {
+    private static final ObjectMapper MAPPER = new RecordObjectMapper();
+
+    @Override
+    public Record deserialize(
+            byte[] message,
+            String partitionKey,
+            String seqNum,
+            long approxArrivalTimestamp,
+            String stream,
+            String shardId)
+            throws IOException {
+        return MAPPER.readValue(message, Record.class);
+    }
+
+    @Override
+    public TypeInformation<Record> getProducedType() {
+        return TypeInformation.of(Record.class);
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
new file mode 100644
index 0000000..84f5863
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchema.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.serialization;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+/**
+ * This is a deserialization schema specific for the Flink Kinesis Consumer. Different from the
+ * basic {@link DeserializationSchema}, this schema offers additional Kinesis-specific information
+ * about the record that may be useful to the user application.
+ *
+ * @param <T> The type created by the keyed deserialization schema.
+ */
+@PublicEvolving
+public interface KinesisDeserializationSchema<T> extends Serializable, ResultTypeQueryable<T> {
+
+    /**
+     * Initialization method for the schema. It is called before the actual working methods {@link
+     * #deserialize} and thus suitable for one time setup work.
+     *
+     * <p>The provided {@link DeserializationSchema.InitializationContext} can be used to access
+     * additional features such as e.g. registering user metrics.
+     *
+     * @param context Contextual information that can be used during initialization.
+     */
+    default void open(DeserializationSchema.InitializationContext context) throws Exception {}
+
+    /**
+     * Deserializes a Kinesis record's bytes. If the record cannot be deserialized, {@code null} may
+     * be returned. This informs the Flink Kinesis Consumer to process the Kinesis record without
+     * producing any output for it, i.e. effectively "skipping" the record.
+     *
+     * @param recordValue the record's value as a byte array
+     * @param partitionKey the record's partition key at the time of writing
+     * @param seqNum the sequence number of this record in the Kinesis shard
+     * @param approxArrivalTimestamp the server-side timestamp of when Kinesis received and stored
+     *     the record
+     * @param stream the name of the Kinesis stream that this record was sent to
+     * @param shardId The identifier of the shard the record was sent to
+     * @return the deserialized message as a Java object ({@code null} if the message cannot be
+     *     deserialized).
+     * @throws IOException if an I/O error occurs while deserializing the record
+     */
+    T deserialize(
+            byte[] recordValue,
+            String partitionKey,
+            String seqNum,
+            long approxArrivalTimestamp,
+            String stream,
+            String shardId)
+            throws IOException;
+
+    /**
+     * Method to decide whether the element signals the end of the stream. If true is returned the
+     * element won't be emitted.
+     *
+     * @param nextElement the element to test for the end-of-stream signal
+     * @return true if the element signals end of stream, false otherwise
+     */
+    // TODO FLINK-4194 ADD SUPPORT FOR boolean isEndOfStream(T nextElement);
+}
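
As a concrete, purely illustrative implementation of the interface above, a schema that ignores the Kinesis metadata and decodes every payload as a UTF-8 string could look like this:

    import java.nio.charset.StandardCharsets;

    import org.apache.flink.api.common.typeinfo.TypeInformation;
    import org.apache.flink.api.common.typeinfo.Types;
    import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;

    public class Utf8StringKinesisSchema implements KinesisDeserializationSchema<String> {

        private static final long serialVersionUID = 1L;

        @Override
        public String deserialize(
                byte[] recordValue,
                String partitionKey,
                String seqNum,
                long approxArrivalTimestamp,
                String stream,
                String shardId) {
            // Ignore the Kinesis metadata and decode the payload as UTF-8.
            return new String(recordValue, StandardCharsets.UTF_8);
        }

        @Override
        public TypeInformation<String> getProducedType() {
            return Types.STRING;
        }
    }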
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
new file mode 100644
index 0000000..a836d5b
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisDeserializationSchemaWrapper.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.serialization;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.util.Collector;
+
+import java.io.IOException;
+
+/**
+ * A simple wrapper for using the {@link DeserializationSchema} with the {@link
+ * KinesisDeserializationSchema} interface.
+ *
+ * @param <T> The type created by the deserialization schema.
+ */
+@Internal
+public class KinesisDeserializationSchemaWrapper<T> implements KinesisDeserializationSchema<T> {
+    private static final long serialVersionUID = 9143148962928375886L;
+
+    private final DeserializationSchema<T> deserializationSchema;
+
+    public KinesisDeserializationSchemaWrapper(DeserializationSchema<T> deserializationSchema) {
+        try {
+            Class<? extends DeserializationSchema> deserializationClass =
+                    deserializationSchema.getClass();
+            if (!deserializationClass
+                    .getMethod("deserialize", byte[].class, Collector.class)
+                    .isDefault()) {
+                throw new IllegalArgumentException(
+                        "Kinesis consumer does not support DeserializationSchema that implements "
+                                + "deserialization with a Collector. Unsupported DeserializationSchema: "
+                                + deserializationClass.getName());
+            }
+        } catch (NoSuchMethodException e) {
+            // the schema does not declare deserialize(byte[], Collector), so there is nothing to reject
+        }
+        this.deserializationSchema = deserializationSchema;
+    }
+
+    @Override
+    public void open(DeserializationSchema.InitializationContext context) throws Exception {
+        this.deserializationSchema.open(context);
+    }
+
+    @Override
+    public T deserialize(
+            byte[] recordValue,
+            String partitionKey,
+            String seqNum,
+            long approxArrivalTimestamp,
+            String stream,
+            String shardId)
+            throws IOException {
+        return deserializationSchema.deserialize(recordValue);
+    }
+
+    /*
+    FLINK-4194
+
+    @Override
+    public boolean isEndOfStream(T nextElement) {
+    	return deserializationSchema.isEndOfStream(nextElement);
+    } */
+
+    @Override
+    public TypeInformation<T> getProducedType() {
+        return deserializationSchema.getProducedType();
+    }
+}
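
As a usage sketch, a plain DeserializationSchema can be adapted for the consumer in this module
through the wrapper; the snippet is assumed to live inside a job's main method, and consumerConfig
(region, credentials, start position) is a placeholder Properties object.

    // Wrap a standard Flink DeserializationSchema so it satisfies the Kinesis-specific interface.
    KinesisDeserializationSchema<String> schema =
            new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema());

    // The FlinkKinesisConsumer in this module accepts the Kinesis-specific schema directly.
    FlinkKinesisConsumer<String> consumer =
            new FlinkKinesisConsumer<>("your_stream_name", schema, consumerConfig);
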
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
new file mode 100644
index 0000000..88bc209
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/serialization/KinesisSerializationSchema.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.serialization;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.common.serialization.SerializationSchema.InitializationContext;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+
+/**
+ * Kinesis-specific serialization schema, allowing users to specify a target stream based on a
+ * record's contents.
+ *
+ * @param <T> The type of the elements that this schema serializes.
+ */
+@PublicEvolving
+public interface KinesisSerializationSchema<T> extends Serializable {
+    /**
+     * Initialization method for the schema. It is called before the actual working methods {@link
+     * #serialize(Object)} and thus suitable for one time setup work.
+     *
+     * <p>The provided {@link InitializationContext} can be used to access additional features such
+     * as e.g. registering user metrics.
+     *
+     * @param context Contextual information that can be used during initialization.
+     */
+    default void open(InitializationContext context) throws Exception {}
+
+    /**
+     * Serialize the given element into a ByteBuffer.
+     *
+     * @param element The element to serialize
+     * @return Serialized representation of the element
+     */
+    ByteBuffer serialize(T element);
+
+    /**
+     * Optional method to determine the target stream based on the element. Return <code>null</code>
+     * to use the default stream.
+     *
+     * @param element The element to determine the target stream from
+     * @return target stream name
+     */
+    String getTargetStream(T element);
+}
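
A minimal sketch of a schema that routes records to per-tenant streams through getTargetStream;
the Event type and its accessors are hypothetical.

    import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisSerializationSchema;

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    /** A sketch only: serializes a hypothetical Event and picks a stream per tenant. */
    public class EventSerializationSchema implements KinesisSerializationSchema<Event> {

        @Override
        public ByteBuffer serialize(Event element) {
            return ByteBuffer.wrap(element.toJson().getBytes(StandardCharsets.UTF_8));
        }

        @Override
        public String getTargetStream(Event element) {
            // Returning null tells the producer to fall back to its default stream.
            return element.getTenant() == null ? null : "events-" + element.getTenant();
        }
    }
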
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConnectorOptionsUtil.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConnectorOptionsUtil.java
new file mode 100644
index 0000000..3964773
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConnectorOptionsUtil.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.aws.table.util.AWSOptionUtils;
+import org.apache.flink.connector.aws.table.util.AsyncClientOptionsUtils;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.STREAM;
+
+/**
+ * Class for handling Kinesis table options, including key mapping, validation, and property
+ * extraction. It uses the options decorators {@link AWSOptionUtils}, {@link
+ * AsyncClientOptionsUtils} and {@link KinesisConsumerOptionsUtil} to handle each group of options.
+ */
+@Internal
+public class KinesisConnectorOptionsUtil {
+
+    private final KinesisConsumerOptionsUtil kinesisConsumerOptionsUtil;
+    private final Map<String, String> resolvedOptions;
+    private final ReadableConfig tableOptions;
+
+    public KinesisConnectorOptionsUtil(Map<String, String> options, ReadableConfig tableOptions) {
+        this.resolvedOptions = options;
+        this.tableOptions = tableOptions;
+        this.kinesisConsumerOptionsUtil =
+                new KinesisConsumerOptionsUtil(resolvedOptions, tableOptions.get(STREAM));
+    }
+
+    public Properties getValidatedSourceConfigurations() {
+        return kinesisConsumerOptionsUtil.getValidatedConfigurations();
+    }
+
+    public List<String> getNonValidatedPrefixes() {
+        return kinesisConsumerOptionsUtil.getNonValidatedPrefixes();
+    }
+}
diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConsumerOptionsUtil.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConsumerOptionsUtil.java
new file mode 100644
index 0000000..c3c2307
--- /dev/null
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisConsumerOptionsUtil.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.table;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.aws.table.util.AWSOptionUtils;
+import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
... 17473 lines suppressed ...


[flink-connector-aws] 04/08: [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis v2 connectors from Flink repo

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit d5375d888a47f5d10ea695020a197380547786fd
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 09:38:19 2022 +0000

    [FLINK-29907][Connectors/Kinesis] Externalize Amazon Kinesis v2 connectors from Flink repo
---
 .../75596a92-3816-4a44-85ac-7c96e72f443a           |   0
 .../7e2560a3-23eb-40cc-8669-e7943e393b88           |   0
 .../84abeb9c-8355-4165-96aa-dda65b04e5e7           |   6 +
 .../archunit-violations/stored.rules               |   4 +
 flink-connector-aws-kinesis-streams/pom.xml        | 137 +++++
 .../sink/KinesisStreamsConfigConstants.java        |  32 ++
 .../kinesis/sink/KinesisStreamsException.java      |  51 ++
 .../connector/kinesis/sink/KinesisStreamsSink.java | 160 ++++++
 .../kinesis/sink/KinesisStreamsSinkBuilder.java    | 134 +++++
 .../sink/KinesisStreamsSinkElementConverter.java   | 127 +++++
 .../kinesis/sink/KinesisStreamsSinkWriter.java     | 256 ++++++++++
 .../sink/KinesisStreamsStateSerializer.java        |  83 +++
 .../kinesis/sink/PartitionKeyGenerator.java        |  31 ++
 .../table/FixedKinesisPartitionKeyGenerator.java   |  73 +++
 .../kinesis/table/KinesisConnectorOptions.java     |  94 ++++
 .../kinesis/table/KinesisDynamicSink.java          | 258 ++++++++++
 .../table/KinesisDynamicTableSinkFactory.java      | 126 +++++
 .../table/KinesisPartitionKeyGeneratorFactory.java | 111 ++++
 .../table/RandomKinesisPartitionKeyGenerator.java  |  48 ++
 .../RowDataFieldsKinesisPartitionKeyGenerator.java | 266 ++++++++++
 .../util/KinesisStreamsConnectorOptionsUtils.java  | 272 ++++++++++
 .../org.apache.flink.table.factories.Factory       |  16 +
 .../src/main/resources/log4j2.properties           |  25 +
 .../architecture/TestCodeArchitectureTest.java     |  40 ++
 .../sink/KinesisStreamsSinkBuilderTest.java        |  93 ++++
 .../kinesis/sink/KinesisStreamsSinkITCase.java     | 566 +++++++++++++++++++++
 .../sink/KinesisStreamsStateSerializerTest.java    |  56 ++
 .../kinesis/sink/examples/SinkIntoKinesis.java     |  74 +++
 .../table/KinesisDynamicTableSinkFactoryTest.java  | 306 +++++++++++
 ...DataFieldsKinesisPartitionKeyGeneratorTest.java | 305 +++++++++++
 .../util/KinesisProducerOptionsMapperTest.java     |  79 +++
 .../kinesis/testutils/KinesaliteContainer.java     | 163 ++++++
 .../org.junit.jupiter.api.extension.Extension      |  16 +
 .../src/test/resources/archunit.properties         |  31 ++
 .../src/test/resources/log4j2-test.properties      |  28 +
 .../src/test/resources/profile                     |   7 +
 flink-sql-connector-aws-kinesis-streams/pom.xml    | 120 +++++
 .../src/main/resources/META-INF/NOTICE             |  50 ++
 pom.xml                                            |   6 +-
 39 files changed, 4249 insertions(+), 1 deletion(-)

diff --git a/flink-connector-aws-kinesis-streams/archunit-violations/75596a92-3816-4a44-85ac-7c96e72f443a b/flink-connector-aws-kinesis-streams/archunit-violations/75596a92-3816-4a44-85ac-7c96e72f443a
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-aws-kinesis-streams/archunit-violations/7e2560a3-23eb-40cc-8669-e7943e393b88 b/flink-connector-aws-kinesis-streams/archunit-violations/7e2560a3-23eb-40cc-8669-e7943e393b88
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-aws-kinesis-streams/archunit-violations/84abeb9c-8355-4165-96aa-dda65b04e5e7 b/flink-connector-aws-kinesis-streams/archunit-violations/84abeb9c-8355-4165-96aa-dda65b04e5e7
new file mode 100644
index 0000000..7c90aa6
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/archunit-violations/84abeb9c-8355-4165-96aa-dda65b04e5e7
@@ -0,0 +1,6 @@
+org.apache.flink.connector.kinesis.sink.KinesisStreamsSinkITCase does not satisfy: only one of the following predicates match:\
+* reside in a package 'org.apache.flink.runtime.*' and contain any fields that are static, final, and of type InternalMiniClusterExtension and annotated with @RegisterExtension\
+* reside outside of package 'org.apache.flink.runtime.*' and contain any fields that are static, final, and of type MiniClusterExtension and annotated with @RegisterExtension\
+* reside in a package 'org.apache.flink.runtime.*' and is annotated with @ExtendWith with class InternalMiniClusterExtension\
+* reside outside of package 'org.apache.flink.runtime.*' and is annotated with @ExtendWith with class MiniClusterExtension\
+ or contain any fields that are public, static, and of type MiniClusterWithClientResource and final and annotated with @ClassRule or contain any fields that is of type MiniClusterWithClientResource and public and final and not static and annotated with @Rule
\ No newline at end of file
diff --git a/flink-connector-aws-kinesis-streams/archunit-violations/stored.rules b/flink-connector-aws-kinesis-streams/archunit-violations/stored.rules
new file mode 100644
index 0000000..d2c4282
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/archunit-violations/stored.rules
@@ -0,0 +1,4 @@
+#
+#Tue Feb 22 12:18:27 CET 2022
+Tests\ inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=7e2560a3-23eb-40cc-8669-e7943e393b88
+ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ extension=84abeb9c-8355-4165-96aa-dda65b04e5e7
diff --git a/flink-connector-aws-kinesis-streams/pom.xml b/flink-connector-aws-kinesis-streams/pom.xml
new file mode 100644
index 0000000..af280a6
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/pom.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>flink-connector-aws-kinesis-streams</artifactId>
+    <name>Flink : Connectors : AWS : Amazon Kinesis Data Streams</name>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-streaming-java</artifactId>
+            <version>${flink.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-base</artifactId>
+            <version>${flink.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>kinesis</artifactId>
+        </dependency>
+
+        <!--Table API dependencies-->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-common</artifactId>
+            <version>${flink.version}</version>
+        </dependency>
+
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-base</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Kinesis table factory testing -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-common</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- ArchUnit test dependencies -->
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-architecture-tests-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsConfigConstants.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsConfigConstants.java
new file mode 100644
index 0000000..338e52a
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsConfigConstants.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/** Defaults for {@link KinesisStreamsSinkWriter}. */
+@PublicEvolving
+public class KinesisStreamsConfigConstants {
+
+    public static final String BASE_KINESIS_USER_AGENT_PREFIX_FORMAT =
+            "Apache Flink %s (%s) Kinesis Connector";
+
+    /** Kinesis identifier for user agent prefix. */
+    public static final String KINESIS_CLIENT_USER_AGENT_PREFIX =
+            "aws.kinesis.client.user-agent-prefix";
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsException.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsException.java
new file mode 100644
index 0000000..696cbd8
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsException.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+/**
+ * A {@link RuntimeException} wrapper indicating the exception was thrown from the Kinesis Data
+ * Streams Sink.
+ */
+class KinesisStreamsException extends RuntimeException {
+
+    public KinesisStreamsException(final String message) {
+        super(message);
+    }
+
+    public KinesisStreamsException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    /**
+     * When the flag {@code failOnError} is set in {@link KinesisStreamsSinkWriter}, this exception
+     * is raised as soon as any exception occurs when KDS is written to.
+     */
+    static class KinesisStreamsFailFastException extends KinesisStreamsException {
+
+        private static final String ERROR_MESSAGE =
+                "Encountered an exception while persisting records, not retrying due to {failOnError} being set.";
+
+        public KinesisStreamsFailFastException() {
+            super(ERROR_MESSAGE);
+        }
+
+        public KinesisStreamsFailFastException(final Throwable cause) {
+            super(ERROR_MESSAGE, cause);
+        }
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSink.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSink.java
new file mode 100644
index 0000000..b6cef5a
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSink.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.base.sink.AsyncSinkBase;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Properties;
+
+/**
+ * A Kinesis Data Streams (KDS) Sink that performs async requests against a destination stream using
+ * the buffering protocol specified in {@link AsyncSinkBase}.
+ *
+ * <p>The sink internally uses a {@link software.amazon.awssdk.services.kinesis.KinesisAsyncClient}
+ * to communicate with the AWS endpoint.
+ *
+ * <p>The behaviour of the buffering may be configured by providing the following settings when
+ * building the sink:
+ *
+ * <ul>
+ *   <li>{@code maxBatchSize}: the maximum size of a batch of entries that may be sent to KDS
+ *   <li>{@code maxInFlightRequests}: the maximum number of in-flight requests that may exist; once
+ *       the maximum has been reached, new requests are blocked until some of the in-flight
+ *       requests have completed
+ *   <li>{@code maxBufferedRequests}: the maximum number of elements held in the buffer; requests to
+ *       add elements are blocked while the buffer is at this maximum
+ *   <li>{@code maxBatchSizeInBytes}: the maximum size of a batch of entries that may be sent to KDS,
+ *       measured in bytes
+ *   <li>{@code maxTimeInBufferMS}: the maximum amount of time an entry is allowed to live in the
+ *       buffer; if any element reaches this age, the entire buffer is flushed immediately
+ *   <li>{@code maxRecordSizeInBytes}: the maximum size of a record the sink will accept into the
+ *       buffer; a record larger than this is rejected when passed to the sink
+ *   <li>{@code failOnError}: if set, the job fails immediately whenever an exception is
+ *       encountered while persisting to Kinesis Data Streams
+ * </ul>
+ *
+ * <p>Please see the writer implementation in {@link KinesisStreamsSinkWriter}
+ *
+ * @param <InputT> Type of the elements handled by this sink
+ */
+@PublicEvolving
+public class KinesisStreamsSink<InputT> extends AsyncSinkBase<InputT, PutRecordsRequestEntry> {
+
+    private final boolean failOnError;
+    private final String streamName;
+    private final Properties kinesisClientProperties;
+
+    KinesisStreamsSink(
+            ElementConverter<InputT, PutRecordsRequestEntry> elementConverter,
+            Integer maxBatchSize,
+            Integer maxInFlightRequests,
+            Integer maxBufferedRequests,
+            Long maxBatchSizeInBytes,
+            Long maxTimeInBufferMS,
+            Long maxRecordSizeInBytes,
+            boolean failOnError,
+            String streamName,
+            Properties kinesisClientProperties) {
+        super(
+                elementConverter,
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBatchSizeInBytes,
+                maxTimeInBufferMS,
+                maxRecordSizeInBytes);
+        this.streamName =
+                Preconditions.checkNotNull(
+                        streamName,
+                        "The stream name must not be null when initializing the KDS Sink.");
+        Preconditions.checkArgument(
+                !this.streamName.isEmpty(),
+                "The stream name must be set when initializing the KDS Sink.");
+        this.failOnError = failOnError;
+        this.kinesisClientProperties = kinesisClientProperties;
+    }
+
+    /**
+     * Create a {@link KinesisStreamsSinkBuilder} to allow the fluent construction of a new {@code
+     * KinesisStreamsSink}.
+     *
+     * @param <InputT> type of incoming records
+     * @return {@link KinesisStreamsSinkBuilder}
+     */
+    public static <InputT> KinesisStreamsSinkBuilder<InputT> builder() {
+        return new KinesisStreamsSinkBuilder<>();
+    }
+
+    @Internal
+    @Override
+    public StatefulSinkWriter<InputT, BufferedRequestState<PutRecordsRequestEntry>> createWriter(
+            InitContext context) throws IOException {
+        return new KinesisStreamsSinkWriter<>(
+                getElementConverter(),
+                context,
+                getMaxBatchSize(),
+                getMaxInFlightRequests(),
+                getMaxBufferedRequests(),
+                getMaxBatchSizeInBytes(),
+                getMaxTimeInBufferMS(),
+                getMaxRecordSizeInBytes(),
+                failOnError,
+                streamName,
+                kinesisClientProperties,
+                Collections.emptyList());
+    }
+
+    @Internal
+    @Override
+    public SimpleVersionedSerializer<BufferedRequestState<PutRecordsRequestEntry>>
+            getWriterStateSerializer() {
+        return new KinesisStreamsStateSerializer();
+    }
+
+    @Internal
+    @Override
+    public StatefulSinkWriter<InputT, BufferedRequestState<PutRecordsRequestEntry>> restoreWriter(
+            InitContext context,
+            Collection<BufferedRequestState<PutRecordsRequestEntry>> recoveredState)
+            throws IOException {
+        return new KinesisStreamsSinkWriter<>(
+                getElementConverter(),
+                context,
+                getMaxBatchSize(),
+                getMaxInFlightRequests(),
+                getMaxBufferedRequests(),
+                getMaxBatchSizeInBytes(),
+                getMaxTimeInBufferMS(),
+                getMaxRecordSizeInBytes(),
+                failOnError,
+                streamName,
+                kinesisClientProperties,
+                recoveredState);
+    }
+}
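
For orientation, a sketch of attaching the sink to a DataStream; the stream name, the region value
and the DataStream<String> named stream are placeholders, and the snippet is assumed to live
inside a job's main method.

    Properties clientProperties = new Properties();
    // AWSConfigConstants.AWS_REGION is provided by the flink-connector-aws-base module.
    clientProperties.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-1");

    KinesisStreamsSink<String> sink =
            KinesisStreamsSink.<String>builder()
                    .setStreamName("your_stream_name")
                    .setSerializationSchema(new SimpleStringSchema())
                    .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
                    .setKinesisClientProperties(clientProperties)
                    .build();

    stream.sinkTo(sink);
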
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilder.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilder.java
new file mode 100644
index 0000000..3e6f7ec
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilder.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.connector.base.sink.AsyncSinkBaseBuilder;
+
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/**
+ * Builder to construct {@link KinesisStreamsSink}.
+ *
+ * <p>The following example shows the minimum setup to create a {@link KinesisStreamsSink} that
+ * writes String values to a Kinesis Data Streams stream named {@code your_stream_name}.
+ *
+ * <pre>{@code
+ * KinesisStreamsSink<String> kdsSink =
+ *                 KinesisStreamsSink.<String>builder()
+ *                         .setStreamName("your_stream_name")
+ *                         .setSerializationSchema(new SimpleStringSchema())
+ *                         .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
+ *                         .build();
+ * }</pre>
+ *
+ * <p>If the following parameters are not set in this builder, the following defaults will be used:
+ *
+ * <ul>
+ *   <li>{@code maxBatchSize} will be 500
+ *   <li>{@code maxInFlightRequests} will be 50
+ *   <li>{@code maxBufferedRequests} will be 10000
+ *   <li>{@code maxBatchSizeInBytes} will be 5 MB i.e. {@code 5 * 1024 * 1024}
+ *   <li>{@code maxTimeInBufferMS} will be 5000ms
+ *   <li>{@code maxRecordSizeInBytes} will be 1 MB i.e. {@code 1 * 1024 * 1024}
+ *   <li>{@code failOnError} will be false
+ * </ul>
+ *
+ * @param <InputT> type of elements that should be persisted in the destination
+ */
+@PublicEvolving
+public class KinesisStreamsSinkBuilder<InputT>
+        extends AsyncSinkBaseBuilder<
+                InputT, PutRecordsRequestEntry, KinesisStreamsSinkBuilder<InputT>> {
+
+    private static final int DEFAULT_MAX_BATCH_SIZE = 500;
+    private static final int DEFAULT_MAX_IN_FLIGHT_REQUESTS = 50;
+    private static final int DEFAULT_MAX_BUFFERED_REQUESTS = 10_000;
+    private static final long DEFAULT_MAX_BATCH_SIZE_IN_B = 5 * 1024 * 1024;
+    private static final long DEFAULT_MAX_TIME_IN_BUFFER_MS = 5000;
+    private static final long DEFAULT_MAX_RECORD_SIZE_IN_B = 1 * 1024 * 1024;
+    private static final boolean DEFAULT_FAIL_ON_ERROR = false;
+
+    private Boolean failOnError;
+    private String streamName;
+    private Properties kinesisClientProperties;
+    private SerializationSchema<InputT> serializationSchema;
+    private PartitionKeyGenerator<InputT> partitionKeyGenerator;
+
+    KinesisStreamsSinkBuilder() {}
+
+    /**
+     * Sets the name of the KDS stream that the sink will connect to. There is no default for this
+     * parameter, therefore, this must be provided at sink creation time otherwise the build will
+     * fail.
+     *
+     * @param streamName the name of the stream
+     * @return {@link KinesisStreamsSinkBuilder} itself
+     */
+    public KinesisStreamsSinkBuilder<InputT> setStreamName(String streamName) {
+        this.streamName = streamName;
+        return this;
+    }
+
+    public KinesisStreamsSinkBuilder<InputT> setSerializationSchema(
+            SerializationSchema<InputT> serializationSchema) {
+        this.serializationSchema = serializationSchema;
+        return this;
+    }
+
+    public KinesisStreamsSinkBuilder<InputT> setPartitionKeyGenerator(
+            PartitionKeyGenerator<InputT> partitionKeyGenerator) {
+        this.partitionKeyGenerator = partitionKeyGenerator;
+        return this;
+    }
+
+    public KinesisStreamsSinkBuilder<InputT> setFailOnError(boolean failOnError) {
+        this.failOnError = failOnError;
+        return this;
+    }
+
+    public KinesisStreamsSinkBuilder<InputT> setKinesisClientProperties(
+            Properties kinesisClientProperties) {
+        this.kinesisClientProperties = kinesisClientProperties;
+        return this;
+    }
+
+    @Override
+    public KinesisStreamsSink<InputT> build() {
+        return new KinesisStreamsSink<>(
+                new KinesisStreamsSinkElementConverter.Builder<InputT>()
+                        .setSerializationSchema(serializationSchema)
+                        .setPartitionKeyGenerator(partitionKeyGenerator)
+                        .build(),
+                Optional.ofNullable(getMaxBatchSize()).orElse(DEFAULT_MAX_BATCH_SIZE),
+                Optional.ofNullable(getMaxInFlightRequests())
+                        .orElse(DEFAULT_MAX_IN_FLIGHT_REQUESTS),
+                Optional.ofNullable(getMaxBufferedRequests()).orElse(DEFAULT_MAX_BUFFERED_REQUESTS),
+                Optional.ofNullable(getMaxBatchSizeInBytes()).orElse(DEFAULT_MAX_BATCH_SIZE_IN_B),
+                Optional.ofNullable(getMaxTimeInBufferMS()).orElse(DEFAULT_MAX_TIME_IN_BUFFER_MS),
+                Optional.ofNullable(getMaxRecordSizeInBytes()).orElse(DEFAULT_MAX_RECORD_SIZE_IN_B),
+                Optional.ofNullable(failOnError).orElse(DEFAULT_FAIL_ON_ERROR),
+                streamName,
+                Optional.ofNullable(kinesisClientProperties).orElse(new Properties()));
+    }
+}
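
Where the defaults listed above do not fit, the setters inherited from AsyncSinkBaseBuilder
override them; a sketch with illustrative values only.

    KinesisStreamsSink<String> sink =
            KinesisStreamsSink.<String>builder()
                    .setStreamName("your_stream_name")
                    .setSerializationSchema(new SimpleStringSchema())
                    .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
                    .setMaxBatchSize(100)          // default 500
                    .setMaxInFlightRequests(20)    // default 50
                    .setMaxTimeInBufferMS(1_000)   // default 5000
                    .setFailOnError(true)          // default false
                    .build();
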
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkElementConverter.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkElementConverter.java
new file mode 100644
index 0000000..a7e4411
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkElementConverter.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.flink.util.Preconditions;
+import org.apache.flink.util.SimpleUserCodeClassLoader;
+import org.apache.flink.util.UserCodeClassLoader;
+
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+/**
+ * An implementation of the {@link ElementConverter} that uses the AWS Kinesis SDK v2. The user only
+ * needs to provide a {@link SerializationSchema} of the {@code InputT} and a {@link
+ * PartitionKeyGenerator} lambda to transform the input element into a String.
+ */
+@Internal
+public class KinesisStreamsSinkElementConverter<InputT>
+        implements ElementConverter<InputT, PutRecordsRequestEntry> {
+
+    /** A serialization schema to specify how the input element should be serialized. */
+    private final SerializationSchema<InputT> serializationSchema;
+
+    /**
+     * A partition key generator functional interface that produces a string from the input element.
+     */
+    private final PartitionKeyGenerator<InputT> partitionKeyGenerator;
+
+    private boolean schemaOpened = false;
+
+    private KinesisStreamsSinkElementConverter(
+            SerializationSchema<InputT> serializationSchema,
+            PartitionKeyGenerator<InputT> partitionKeyGenerator) {
+        this.serializationSchema = serializationSchema;
+        this.partitionKeyGenerator = partitionKeyGenerator;
+    }
+
+    @Override
+    public PutRecordsRequestEntry apply(InputT element, SinkWriter.Context context) {
+        checkOpened();
+        return PutRecordsRequestEntry.builder()
+                .data(SdkBytes.fromByteArray(serializationSchema.serialize(element)))
+                .partitionKey(partitionKeyGenerator.apply(element))
+                .build();
+    }
+
+    private void checkOpened() {
+        if (!schemaOpened) {
+            try {
+                serializationSchema.open(
+                        new SerializationSchema.InitializationContext() {
+                            @Override
+                            public MetricGroup getMetricGroup() {
+                                return new UnregisteredMetricsGroup();
+                            }
+
+                            @Override
+                            public UserCodeClassLoader getUserCodeClassLoader() {
+                                return SimpleUserCodeClassLoader.create(
+                                        KinesisStreamsSinkElementConverter.class.getClassLoader());
+                            }
+                        });
+                schemaOpened = true;
+            } catch (Exception e) {
+                throw new FlinkRuntimeException("Failed to initialize serialization schema.", e);
+            }
+        }
+    }
+
+    public static <InputT> Builder<InputT> builder() {
+        return new Builder<>();
+    }
+
+    /** A builder for the KinesisStreamsSinkElementConverter. */
+    public static class Builder<InputT> {
+
+        private SerializationSchema<InputT> serializationSchema;
+        private PartitionKeyGenerator<InputT> partitionKeyGenerator;
+
+        public Builder<InputT> setSerializationSchema(
+                SerializationSchema<InputT> serializationSchema) {
+            this.serializationSchema = serializationSchema;
+            return this;
+        }
+
+        public Builder<InputT> setPartitionKeyGenerator(
+                PartitionKeyGenerator<InputT> partitionKeyGenerator) {
+            this.partitionKeyGenerator = partitionKeyGenerator;
+            return this;
+        }
+
+        public KinesisStreamsSinkElementConverter<InputT> build() {
+            Preconditions.checkNotNull(
+                    serializationSchema,
+                    "No SerializationSchema was supplied to the "
+                            + "KinesisStreamsSinkElementConverter builder.");
+            Preconditions.checkNotNull(
+                    partitionKeyGenerator,
+                    "No PartitionKeyGenerator lambda was supplied to the "
+                            + "KinesisStreamsSinkElementConverter builder.");
+            return new KinesisStreamsSinkElementConverter<>(
+                    serializationSchema, partitionKeyGenerator);
+        }
+    }
+}
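
In practice this converter is assembled by KinesisStreamsSinkBuilder, but the sketch below shows
how a deterministic PartitionKeyGenerator plugs in; the Order type, its getOrderId() accessor and
OrderSerializationSchema are hypothetical.

    // Partition by a business key so that all records for one order land on the same shard.
    PartitionKeyGenerator<Order> byOrderId = order -> order.getOrderId();

    ElementConverter<Order, PutRecordsRequestEntry> converter =
            KinesisStreamsSinkElementConverter.<Order>builder()
                    .setSerializationSchema(new OrderSerializationSchema())
                    .setPartitionKeyGenerator(byOrderId)
                    .build();
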
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkWriter.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkWriter.java
new file mode 100644
index 0000000..35e542f
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkWriter.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.connector.aws.util.AWSAsyncSinkUtil;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+import org.apache.flink.connector.base.sink.throwable.FatalExceptionClassifier;
+import org.apache.flink.connector.base.sink.writer.AsyncSinkWriter;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.connector.base.sink.writer.config.AsyncSinkWriterConfiguration;
+import org.apache.flink.metrics.Counter;
+import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsResponse;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsResultEntry;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+
+import static org.apache.flink.connector.aws.util.AWSCredentialFatalExceptionClassifiers.getInvalidCredentialsExceptionClassifier;
+import static org.apache.flink.connector.aws.util.AWSCredentialFatalExceptionClassifiers.getSdkClientMisconfiguredExceptionClassifier;
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkFatalExceptionClassifiers.getInterruptedExceptionClassifier;
+
+/**
+ * Sink writer created by {@link KinesisStreamsSink} to write to Kinesis Data Streams. More details
+ * on the operation of this sink writer may be found in the doc for {@link KinesisStreamsSink}. More
+ * details on the internals of this sink writer may be found in {@link AsyncSinkWriter}.
+ *
+ * <p>The {@link KinesisAsyncClient} used here may be configured in the standard way for the AWS SDK
+ * 2.x, e.g. by providing {@code AWS_REGION}, {@code AWS_ACCESS_KEY_ID} and {@code
+ * AWS_SECRET_ACCESS_KEY} through environment variables.
+ */
+class KinesisStreamsSinkWriter<InputT> extends AsyncSinkWriter<InputT, PutRecordsRequestEntry> {
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisStreamsSinkWriter.class);
+
+    private static final FatalExceptionClassifier RESOURCE_NOT_FOUND_EXCEPTION_CLASSIFIER =
+            FatalExceptionClassifier.withRootCauseOfType(
+                    ResourceNotFoundException.class,
+                    err ->
+                            new KinesisStreamsException(
+                                    "Encountered non-recoverable exception relating to not being able to find the specified resources",
+                                    err));
+
+    private static final FatalExceptionClassifier KINESIS_FATAL_EXCEPTION_CLASSIFIER =
+            FatalExceptionClassifier.createChain(
+                    getInterruptedExceptionClassifier(),
+                    getInvalidCredentialsExceptionClassifier(),
+                    RESOURCE_NOT_FOUND_EXCEPTION_CLASSIFIER,
+                    getSdkClientMisconfiguredExceptionClassifier());
+
+    private final Counter numRecordsOutErrorsCounter;
+
+    /* Name of the stream in Kinesis Data Streams */
+    private final String streamName;
+
+    /* The sink writer metric group */
+    private final SinkWriterMetricGroup metrics;
+
+    /* The asynchronous http client for the asynchronous Kinesis client */
+    private final SdkAsyncHttpClient httpClient;
+
+    /* The asynchronous Kinesis client - constructed from kinesisClientProperties */
+    private final KinesisAsyncClient kinesisClient;
+
+    /* Flag determining whether to fail fatally whenever an exception is encountered when persisting records */
+    private final boolean failOnError;
+
+    KinesisStreamsSinkWriter(
+            ElementConverter<InputT, PutRecordsRequestEntry> elementConverter,
+            Sink.InitContext context,
+            int maxBatchSize,
+            int maxInFlightRequests,
+            int maxBufferedRequests,
+            long maxBatchSizeInBytes,
+            long maxTimeInBufferMS,
+            long maxRecordSizeInBytes,
+            boolean failOnError,
+            String streamName,
+            Properties kinesisClientProperties) {
+        this(
+                elementConverter,
+                context,
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBatchSizeInBytes,
+                maxTimeInBufferMS,
+                maxRecordSizeInBytes,
+                failOnError,
+                streamName,
+                kinesisClientProperties,
+                Collections.emptyList());
+    }
+
+    KinesisStreamsSinkWriter(
+            ElementConverter<InputT, PutRecordsRequestEntry> elementConverter,
+            Sink.InitContext context,
+            int maxBatchSize,
+            int maxInFlightRequests,
+            int maxBufferedRequests,
+            long maxBatchSizeInBytes,
+            long maxTimeInBufferMS,
+            long maxRecordSizeInBytes,
+            boolean failOnError,
+            String streamName,
+            Properties kinesisClientProperties,
+            Collection<BufferedRequestState<PutRecordsRequestEntry>> states) {
+        super(
+                elementConverter,
+                context,
+                AsyncSinkWriterConfiguration.builder()
+                        .setMaxBatchSize(maxBatchSize)
+                        .setMaxBatchSizeInBytes(maxBatchSizeInBytes)
+                        .setMaxInFlightRequests(maxInFlightRequests)
+                        .setMaxBufferedRequests(maxBufferedRequests)
+                        .setMaxTimeInBufferMS(maxTimeInBufferMS)
+                        .setMaxRecordSizeInBytes(maxRecordSizeInBytes)
+                        .build(),
+                states);
+        this.failOnError = failOnError;
+        this.streamName = streamName;
+        this.metrics = context.metricGroup();
+        this.numRecordsOutErrorsCounter = metrics.getNumRecordsOutErrorsCounter();
+        this.httpClient = AWSGeneralUtil.createAsyncHttpClient(kinesisClientProperties);
+        this.kinesisClient = buildClient(kinesisClientProperties, this.httpClient);
+    }
+
+    private KinesisAsyncClient buildClient(
+            Properties kinesisClientProperties, SdkAsyncHttpClient httpClient) {
+        AWSGeneralUtil.validateAwsCredentials(kinesisClientProperties);
+
+        return AWSAsyncSinkUtil.createAwsAsyncClient(
+                kinesisClientProperties,
+                httpClient,
+                KinesisAsyncClient.builder(),
+                KinesisStreamsConfigConstants.BASE_KINESIS_USER_AGENT_PREFIX_FORMAT,
+                KinesisStreamsConfigConstants.KINESIS_CLIENT_USER_AGENT_PREFIX);
+    }
+
+    @Override
+    protected void submitRequestEntries(
+            List<PutRecordsRequestEntry> requestEntries,
+            Consumer<List<PutRecordsRequestEntry>> requestResult) {
+
+        PutRecordsRequest batchRequest =
+                PutRecordsRequest.builder().records(requestEntries).streamName(streamName).build();
+
+        CompletableFuture<PutRecordsResponse> future = kinesisClient.putRecords(batchRequest);
+
+        future.whenComplete(
+                (response, err) -> {
+                    if (err != null) {
+                        handleFullyFailedRequest(err, requestEntries, requestResult);
+                    } else if (response.failedRecordCount() > 0) {
+                        handlePartiallyFailedRequest(response, requestEntries, requestResult);
+                    } else {
+                        requestResult.accept(Collections.emptyList());
+                    }
+                });
+    }
+
+    @Override
+    protected long getSizeInBytes(PutRecordsRequestEntry requestEntry) {
+        return requestEntry.data().asByteArrayUnsafe().length;
+    }
+
+    private void handleFullyFailedRequest(
+            Throwable err,
+            List<PutRecordsRequestEntry> requestEntries,
+            Consumer<List<PutRecordsRequestEntry>> requestResult) {
+        LOG.debug(
+                "KDS Sink failed to write and will retry {} entries to KDS",
+                requestEntries.size(),
+                err);
+        numRecordsOutErrorsCounter.inc(requestEntries.size());
+
+        if (isRetryable(err)) {
+            requestResult.accept(requestEntries);
+        }
+    }
+
+    @Override
+    public void close() {
+        AWSGeneralUtil.closeResources(httpClient, kinesisClient);
+    }
+
+    private void handlePartiallyFailedRequest(
+            PutRecordsResponse response,
+            List<PutRecordsRequestEntry> requestEntries,
+            Consumer<List<PutRecordsRequestEntry>> requestResult) {
+        LOG.debug(
+                "KDS Sink failed to write and will retry {} entries to KDS",
+                response.failedRecordCount());
+        numRecordsOutErrorsCounter.inc(response.failedRecordCount());
+
+        if (failOnError) {
+            getFatalExceptionCons()
+                    .accept(new KinesisStreamsException.KinesisStreamsFailFastException());
+            return;
+        }
+        List<PutRecordsRequestEntry> failedRequestEntries =
+                new ArrayList<>(response.failedRecordCount());
+        List<PutRecordsResultEntry> records = response.records();
+
+        for (int i = 0; i < records.size(); i++) {
+            if (records.get(i).errorCode() != null) {
+                failedRequestEntries.add(requestEntries.get(i));
+            }
+        }
+
+        requestResult.accept(failedRequestEntries);
+    }
+
+    private boolean isRetryable(Throwable err) {
+
+        if (!KINESIS_FATAL_EXCEPTION_CLASSIFIER.isFatal(err, getFatalExceptionCons())) {
+            return false;
+        }
+        if (failOnError) {
+            getFatalExceptionCons()
+                    .accept(new KinesisStreamsException.KinesisStreamsFailFastException(err));
+            return false;
+        }
+
+        return true;
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializer.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializer.java
new file mode 100644
index 0000000..ad1bd8f
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializer.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.base.sink.writer.AsyncSinkWriterStateSerializer;
+
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+/** Kinesis Streams implementation of {@link AsyncSinkWriterStateSerializer}. */
+@Internal
+public class KinesisStreamsStateSerializer
+        extends AsyncSinkWriterStateSerializer<PutRecordsRequestEntry> {
+    @Override
+    protected void serializeRequestToStream(PutRecordsRequestEntry request, DataOutputStream out)
+            throws IOException {
+        out.write(request.data().asByteArrayUnsafe());
+        serializePartitionKeyToStream(request.partitionKey(), out);
+        validateExplicitHashKey(request);
+    }
+
+    protected void serializePartitionKeyToStream(String partitionKey, DataOutputStream out)
+            throws IOException {
+        // Use the UTF-8 byte length as the frame length so multi-byte characters round-trip.
+        byte[] partitionKeyBytes = partitionKey.getBytes(StandardCharsets.UTF_8);
+        out.writeInt(partitionKeyBytes.length);
+        out.write(partitionKeyBytes);
+    }
+
+    protected void validateExplicitHashKey(PutRecordsRequestEntry request) {
+        if (request.explicitHashKey() != null) {
+            throw new IllegalStateException(
+                    String.format(
+                            "KinesisStreamsStateSerializer is incompatible with ElementConverter."
+                                    + "Serializer version %d  does not support explicit hash key.",
+                            getVersion()));
+        }
+    }
+
+    @Override
+    protected PutRecordsRequestEntry deserializeRequestFromStream(
+            long requestSize, DataInputStream in) throws IOException {
+        byte[] requestData = new byte[(int) requestSize];
+        in.readFully(requestData);
+
+        return PutRecordsRequestEntry.builder()
+                .data(SdkBytes.fromByteArray(requestData))
+                .partitionKey(deserializePartitionKeyFromStream(in))
+                .build();
+    }
+
+    protected String deserializePartitionKeyFromStream(DataInputStream in) throws IOException {
+        int partitionKeyLength = in.readInt();
+        byte[] requestPartitionKeyData = new byte[partitionKeyLength];
+        in.readFully(requestPartitionKeyData);
+        return new String(requestPartitionKeyData, StandardCharsets.UTF_8);
+    }
+
+    @Override
+    public int getVersion() {
+        return 1;
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/PartitionKeyGenerator.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/PartitionKeyGenerator.java
new file mode 100644
index 0000000..2873c05
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/sink/PartitionKeyGenerator.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+import java.io.Serializable;
+import java.util.function.Function;
+
+/**
+ * This is a serializable function whose {@code apply()} method specifies how to convert from an
+ * input element to the partition key, a string.
+ */
+@PublicEvolving
+@FunctionalInterface
+public interface PartitionKeyGenerator<InputT> extends Function<InputT, String>, Serializable {}
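
Since the interface is a @FunctionalInterface extending Function<InputT, String> and Serializable, it can be implemented with a lambda or method reference. A minimal sketch, illustrative only (the element type and keying rule below are assumptions, not part of this change):

    import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;

    public final class PartitionKeyGeneratorExample {
        public static void main(String[] args) {
            // Key each record by its first character; any deterministic mapping to a
            // string works as a partition key.
            PartitionKeyGenerator<String> firstCharKey =
                    element -> element.isEmpty() ? "empty" : element.substring(0, 1);

            System.out.println(firstCharKey.apply("flink")); // prints "f"
        }
    }
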
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/FixedKinesisPartitionKeyGenerator.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/FixedKinesisPartitionKeyGenerator.java
new file mode 100644
index 0000000..21e4595
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/FixedKinesisPartitionKeyGenerator.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.util.Preconditions;
+
+import java.util.Objects;
+
+/**
+ * A partitioner ensuring that each internal Flink partition ends up in the same Kinesis partition.
+ *
+ * <p>This is achieved by using the index of the producer task as a {@code PartitionKey}.
+ */
+@PublicEvolving
+public final class FixedKinesisPartitionKeyGenerator<T> implements PartitionKeyGenerator<T> {
+
+    private static final long serialVersionUID = 1L;
+
+    private int indexOfThisSubtask = 0;
+
+    public void initialize(int indexOfThisSubtask, int numberOfParallelSubtasks) {
+        Preconditions.checkArgument(
+                indexOfThisSubtask >= 0, "Id of this subtask cannot be negative.");
+        Preconditions.checkArgument(
+                numberOfParallelSubtasks > 0, "Number of subtasks must be larger than 0.");
+
+        this.indexOfThisSubtask = indexOfThisSubtask;
+    }
+
+    @Override
+    public String apply(T record) {
+        return String.valueOf(indexOfThisSubtask);
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // Value semantics for equals and hashCode
+    // --------------------------------------------------------------------------------------------
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final FixedKinesisPartitionKeyGenerator<?> that = (FixedKinesisPartitionKeyGenerator<?>) o;
+        return Objects.equals(this.indexOfThisSubtask, that.indexOfThisSubtask);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(FixedKinesisPartitionKeyGenerator.class.hashCode(), indexOfThisSubtask);
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisConnectorOptions.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisConnectorOptions.java
new file mode 100644
index 0000000..e4f1037
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisConnectorOptions.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.description.Description;
+import org.apache.flink.connector.base.table.AsyncSinkConnectorOptions;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+
+import static org.apache.flink.configuration.description.TextElement.code;
+import static org.apache.flink.configuration.description.TextElement.text;
+
+/** Options for the Kinesis connector. */
+@PublicEvolving
+public class KinesisConnectorOptions extends AsyncSinkConnectorOptions {
+
+    // -----------------------------------------------------------------------------------------
+    // Kinesis specific options
+    // -----------------------------------------------------------------------------------------
+
+    public static final ConfigOption<String> STREAM =
+            ConfigOptions.key("stream")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("Name of the Kinesis stream backing this table.");
+
+    public static final ConfigOption<String> AWS_REGION =
+            ConfigOptions.key("aws.region")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription("AWS region of used Kinesis stream.");
+
+    // -----------------------------------------------------------------------------------------
+    // Sink options
+    // -----------------------------------------------------------------------------------------
+
+    public static final ConfigOption<String> SINK_PARTITIONER =
+            ConfigOptions.key("sink.partitioner")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription(
+                            Description.builder()
+                                    .text(
+                                            "Optional output partitioning from Flink's partitions into Kinesis shards. "
+                                                    + "Sinks that write to tables defined with the %s clause always use a "
+                                                    + "field-based partitioner and cannot define this option.",
+                                            code("PARTITION BY"))
+                                    .linebreak()
+                                    .text("Valid enumerations are")
+                                    .list(
+                                            text("random (use a random partition key)"),
+                                            text(
+                                                    "fixed (each Flink partition ends up in at most one Kinesis shard)"),
+                                            text(
+                                                    "custom class name (use a custom %s subclass)",
+                                                    text(PartitionKeyGenerator.class.getName())))
+                                    .build());
+
+    public static final ConfigOption<String> SINK_PARTITIONER_FIELD_DELIMITER =
+            ConfigOptions.key("sink.partitioner-field-delimiter")
+                    .stringType()
+                    .defaultValue("|")
+                    .withDescription(
+                            Description.builder()
+                                    .text(
+                                            "Optional field delimiter for fields-based partitioner derived from a %s clause",
+                                            code("PARTITION BY"))
+                                    .build());
+
+    public static final ConfigOption<Boolean> SINK_FAIL_ON_ERROR =
+            ConfigOptions.key("sink.fail-on-error")
+                    .booleanType()
+                    .defaultValue(false)
+                    .withDescription(
+                            "Determines whether an exception should fail the job, otherwise failed requests are retried.");
+}
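
As a usage illustration, a minimal sketch of a Table API program that sets these options through DDL; the table name, stream name, region, and 'json' format below are assumptions for the example, not part of this change:

    import org.apache.flink.table.api.EnvironmentSettings;
    import org.apache.flink.table.api.TableEnvironment;

    public final class KinesisSinkDdlExample {
        public static void main(String[] args) {
            TableEnvironment tEnv =
                    TableEnvironment.create(EnvironmentSettings.inStreamingMode());

            // Option keys correspond to the ConfigOptions defined above:
            // 'stream', 'aws.region', 'sink.partitioner' and 'sink.fail-on-error'.
            tEnv.executeSql(
                    "CREATE TABLE kinesis_orders ("
                            + "  order_id STRING,"
                            + "  amount DOUBLE"
                            + ") WITH ("
                            + "  'connector' = 'kinesis',"
                            + "  'stream' = 'orders',"
                            + "  'aws.region' = 'us-east-1',"
                            + "  'format' = 'json',"
                            + "  'sink.partitioner' = 'random',"
                            + "  'sink.fail-on-error' = 'false'"
                            + ")");
        }
    }
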
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicSink.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicSink.java
new file mode 100644
index 0000000..c5ae78b
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicSink.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.connector.base.table.sink.AsyncDynamicTableSink;
+import org.apache.flink.connector.base.table.sink.AsyncDynamicTableSinkBuilder;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsSinkBuilder;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.table.connector.ChangelogMode;
+import org.apache.flink.table.connector.format.EncodingFormat;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.connector.sink.SinkV2Provider;
+import org.apache.flink.table.connector.sink.abilities.SupportsPartitioning;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+import javax.annotation.Nullable;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Properties;
+
+/** Kinesis backed {@link AsyncDynamicTableSink}. */
+@Internal
+public class KinesisDynamicSink extends AsyncDynamicTableSink<PutRecordsRequestEntry>
+        implements SupportsPartitioning {
+
+    /** Consumed data type of the table. */
+    private final DataType consumedDataType;
+
+    /** The Kinesis stream to write to. */
+    private final String stream;
+
+    /** Properties for the Kinesis DataStream Sink. */
+    private final Properties kinesisClientProperties;
+
+    /** Sink format for encoding records to Kinesis. */
+    private final EncodingFormat<SerializationSchema<RowData>> encodingFormat;
+
+    /** Partitioner to select Kinesis partition for each item. */
+    private final PartitionKeyGenerator<RowData> partitioner;
+
+    private final Boolean failOnError;
+
+    public KinesisDynamicSink(
+            @Nullable Integer maxBatchSize,
+            @Nullable Integer maxInFlightRequests,
+            @Nullable Integer maxBufferedRequests,
+            @Nullable Long maxBufferSizeInBytes,
+            @Nullable Long maxTimeInBufferMS,
+            @Nullable Boolean failOnError,
+            @Nullable DataType consumedDataType,
+            String stream,
+            @Nullable Properties kinesisClientProperties,
+            EncodingFormat<SerializationSchema<RowData>> encodingFormat,
+            PartitionKeyGenerator<RowData> partitioner) {
+        super(
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBufferSizeInBytes,
+                maxTimeInBufferMS);
+        this.failOnError = failOnError;
+        this.kinesisClientProperties = kinesisClientProperties;
+        this.consumedDataType =
+                Preconditions.checkNotNull(consumedDataType, "Consumed data type must not be null");
+        this.stream = Preconditions.checkNotNull(stream, "Kinesis stream name must not be null");
+        this.encodingFormat =
+                Preconditions.checkNotNull(encodingFormat, "Encoding format must not be null");
+        this.partitioner =
+                Preconditions.checkNotNull(
+                        partitioner, "Kinesis partition key generator must not be null");
+    }
+
+    @Override
+    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
+        return encodingFormat.getChangelogMode();
+    }
+
+    @Override
+    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
+        SerializationSchema<RowData> serializationSchema =
+                encodingFormat.createRuntimeEncoder(context, consumedDataType);
+
+        KinesisStreamsSinkBuilder<RowData> builder =
+                KinesisStreamsSink.<RowData>builder()
+                        .setSerializationSchema(serializationSchema)
+                        .setPartitionKeyGenerator(partitioner)
+                        .setKinesisClientProperties(kinesisClientProperties)
+                        .setStreamName(stream);
+
+        Optional.ofNullable(failOnError).ifPresent(builder::setFailOnError);
+        addAsyncOptionsToSinkBuilder(builder);
+        KinesisStreamsSink<RowData> kdsSink = builder.build();
+        return SinkV2Provider.of(kdsSink);
+    }
+
+    @Override
+    public DynamicTableSink copy() {
+        return new KinesisDynamicSink(
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBufferSizeInBytes,
+                maxTimeInBufferMS,
+                failOnError,
+                consumedDataType,
+                stream,
+                kinesisClientProperties,
+                encodingFormat,
+                partitioner);
+    }
+
+    @Override
+    public String asSummaryString() {
+        return "Kinesis";
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // SupportsPartitioning
+    // --------------------------------------------------------------------------------------------
+
+    @Override
+    public void applyStaticPartition(Map<String, String> partition) {
+        if (partitioner instanceof RowDataFieldsKinesisPartitionKeyGenerator) {
+            ((RowDataFieldsKinesisPartitionKeyGenerator) partitioner).setStaticFields(partition);
+        } else {
+            String msg =
+                    "Cannot apply static partition optimization to a partitioner class "
+                            + "that does not inherit from "
+                            + RowDataFieldsKinesisPartitionKeyGenerator.class.getName()
+                            + ".";
+            throw new RuntimeException(msg);
+        }
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // Value semantics for equals and hashCode
+    // --------------------------------------------------------------------------------------------
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        KinesisDynamicSink that = (KinesisDynamicSink) o;
+        return super.equals(o)
+                && Objects.equals(consumedDataType, that.consumedDataType)
+                && Objects.equals(stream, that.stream)
+                && Objects.equals(kinesisClientProperties, that.kinesisClientProperties)
+                && Objects.equals(encodingFormat, that.encodingFormat)
+                && Objects.equals(partitioner, that.partitioner)
+                && Objects.equals(failOnError, that.failOnError);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                super.hashCode(),
+                consumedDataType,
+                stream,
+                kinesisClientProperties,
+                encodingFormat,
+                partitioner,
+                failOnError);
+    }
+
+    /** Builder class for {@link KinesisDynamicSink}. */
+    @Internal
+    public static class KinesisDynamicTableSinkBuilder
+            extends AsyncDynamicTableSinkBuilder<
+                    PutRecordsRequestEntry, KinesisDynamicTableSinkBuilder> {
+
+        private DataType consumedDataType = null;
+        private String stream = null;
+        private Properties kinesisClientProperties = null;
+        private EncodingFormat<SerializationSchema<RowData>> encodingFormat = null;
+        private PartitionKeyGenerator<RowData> partitioner = null;
+        private Boolean failOnError = null;
+
+        public KinesisDynamicTableSinkBuilder setConsumedDataType(DataType consumedDataType) {
+            this.consumedDataType = consumedDataType;
+            return this;
+        }
+
+        public KinesisDynamicTableSinkBuilder setStream(String stream) {
+            this.stream = stream;
+            return this;
+        }
+
+        public KinesisDynamicTableSinkBuilder setKinesisClientProperties(
+                Properties kinesisClientProperties) {
+            this.kinesisClientProperties = kinesisClientProperties;
+            return this;
+        }
+
+        public KinesisDynamicTableSinkBuilder setEncodingFormat(
+                EncodingFormat<SerializationSchema<RowData>> encodingFormat) {
+            this.encodingFormat = encodingFormat;
+            return this;
+        }
+
+        public KinesisDynamicTableSinkBuilder setFailOnError(Boolean failOnError) {
+            this.failOnError = failOnError;
+            return this;
+        }
+
+        public KinesisDynamicTableSinkBuilder setPartitioner(
+                PartitionKeyGenerator<RowData> partitioner) {
+            this.partitioner = partitioner;
+            return this;
+        }
+
+        @Override
+        public KinesisDynamicSink build() {
+            return new KinesisDynamicSink(
+                    getMaxBatchSize(),
+                    getMaxInFlightRequests(),
+                    getMaxBufferedRequests(),
+                    getMaxBufferSizeInBytes(),
+                    getMaxTimeInBufferMS(),
+                    failOnError,
+                    consumedDataType,
+                    stream,
+                    kinesisClientProperties,
+                    encodingFormat,
+                    partitioner);
+        }
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactory.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactory.java
new file mode 100644
index 0000000..5aa0931
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactory.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.base.table.AsyncDynamicTableSinkFactory;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.connector.kinesis.table.util.KinesisStreamsConnectorOptionsUtils;
+import org.apache.flink.table.api.ValidationException;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.logical.RowType;
+
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.AWS_REGION;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_FAIL_ON_ERROR;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER_FIELD_DELIMITER;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.STREAM;
+import static org.apache.flink.connector.kinesis.table.util.KinesisStreamsConnectorOptionsUtils.KINESIS_CLIENT_PROPERTIES_KEY;
+import static org.apache.flink.table.factories.FactoryUtil.FORMAT;
+
+/** Factory for creating {@link KinesisDynamicSink}. */
+@Internal
+public class KinesisDynamicTableSinkFactory extends AsyncDynamicTableSinkFactory {
+    public static final String IDENTIFIER = "kinesis";
+
+    @Override
+    public DynamicTableSink createDynamicTableSink(Context context) {
+
+        AsyncDynamicSinkContext factoryContext = new AsyncDynamicSinkContext(this, context);
+
+        KinesisStreamsConnectorOptionsUtils optionsUtils =
+                new KinesisStreamsConnectorOptionsUtils(
+                        factoryContext.getResolvedOptions(),
+                        factoryContext.getTableOptions(),
+                        (RowType) factoryContext.getPhysicalDataType().getLogicalType(),
+                        factoryContext.getPartitionKeys(),
+                        context.getClassLoader());
+        // validate the data types of the table options
+        factoryContext
+                .getFactoryHelper()
+                .validateExcept(optionsUtils.getNonValidatedPrefixes().toArray(new String[0]));
+
+        // Validate option values
+        validateKinesisPartitioner(
+                factoryContext.getTableOptions(), factoryContext.isPartitioned());
+        Properties properties = optionsUtils.getValidatedSinkConfigurations();
+
+        KinesisDynamicSink.KinesisDynamicTableSinkBuilder builder =
+                new KinesisDynamicSink.KinesisDynamicTableSinkBuilder();
+
+        builder.setStream((String) properties.get(STREAM.key()))
+                .setKinesisClientProperties(
+                        (Properties) properties.get(KINESIS_CLIENT_PROPERTIES_KEY))
+                .setEncodingFormat(factoryContext.getEncodingFormat())
+                .setConsumedDataType(factoryContext.getPhysicalDataType())
+                .setPartitioner(
+                        (PartitionKeyGenerator<RowData>) properties.get(SINK_PARTITIONER.key()));
+        addAsyncOptionsToBuilder(properties, builder);
+        Optional.ofNullable((Boolean) properties.get(SINK_FAIL_ON_ERROR.key()))
+                .ifPresent(builder::setFailOnError);
+        return builder.build();
+    }
+
+    @Override
+    public String factoryIdentifier() {
+        return IDENTIFIER;
+    }
+
+    @Override
+    public Set<ConfigOption<?>> requiredOptions() {
+        final Set<ConfigOption<?>> options = new HashSet<>();
+        options.add(STREAM);
+        options.add(FORMAT);
+        options.add(AWS_REGION);
+        return options;
+    }
+
+    @Override
+    public Set<ConfigOption<?>> optionalOptions() {
+        final Set<ConfigOption<?>> options = super.optionalOptions();
+        options.add(SINK_PARTITIONER);
+        options.add(SINK_PARTITIONER_FIELD_DELIMITER);
+        options.add(SINK_FAIL_ON_ERROR);
+        return KinesisStreamsConnectorOptionsUtils.KinesisProducerOptionsMapper.addDeprecatedKeys(
+                options);
+    }
+
+    private static void validateKinesisPartitioner(
+            ReadableConfig tableOptions, boolean isPartitioned) {
+        tableOptions
+                .getOptional(SINK_PARTITIONER)
+                .ifPresent(
+                        partitioner -> {
+                            if (isPartitioned) {
+                                throw new ValidationException(
+                                        String.format(
+                                                "Cannot set %s option for a table defined with a PARTITIONED BY clause",
+                                                SINK_PARTITIONER.key()));
+                            }
+                        });
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisPartitionKeyGeneratorFactory.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisPartitionKeyGeneratorFactory.java
new file mode 100644
index 0000000..552c50b
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/KinesisPartitionKeyGeneratorFactory.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.table.api.ValidationException;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.util.FlinkException;
+import org.apache.flink.util.InstantiationUtil;
+
+import java.util.List;
+
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER_FIELD_DELIMITER;
+
+/** Factory class for {@link PartitionKeyGenerator}. */
+@Internal
+public class KinesisPartitionKeyGeneratorFactory {
+
+    // -----------------------------------------------------------------------------------------
+    // Option enumerations
+    // -----------------------------------------------------------------------------------------
+
+    public static final String SINK_PARTITIONER_VALUE_FIXED = "fixed";
+    public static final String SINK_PARTITIONER_VALUE_RANDOM = "random";
+
+    /**
+     * Constructs the Kinesis partitioner for a target table based on the currently set {@code
+     * tableOptions}.
+     *
+     * <p>The following rules are applied in decreasing order of precedence:
+     *
+     * <ul>
+     *   <li>If the table is partitioned, return a {@link
+     *       RowDataFieldsKinesisPartitionKeyGenerator}.
+     *   <li>If the partitioner type is not set, return a {@link
+     *       RandomKinesisPartitionKeyGenerator}.
+     *   <li>If a specific partitioner type alias is used, instantiate the corresponding type.
+     *   <li>Otherwise, interpret the partitioner type as the class name of a user-defined
+     *       partitioner.
+     * </ul>
+     *
+     * @param tableOptions A read-only set of config options that determines the partitioner type.
+     * @param physicalType Physical type for partitioning.
+     * @param partitionKeys Partitioning keys in physical type.
+     * @param classLoader A {@link ClassLoader} to use for loading user-defined partitioner classes.
+     */
+    public static PartitionKeyGenerator<RowData> getKinesisPartitioner(
+            ReadableConfig tableOptions,
+            RowType physicalType,
+            List<String> partitionKeys,
+            ClassLoader classLoader) {
+
+        if (!partitionKeys.isEmpty()) {
+            String delimiter = tableOptions.get(SINK_PARTITIONER_FIELD_DELIMITER);
+            return new RowDataFieldsKinesisPartitionKeyGenerator(
+                    physicalType, partitionKeys, delimiter);
+        } else if (!tableOptions.getOptional(SINK_PARTITIONER).isPresent()) {
+            return new RandomKinesisPartitionKeyGenerator<>();
+        } else {
+            String partitioner = tableOptions.getOptional(SINK_PARTITIONER).get();
+            if (SINK_PARTITIONER_VALUE_FIXED.equals(partitioner)) {
+                return new FixedKinesisPartitionKeyGenerator<>();
+            } else if (SINK_PARTITIONER_VALUE_RANDOM.equals(partitioner)) {
+                return new RandomKinesisPartitionKeyGenerator<>();
+            } else { // interpret the option value as a fully-qualified class name
+                return initializePartitioner(partitioner, classLoader);
+            }
+        }
+    }
+
+    /** Instantiates a {@link PartitionKeyGenerator} from the given class name. */
+    private static <T> PartitionKeyGenerator<T> initializePartitioner(
+            String name, ClassLoader classLoader) {
+        try {
+            Class<?> clazz = Class.forName(name, true, classLoader);
+            if (!PartitionKeyGenerator.class.isAssignableFrom(clazz)) {
+                throw new ValidationException(
+                        String.format(
+                                "Partitioner class '%s' should have %s in its parents chain",
+                                name, PartitionKeyGenerator.class.getName()));
+            }
+            @SuppressWarnings("unchecked")
+            final PartitionKeyGenerator<T> partitioner =
+                    InstantiationUtil.instantiate(name, PartitionKeyGenerator.class, classLoader);
+
+            return partitioner;
+        } catch (ClassNotFoundException | FlinkException e) {
+            throw new ValidationException(
+                    String.format("Could not find and instantiate partitioner class '%s'", name),
+                    e);
+        }
+    }
+}
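
To make the resolution rules above concrete, a minimal sketch that resolves a partitioner for a non-partitioned table with no 'sink.partitioner' option set; the field name and type are illustrative assumptions:

    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
    import org.apache.flink.connector.kinesis.table.KinesisPartitionKeyGeneratorFactory;
    import org.apache.flink.table.api.DataTypes;
    import org.apache.flink.table.data.RowData;
    import org.apache.flink.table.types.logical.RowType;

    import java.util.Collections;

    public final class PartitionerResolutionExample {
        public static void main(String[] args) {
            RowType physicalType =
                    (RowType)
                            DataTypes.ROW(DataTypes.FIELD("user_id", DataTypes.STRING()))
                                    .getLogicalType();

            // No partition keys and no 'sink.partitioner' option: the factory falls back
            // to the random partition key generator.
            PartitionKeyGenerator<RowData> partitioner =
                    KinesisPartitionKeyGeneratorFactory.getKinesisPartitioner(
                            new Configuration(),
                            physicalType,
                            Collections.emptyList(),
                            Thread.currentThread().getContextClassLoader());

            System.out.println(partitioner.getClass().getSimpleName());
            // expected: RandomKinesisPartitionKeyGenerator
        }
    }
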
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RandomKinesisPartitionKeyGenerator.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RandomKinesisPartitionKeyGenerator.java
new file mode 100644
index 0000000..280cfd0
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RandomKinesisPartitionKeyGenerator.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+
+import java.util.UUID;
+
+/**
+ * A {@link PartitionKeyGenerator} that maps an arbitrary input {@code element} to a random
+ * partition ID.
+ *
+ * @param <T> The input element type.
+ */
+@PublicEvolving
+public final class RandomKinesisPartitionKeyGenerator<T> implements PartitionKeyGenerator<T> {
+    @Override
+    public String apply(T element) {
+        return UUID.randomUUID().toString();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        return o instanceof RandomKinesisPartitionKeyGenerator;
+    }
+
+    @Override
+    public int hashCode() {
+        return RandomKinesisPartitionKeyGenerator.class.hashCode();
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGenerator.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGenerator.java
new file mode 100644
index 0000000..52eedfb
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGenerator.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.table.types.logical.RowType.RowField;
+import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;
+import org.apache.flink.util.Preconditions;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * A {@link PartitionKeyGenerator} of {@link RowData} elements that constructs the partition key
+ * from a list of field names.
+ *
+ * <p>The key is constructed by concatenating the string representations of a list of fields
+ * projected from an input element. A fixed prefix can be optionally configured in order to speed up
+ * the key construction process.
+ *
+ * <p>Resulting partition key values are trimmed to the maximum length allowed by Kinesis.
+ */
+@Internal
+public final class RowDataFieldsKinesisPartitionKeyGenerator
+        implements PartitionKeyGenerator<RowData> {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Maximum allowed length of a partition key.
+     *
+     * @see <a
+     *     href="https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html#API_PutRecord_RequestSyntax">PutRecord request syntax</a>
+     */
+    public static final int MAX_PARTITION_KEY_LENGTH = 256;
+
+    /** Default delimiter for {@link RowDataFieldsKinesisPartitionKeyGenerator#delimiter}. */
+    public static final String DEFAULT_DELIMITER = String.valueOf('|');
+
+    /** The character used to delimit field values in the concatenated partition key string. */
+    private final String delimiter;
+
+    /**
+     * A list of field names used to extract the partition key for a record that will be written to
+     * a Kinesis stream.
+     */
+    private final List<String> fieldNames;
+
+    /**
+     * A map of getter functions to dynamically extract the field values for all {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#fieldNames} from an input record.
+     */
+    private final Map<String, RowData.FieldGetter> dynamicFieldGetters;
+
+    /**
+     * A buffer used to accumulate the concatenation of all field values that form the partition
+     * key.
+     */
+    private final StringBuilder keyBuffer = new StringBuilder();
+
+    /**
+     * A prefix of static field values to be used instead of the corresponding {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#dynamicFieldGetters} entries.
+     */
+    private Map<String, String> staticFields = Collections.emptyMap();
+
+    /**
+     * The length of the static prefix of the {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#keyBuffer} (derived from the values in {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#staticFields}).
+     */
+    private int keyBufferStaticPrefixLength = 0;
+
+    /**
+     * The length of the prefix in {@link RowDataFieldsKinesisPartitionKeyGenerator#fieldNames} for
+     * which static field values are present in {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#staticFields}.
+     */
+    private int fieldNamesStaticPrefixLength = 0;
+
+    public RowDataFieldsKinesisPartitionKeyGenerator(
+            RowType physicalType, List<String> partitionKeys) {
+        this(physicalType, partitionKeys, DEFAULT_DELIMITER);
+    }
+
+    public RowDataFieldsKinesisPartitionKeyGenerator(
+            RowType physicalType, List<String> partitionKeys, String delimiter) {
+        Preconditions.checkNotNull(physicalType, "physicalType");
+        Preconditions.checkNotNull(partitionKeys, "partitionKeys");
+        Preconditions.checkNotNull(delimiter, "delimiter");
+        Preconditions.checkArgument(
+                !partitionKeys.isEmpty(),
+                "Cannot create a RowDataFieldsKinesisPartitioner for a non-partitioned table");
+        Preconditions.checkArgument(
+                partitionKeys.size() == new HashSet<>(partitionKeys).size(),
+                "The sequence of partition keys cannot contain duplicates");
+
+        List<String> fieldsList = physicalType.getFieldNames();
+
+        List<String> badKeyNames = new ArrayList<>();
+        List<String> badKeyTypes = new ArrayList<>();
+
+        for (String fieldName : partitionKeys) {
+            int index = fieldsList.indexOf(fieldName);
+            if (index < 0) {
+                badKeyNames.add(fieldName);
+            } else if (!LogicalTypeChecks.hasWellDefinedString(physicalType.getTypeAt(index))) {
+                badKeyTypes.add(fieldName);
+            }
+        }
+
+        Preconditions.checkArgument(
+                badKeyNames.size() == 0,
+                "The following partition keys are not present in the table: %s",
+                String.join(", ", badKeyNames));
+        Preconditions.checkArgument(
+                badKeyTypes.size() == 0,
+                "The following partition keys have types that are not supported by Kinesis: %s",
+                String.join(", ", badKeyTypes));
+
+        this.delimiter = delimiter;
+        this.fieldNames = partitionKeys;
+        this.dynamicFieldGetters = new HashMap<>();
+        for (String fieldName : partitionKeys) {
+            RowField field = physicalType.getFields().get(fieldsList.indexOf(fieldName));
+
+            RowData.FieldGetter fieldGetter =
+                    RowData.createFieldGetter(field.getType(), fieldsList.indexOf(field.getName()));
+
+            this.dynamicFieldGetters.put(fieldName, fieldGetter);
+        }
+    }
+
+    @Override
+    public String apply(RowData element) {
+        // reset the buffer to the length of the static prefix
+        keyBuffer.setLength(keyBufferStaticPrefixLength);
+
+        // fill in the dynamic part of the buffer
+        for (int i = fieldNamesStaticPrefixLength; i < fieldNames.size(); i++) {
+            String fieldName = fieldNames.get(i);
+            if (!staticFields.containsKey(fieldName)) {
+                keyBuffer.append(dynamicFieldGetters.get(fieldName).getFieldOrNull(element));
+            } else {
+                keyBuffer.append(staticFields.get(fieldName));
+            }
+            keyBuffer.append(delimiter);
+
+            if (keyBuffer.length() >= MAX_PARTITION_KEY_LENGTH) {
+                break; // stop once the buffer reaches the maximum allowed partition key length
+            }
+        }
+
+        // return the accumulated concatenated string trimmed to the max allowed partition key size
+        int length = Math.min(keyBuffer.length() - delimiter.length(), MAX_PARTITION_KEY_LENGTH);
+        return keyBuffer.substring(0, length);
+    }
+
+    /**
+     * Update the fixed partition key prefix.
+     *
+     * @param staticFields An association of (field name, field value) pairs to be used as static
+     *     partition key prefix.
+     */
+    public void setStaticFields(Map<String, String> staticFields) {
+        Preconditions.checkArgument(
+                isPartitionKeySubset(staticFields.keySet()),
+                String.format(
+                        "Not all static field names (%s) are part of the partition key (%s).",
+                        String.join(", ", staticFields.keySet()), String.join(", ", fieldNames)));
+        this.staticFields = new HashMap<>(staticFields);
+        updateKeyBufferStaticPrefix();
+    }
+
+    /**
+     * Check whether the given set of field names forms a subset of the field names defined in
+     * {@link RowDataFieldsKinesisPartitionKeyGenerator#fieldNames}.
+     *
+     * @param candidateSubset A set of field names forming a candidate subset of {@link
+     *     RowDataFieldsKinesisPartitionKeyGenerator#fieldNames}.
+     * @return true if and only if {@code candidateSubset} is a subset of {@link
+     *     RowDataFieldsKinesisPartitionKeyGenerator#fieldNames}.
+     */
+    private boolean isPartitionKeySubset(Set<String> candidateSubset) {
+        return new HashSet<>(fieldNames).containsAll(candidateSubset);
+    }
+
+    /**
+     * Pre-fills the {@link RowDataFieldsKinesisPartitionKeyGenerator#keyBuffer} with the static
+     * partition key values from the currently set {@link
+     * RowDataFieldsKinesisPartitionKeyGenerator#staticFields} and updates the cached prefix
+     * lengths.
+     */
+    private void updateKeyBufferStaticPrefix() {
+        // update the fixed prefix and its cumulative length
+        keyBuffer.setLength(0);
+        fieldNamesStaticPrefixLength = 0;
+        for (String fieldName : fieldNames) {
+            if (staticFields.containsKey(fieldName)) {
+                keyBuffer.append(staticFields.get(fieldName));
+                keyBuffer.append(delimiter);
+                fieldNamesStaticPrefixLength++;
+            } else {
+                break; // stop at the first non-static field
+            }
+        }
+        keyBufferStaticPrefixLength = keyBuffer.length();
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // Value semantics for equals and hashCode
+    // --------------------------------------------------------------------------------------------
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final RowDataFieldsKinesisPartitionKeyGenerator that =
+                (RowDataFieldsKinesisPartitionKeyGenerator) o;
+        return Objects.equals(this.delimiter, that.delimiter)
+                && Objects.equals(this.fieldNames, that.fieldNames)
+                && Objects.equals(this.staticFields, that.staticFields)
+                && Objects.equals(
+                        this.keyBufferStaticPrefixLength, that.keyBufferStaticPrefixLength)
+                && Objects.equals(
+                        this.fieldNamesStaticPrefixLength, that.fieldNamesStaticPrefixLength);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                delimiter,
+                fieldNames,
+                staticFields,
+                keyBufferStaticPrefixLength,
+                fieldNamesStaticPrefixLength);
+    }
+}
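
A minimal sketch of how the field-based key is assembled; it uses this @Internal class purely for illustration, and the field names and values are assumptions for the example:

    import org.apache.flink.connector.kinesis.table.RowDataFieldsKinesisPartitionKeyGenerator;
    import org.apache.flink.table.api.DataTypes;
    import org.apache.flink.table.data.GenericRowData;
    import org.apache.flink.table.data.StringData;
    import org.apache.flink.table.types.logical.RowType;

    import java.util.Arrays;

    public final class FieldsPartitionKeyExample {
        public static void main(String[] args) {
            RowType physicalType =
                    (RowType)
                            DataTypes.ROW(
                                            DataTypes.FIELD("region", DataTypes.STRING()),
                                            DataTypes.FIELD("user_id", DataTypes.STRING()))
                                    .getLogicalType();

            RowDataFieldsKinesisPartitionKeyGenerator generator =
                    new RowDataFieldsKinesisPartitionKeyGenerator(
                            physicalType, Arrays.asList("region", "user_id"));

            GenericRowData row =
                    GenericRowData.of(
                            StringData.fromString("eu-west-1"), StringData.fromString("42"));

            // Field values are concatenated with the default '|' delimiter.
            System.out.println(generator.apply(row)); // prints "eu-west-1|42"
        }
    }
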
diff --git a/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/util/KinesisStreamsConnectorOptionsUtils.java b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/util/KinesisStreamsConnectorOptionsUtils.java
new file mode 100644
index 0000000..4b30fe0
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/java/org/apache/flink/connector/kinesis/table/util/KinesisStreamsConnectorOptionsUtils.java
@@ -0,0 +1,272 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.table.util.AWSOptionUtils;
+import org.apache.flink.connector.aws.table.util.AsyncClientOptionsUtils;
+import org.apache.flink.connector.base.table.sink.options.AsyncSinkConfigurationValidator;
+import org.apache.flink.connector.kinesis.sink.PartitionKeyGenerator;
+import org.apache.flink.connector.kinesis.table.KinesisPartitionKeyGeneratorFactory;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.logical.RowType;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.FLUSH_BUFFER_SIZE;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.FLUSH_BUFFER_TIMEOUT;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_BATCH_SIZE;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_BUFFERED_REQUESTS;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_IN_FLIGHT_REQUESTS;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_FAIL_ON_ERROR;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_PARTITIONER_FIELD_DELIMITER;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.STREAM;
+
+/**
+ * Class for handling Kinesis table options, including key mapping, validation, and property
+ * extraction. It uses the options decorators {@link AWSOptionUtils} and {@link
+ * AsyncClientOptionsUtils} to handle each specified set of options.
+ */
+@Internal
+public class KinesisStreamsConnectorOptionsUtils {
+    /** Key for accessing kinesisAsyncClient properties. */
+    public static final String KINESIS_CLIENT_PROPERTIES_KEY = "sink.client.properties";
+
+    private final AsyncClientOptionsUtils asyncClientOptionsUtils;
+    private final AsyncSinkConfigurationValidator asyncSinkconfigurationValidator;
+    private final Map<String, String> resolvedOptions;
+    private final ReadableConfig tableOptions;
+    private final PartitionKeyGenerator<RowData> partitioner;
+
+    /**
+     * Prefixes of properties that are validated by downstream components and should not be
+     * validated by the Table API infrastructure.
+     */
+    private static final String[] NON_VALIDATED_PREFIXES =
+            new String[] {
+                AWSOptionUtils.AWS_PROPERTIES_PREFIX,
+                AsyncClientOptionsUtils.SINK_CLIENT_PREFIX,
+                KinesisProducerOptionsMapper.KINESIS_PRODUCER_PREFIX
+            };
+
+    public KinesisStreamsConnectorOptionsUtils(
+            Map<String, String> options,
+            ReadableConfig tableOptions,
+            RowType physicalType,
+            List<String> partitionKeys,
+            ClassLoader classLoader) {
+        KinesisProducerOptionsMapper optionsMapper =
+                new KinesisProducerOptionsMapper(tableOptions, options);
+        this.resolvedOptions = optionsMapper.mapDeprecatedClientOptions();
+        this.tableOptions = optionsMapper.mapDeprecatedTableOptions();
+        this.asyncSinkconfigurationValidator =
+                new AsyncSinkConfigurationValidator(this.tableOptions);
+        this.asyncClientOptionsUtils = new AsyncClientOptionsUtils(resolvedOptions);
+        this.partitioner =
+                KinesisPartitionKeyGeneratorFactory.getKinesisPartitioner(
+                        tableOptions, physicalType, partitionKeys, classLoader);
+    }
+
+    public Properties getValidatedSinkConfigurations() {
+        Properties properties = asyncSinkconfigurationValidator.getValidatedConfigurations();
+        properties.put(STREAM.key(), tableOptions.get(STREAM));
+        Properties kinesisClientProps = asyncClientOptionsUtils.getValidatedConfigurations();
+
+        properties.put(KINESIS_CLIENT_PROPERTIES_KEY, kinesisClientProps);
+        properties.put(SINK_PARTITIONER.key(), this.partitioner);
+
+        if (tableOptions.getOptional(SINK_FAIL_ON_ERROR).isPresent()) {
+            properties.put(
+                    SINK_FAIL_ON_ERROR.key(), tableOptions.getOptional(SINK_FAIL_ON_ERROR).get());
+        }
+        return properties;
+    }
+
+    public List<String> getNonValidatedPrefixes() {
+        return Arrays.asList(NON_VALIDATED_PREFIXES);
+    }
+
+    /** Class for mapping and validating deprecated producer options. */
+    @Internal
+    public static class KinesisProducerOptionsMapper {
+        private static final Logger LOG =
+                LoggerFactory.getLogger(KinesisProducerOptionsMapper.class);
+        /** Prefix for the fallback keys of deprecated producer options. */
+        public static final String KINESIS_PRODUCER_PREFIX = "sink.producer.";
+
+        private static final String KINESIS_PRODUCER_ENDPOINT = "sink.producer.kinesis-endpoint";
+        private static final String KINESIS_PRODUCER_PORT = "sink.producer.kinesis-port";
+        private static final String KINESIS_PRODUCER_VERIFY_CERTIFICATE =
+                "sink.producer.verify-certificate";
+        private static final String DEPRECATED_FLUSH_BUFFER_TIMEOUT_KEY =
+                "sink.producer.record-max-buffered-time";
+        private static final String DEPRECATED_MAX_BATCH_SIZE_KEY =
+                "sink.producer.collection-max-size";
+        private static final String DEPRECATED_MAX_INFLIGHT_REQUESTS_KEY =
+                "sink.producer.collection-max-count";
+        private static final String DEPRECATED_SINK_FAIL_ON_ERROR_KEY =
+                "sink.producer.fail-on-error";
+
+        private final ReadableConfig tableOptions;
+        private final Map<String, String> resolvedOptions;
+
+        public KinesisProducerOptionsMapper(
+                ReadableConfig tableOptions, Map<String, String> resolvedOptions) {
+            this.tableOptions = tableOptions;
+            this.resolvedOptions = resolvedOptions;
+        }
+
+        @VisibleForTesting
+        public KinesisProducerOptionsMapper(Map<String, String> allOptions) {
+            this.tableOptions = Configuration.fromMap(allOptions);
+            this.resolvedOptions = allOptions;
+        }
+
+        public Map<String, String> mapDeprecatedClientOptions() {
+            mapDeprecatedEndpoint();
+            mapDeprecatedVerifyCertificate();
+            removeMappedOptions();
+            resolvedOptions.keySet().forEach(this::warnForDeprecatedOption);
+            return resolvedOptions;
+        }
+
+        public ReadableConfig mapDeprecatedTableOptions() {
+            Configuration mappedConfig = new Configuration();
+            mappedConfig.set(STREAM, tableOptions.get(STREAM));
+            tableOptions
+                    .getOptional(FLUSH_BUFFER_SIZE)
+                    .ifPresent(val -> mappedConfig.set(FLUSH_BUFFER_SIZE, val));
+            tableOptions
+                    .getOptional(MAX_BUFFERED_REQUESTS)
+                    .ifPresent(val -> mappedConfig.set(MAX_BUFFERED_REQUESTS, val));
+            tableOptions
+                    .getOptional(SINK_PARTITIONER)
+                    .ifPresent(val -> mappedConfig.set(SINK_PARTITIONER, val));
+            tableOptions
+                    .getOptional(SINK_PARTITIONER_FIELD_DELIMITER)
+                    .ifPresent(val -> mappedConfig.set(SINK_PARTITIONER_FIELD_DELIMITER, val));
+
+            replaceDeprecatedOptionInConfig(
+                    FLUSH_BUFFER_TIMEOUT, DEPRECATED_FLUSH_BUFFER_TIMEOUT_KEY, mappedConfig);
+            replaceDeprecatedOptionInConfig(
+                    MAX_BATCH_SIZE, DEPRECATED_MAX_BATCH_SIZE_KEY, mappedConfig);
+            replaceDeprecatedOptionInConfig(
+                    MAX_IN_FLIGHT_REQUESTS, DEPRECATED_MAX_INFLIGHT_REQUESTS_KEY, mappedConfig);
+            replaceDeprecatedOptionInConfig(
+                    SINK_FAIL_ON_ERROR, DEPRECATED_SINK_FAIL_ON_ERROR_KEY, mappedConfig);
+
+            return mappedConfig;
+        }
+
+        public static Set<ConfigOption<?>> addDeprecatedKeys(Set<ConfigOption<?>> tableOptions) {
+            HashSet<ConfigOption<?>> tableOptionsWithDeprecatedKeys = new HashSet<>(tableOptions);
+
+            tableOptionsWithDeprecatedKeys.remove(FLUSH_BUFFER_TIMEOUT);
+            tableOptionsWithDeprecatedKeys.add(
+                    FLUSH_BUFFER_TIMEOUT.withDeprecatedKeys(DEPRECATED_FLUSH_BUFFER_TIMEOUT_KEY));
+
+            tableOptionsWithDeprecatedKeys.remove(MAX_BATCH_SIZE);
+            tableOptionsWithDeprecatedKeys.add(
+                    MAX_BATCH_SIZE.withDeprecatedKeys(DEPRECATED_MAX_BATCH_SIZE_KEY));
+
+            tableOptionsWithDeprecatedKeys.remove(MAX_IN_FLIGHT_REQUESTS);
+            tableOptionsWithDeprecatedKeys.add(
+                    MAX_IN_FLIGHT_REQUESTS.withDeprecatedKeys(
+                            DEPRECATED_MAX_INFLIGHT_REQUESTS_KEY));
+
+            tableOptionsWithDeprecatedKeys.remove(SINK_FAIL_ON_ERROR);
+            tableOptionsWithDeprecatedKeys.add(
+                    SINK_FAIL_ON_ERROR.withDeprecatedKeys(DEPRECATED_SINK_FAIL_ON_ERROR_KEY));
+
+            return tableOptionsWithDeprecatedKeys;
+        }
+
+        private void mapDeprecatedEndpoint() {
+            if (resolvedOptions.containsKey(KINESIS_PRODUCER_ENDPOINT)) {
+                if (resolvedOptions.containsKey(KINESIS_PRODUCER_PORT)) {
+                    resolvedOptions.putIfAbsent(
+                            AWSConfigConstants.AWS_ENDPOINT,
+                            String.format(
+                                    "https://%s:%s",
+                                    resolvedOptions.get(KINESIS_PRODUCER_ENDPOINT),
+                                    resolvedOptions.get(KINESIS_PRODUCER_PORT)));
+                } else {
+                    resolvedOptions.putIfAbsent(
+                            AWSConfigConstants.AWS_ENDPOINT,
+                            String.format(
+                                    "https://%s", resolvedOptions.get(KINESIS_PRODUCER_ENDPOINT)));
+                }
+            }
+        }
+
+        private void mapDeprecatedVerifyCertificate() {
+            if (resolvedOptions.containsKey(KINESIS_PRODUCER_VERIFY_CERTIFICATE)) {
+                String value = resolvedOptions.get(KINESIS_PRODUCER_VERIFY_CERTIFICATE);
+                if (value.equalsIgnoreCase("true")) {
+                    resolvedOptions.putIfAbsent(AWSConfigConstants.TRUST_ALL_CERTIFICATES, "false");
+                } else if (value.equalsIgnoreCase("false")) {
+                    resolvedOptions.putIfAbsent(AWSConfigConstants.TRUST_ALL_CERTIFICATES, "true");
+                } else {
+                    LOG.warn(
+                            String.format(
+                                    "Option %s is ignored due to invalid value",
+                                    KINESIS_PRODUCER_VERIFY_CERTIFICATE));
+                }
+            }
+        }
+
+        private void removeMappedOptions() {
+            resolvedOptions.remove(KINESIS_PRODUCER_VERIFY_CERTIFICATE);
+            resolvedOptions.remove(KINESIS_PRODUCER_ENDPOINT);
+            resolvedOptions.remove(KINESIS_PRODUCER_PORT);
+            resolvedOptions.remove(DEPRECATED_FLUSH_BUFFER_TIMEOUT_KEY);
+            resolvedOptions.remove(DEPRECATED_MAX_BATCH_SIZE_KEY);
+            resolvedOptions.remove(DEPRECATED_MAX_INFLIGHT_REQUESTS_KEY);
+            resolvedOptions.remove(DEPRECATED_SINK_FAIL_ON_ERROR_KEY);
+        }
+
+        private void warnForDeprecatedOption(String key) {
+            if (key.startsWith(KINESIS_PRODUCER_PREFIX)) {
+                LOG.warn(String.format("Key %s is unsupported by Kinesis Data Streams Sink", key));
+            }
+        }
+
+        private <T> void replaceDeprecatedOptionInConfig(
+                ConfigOption<T> option, String deprecatedKey, Configuration config) {
+            tableOptions
+                    .getOptional(option.withDeprecatedKeys(deprecatedKey))
+                    .ifPresent(v -> config.set(option, v));
+            tableOptions.getOptional(option).ifPresent(v -> config.set(option, v));
+        }
+    }
+}
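
For illustration, the standalone sketch below (not part of this change) shows how the mapper above rewrites the deprecated sink.producer.* keys; the endpoint, port, and certificate values are placeholders, and the constant names are the ones from AWSConfigConstants already referenced in this file.

import org.apache.flink.connector.aws.config.AWSConfigConstants;
import org.apache.flink.connector.kinesis.table.KinesisStreamsConnectorOptionsUtils.KinesisProducerOptionsMapper;

import java.util.HashMap;
import java.util.Map;

/** Sketch of the deprecated-key mapping performed by KinesisProducerOptionsMapper. */
public class DeprecatedOptionMappingSketch {
    public static void main(String[] args) {
        Map<String, String> legacyOptions = new HashMap<>();
        legacyOptions.put("sink.producer.kinesis-endpoint", "localhost");
        legacyOptions.put("sink.producer.kinesis-port", "4567");
        legacyOptions.put("sink.producer.verify-certificate", "false");

        // The @VisibleForTesting constructor accepts the raw option map directly.
        KinesisProducerOptionsMapper mapper = new KinesisProducerOptionsMapper(legacyOptions);
        Map<String, String> mapped = mapper.mapDeprecatedClientOptions();

        // Endpoint and port are combined into a single HTTPS endpoint under
        // AWSConfigConstants.AWS_ENDPOINT, verify-certificate=false is inverted into
        // TRUST_ALL_CERTIFICATES=true, and the sink.producer.* keys are removed.
        System.out.println(mapped.get(AWSConfigConstants.AWS_ENDPOINT));
        System.out.println(mapped.get(AWSConfigConstants.TRUST_ALL_CERTIFICATES));
    }
}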
diff --git a/flink-connector-aws-kinesis-streams/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/flink-connector-aws-kinesis-streams/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
new file mode 100644
index 0000000..adb7ea2
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.connector.kinesis.table.KinesisDynamicTableSinkFactory
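
The service entry above makes the new sink discoverable through Flink's factory mechanism. A hypothetical Table API usage is sketched below; the connector identifier 'kinesis', the stream name, the region, and the 'json' format are assumptions for illustration and are not taken from this commit.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

/** Sketch: declaring a Kinesis sink table through the registered factory. */
public class KinesisTableSinkSketch {
    public static void main(String[] args) throws Exception {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // Assumed DDL: identifier and option keys follow the factory and options classes
        // introduced elsewhere in this commit.
        tEnv.executeSql(
                "CREATE TABLE orders_sink ("
                        + "  order_id STRING,"
                        + "  price DOUBLE"
                        + ") WITH ("
                        + "  'connector' = 'kinesis',"
                        + "  'stream' = 'orders',"
                        + "  'aws.region' = 'eu-west-1',"
                        + "  'format' = 'json'"
                        + ")");

        tEnv.executeSql("INSERT INTO orders_sink VALUES ('order-1', 12.34)").await();
    }
}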
diff --git a/flink-connector-aws-kinesis-streams/src/main/resources/log4j2.properties b/flink-connector-aws-kinesis-streams/src/main/resources/log4j2.properties
new file mode 100644
index 0000000..c64a340
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/main/resources/log4j2.properties
@@ -0,0 +1,25 @@
+################################################################################
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+rootLogger.level = OFF
+rootLogger.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
new file mode 100644
index 0000000..dcc7fff
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.architecture;
+
+import org.apache.flink.architecture.common.ImportOptions;
+
+import com.tngtech.archunit.core.importer.ImportOption;
+import com.tngtech.archunit.junit.AnalyzeClasses;
+import com.tngtech.archunit.junit.ArchTest;
+import com.tngtech.archunit.junit.ArchTests;
+
+/** Architecture tests for test code. */
+@AnalyzeClasses(
+        packages = "org.apache.flink.connector.kinesis",
+        importOptions = {
+            ImportOption.OnlyIncludeTests.class,
+            ImportOptions.ExcludeScalaImportOption.class,
+            ImportOptions.ExcludeShadedImportOption.class
+        })
+public class TestCodeArchitectureTest {
+
+    @ArchTest
+    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
+}
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilderTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilderTest.java
new file mode 100644
index 0000000..ce88c63
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkBuilderTest.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+/** Covers construction, defaults and sanity checking of KinesisStreamsSinkBuilder. */
+class KinesisStreamsSinkBuilderTest {
+    private static final SerializationSchema<String> SERIALIZATION_SCHEMA =
+            new SimpleStringSchema();
+    private static final PartitionKeyGenerator<String> PARTITION_KEY_GENERATOR =
+            element -> String.valueOf(element.hashCode());
+
+    @Test
+    void elementConverterOfSinkMustBeSetWhenBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(() -> KinesisStreamsSink.builder().setStreamName("stream").build())
+                .withMessageContaining(
+                        "No SerializationSchema was supplied to the KinesisStreamsSinkElementConverter builder.");
+    }
+
+    @Test
+    void streamNameOfSinkMustBeSetWhenBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisStreamsSink.<String>builder()
+                                        .setPartitionKeyGenerator(PARTITION_KEY_GENERATOR)
+                                        .setSerializationSchema(SERIALIZATION_SCHEMA)
+                                        .build())
+                .withMessageContaining(
+                        "The stream name must not be null when initializing the KDS Sink.");
+    }
+
+    @Test
+    void streamNameOfSinkMustBeSetToNonEmptyWhenBuilt() {
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisStreamsSink.<String>builder()
+                                        .setStreamName("")
+                                        .setPartitionKeyGenerator(PARTITION_KEY_GENERATOR)
+                                        .setSerializationSchema(SERIALIZATION_SCHEMA)
+                                        .build())
+                .withMessageContaining(
+                        "The stream name must be set when initializing the KDS Sink.");
+    }
+
+    @Test
+    void serializationSchemaMustBeSetWhenSinkIsBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisStreamsSink.<String>builder()
+                                        .setStreamName("stream")
+                                        .setPartitionKeyGenerator(PARTITION_KEY_GENERATOR)
+                                        .build())
+                .withMessageContaining(
+                        "No SerializationSchema was supplied to the KinesisStreamsSinkElementConverter builder.");
+    }
+
+    @Test
+    void partitionKeyGeneratorMustBeSetWhenSinkIsBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisStreamsSink.<String>builder()
+                                        .setStreamName("stream")
+                                        .setSerializationSchema(SERIALIZATION_SCHEMA)
+                                        .build())
+                .withMessageContaining(
+                        "No PartitionKeyGenerator lambda was supplied to the KinesisStreamsSinkElementConverter builder.");
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkITCase.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkITCase.java
new file mode 100644
index 0000000..2360558
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsSinkITCase.java
@@ -0,0 +1,566 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.api.common.time.Deadline;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.testutils.AWSServicesTestUtils;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+import org.apache.flink.connectors.kinesis.testutils.KinesaliteContainer;
+import org.apache.flink.runtime.client.JobExecutionException;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.functions.source.datagen.DataGeneratorSource;
+import org.apache.flink.streaming.api.functions.source.datagen.RandomGenerator;
+import org.apache.flink.util.DockerImageVersions;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.rnorth.ducttape.ratelimits.RateLimiter;
+import org.rnorth.ducttape.ratelimits.RateLimiterBuilder;
+import org.testcontainers.containers.Network;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.core.SdkSystemSetting;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
+import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.services.kinesis.model.StreamStatus;
+
+import java.time.Duration;
+import java.util.Properties;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_ACCESS_KEY_ID;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_ENDPOINT;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_SECRET_ACCESS_KEY;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.HTTP_PROTOCOL_VERSION;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.TRUST_ALL_CERTIFICATES;
+
+/** IT cases for using Kinesis Data Streams Sink based on Kinesalite. */
+@Testcontainers
+class KinesisStreamsSinkITCase {
+
+    private static final String DEFAULT_FIRST_SHARD_NAME = "shardId-000000000000";
+
+    private final SerializationSchema<String> serializationSchema = new SimpleStringSchema();
+    private final PartitionKeyGenerator<String> partitionKeyGenerator =
+            element -> String.valueOf(element.hashCode());
+    private final PartitionKeyGenerator<String> longPartitionKeyGenerator = element -> element;
+
+    @Container
+    private static final KinesaliteContainer KINESALITE =
+            new KinesaliteContainer(DockerImageName.parse(DockerImageVersions.KINESALITE))
+                    .withNetwork(Network.newNetwork())
+                    .withNetworkAliases("kinesalite");
+
+    private StreamExecutionEnvironment env;
+    private SdkHttpClient httpClient;
+    private KinesisClient kinesisClient;
+
+    @BeforeEach
+    void setUp() {
+        System.setProperty(SdkSystemSetting.CBOR_ENABLED.property(), "false");
+
+        env = StreamExecutionEnvironment.getExecutionEnvironment();
+        env.setParallelism(1);
+
+        httpClient = AWSServicesTestUtils.createHttpClient();
+        kinesisClient = KINESALITE.createHostClient(httpClient);
+    }
+
+    @AfterEach
+    void teardown() {
+        System.clearProperty(SdkSystemSetting.CBOR_ENABLED.property());
+        AWSGeneralUtil.closeResources(httpClient, kinesisClient);
+    }
+
+    @Test
+    void elementsMaybeWrittenSuccessfullyToLocalInstanceWhenBatchSizeIsReached() throws Exception {
+
+        new Scenario()
+                .withKinesaliteStreamName("test-stream-name-1")
+                .withSinkConnectionStreamName("test-stream-name-1")
+                .runScenario();
+    }
+
+    @Test
+    void elementsBufferedAndTriggeredByTimeBasedFlushShouldBeFlushedIfSourcedIsKeptAlive()
+            throws Exception {
+
+        new Scenario()
+                .withNumberOfElementsToSend(10)
+                .withMaxBatchSize(100)
+                .withExpectedElements(10)
+                .withKinesaliteStreamName("test-stream-name-2")
+                .withSinkConnectionStreamName("test-stream-name-2")
+                .runScenario();
+    }
+
+    @Test
+    void veryLargeMessagesSucceedInBeingPersisted() throws Exception {
+
+        new Scenario()
+                .withNumberOfElementsToSend(5)
+                .withSizeOfMessageBytes(2500)
+                .withMaxBatchSize(10)
+                .withExpectedElements(5)
+                .withKinesaliteStreamName("test-stream-name-3")
+                .withSinkConnectionStreamName("test-stream-name-3")
+                .runScenario();
+    }
+
+    @Test
+    void multipleInFlightRequestsResultsInCorrectNumberOfElementsPersisted() throws Exception {
+
+        new Scenario()
+                .withNumberOfElementsToSend(150)
+                .withSizeOfMessageBytes(2500)
+                .withBufferMaxTimeMS(2000)
+                .withMaxInflightReqs(10)
+                .withMaxBatchSize(20)
+                .withExpectedElements(150)
+                .withKinesaliteStreamName("test-stream-name-4")
+                .withSinkConnectionStreamName("test-stream-name-4")
+                .runScenario();
+    }
+
+    @Test
+    void nonExistentStreamNameShouldResultInFailureInFailOnErrorIsOn() {
+        testJobFatalFailureTerminatesCorrectlyWithFailOnErrorFlagSetTo(true, "test-stream-name-5");
+    }
+
+    @Test
+    void nonExistentStreamNameShouldResultInFailureInFailOnErrorIsOff() {
+        testJobFatalFailureTerminatesCorrectlyWithFailOnErrorFlagSetTo(false, "test-stream-name-6");
+    }
+
+    private void testJobFatalFailureTerminatesCorrectlyWithFailOnErrorFlagSetTo(
+            boolean failOnError, String streamName) {
+        Assertions.assertThatExceptionOfType(JobExecutionException.class)
+                .isThrownBy(
+                        () ->
+                                new Scenario()
+                                        .withKinesaliteStreamName(streamName)
+                                        .withSinkConnectionStreamName("non-existent-stream")
+                                        .withFailOnError(failOnError)
+                                        .runScenario())
+                .havingCause()
+                .havingCause()
+                .withMessageContaining("Encountered non-recoverable exception");
+    }
+
+    @Test
+    void veryLargeMessagesFailGracefullyWithBrokenElementConverter() {
+        Assertions.assertThatExceptionOfType(JobExecutionException.class)
+                .isThrownBy(
+                        () ->
+                                new Scenario()
+                                        .withNumberOfElementsToSend(5)
+                                        .withSizeOfMessageBytes(2500)
+                                        .withExpectedElements(5)
+                                        .withKinesaliteStreamName("test-stream-name-7")
+                                        .withSinkConnectionStreamName("test-stream-name-7")
+                                        .withSerializationSchema(serializationSchema)
+                                        .withPartitionKeyGenerator(longPartitionKeyGenerator)
+                                        .runScenario())
+                .havingCause()
+                .havingCause()
+                .withMessageContaining(
+                        "Encountered an exception while persisting records, not retrying due to {failOnError} being set.");
+    }
+
+    @Test
+    void badRegionShouldResultInFailureWhenInFailOnErrorIsOn() {
+        badRegionShouldResultInFailureWhenInFailOnErrorIs(true);
+    }
+
+    @Test
+    void badRegionShouldResultInFailureWhenInFailOnErrorIsOff() {
+        badRegionShouldResultInFailureWhenInFailOnErrorIs(false);
+    }
+
+    private void badRegionShouldResultInFailureWhenInFailOnErrorIs(boolean failOnError) {
+        Properties properties = getDefaultProperties();
+        properties.setProperty(AWS_REGION, "some-bad-region");
+
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError, properties, "Invalid AWS region");
+    }
+
+    @Test
+    void missingRegionShouldResultInFailureWhenInFailOnErrorIsOn() {
+        missingRegionShouldResultInFailureWhenInFailOnErrorIs(true);
+    }
+
+    @Test
+    void missingRegionShouldResultInFailureWhenInFailOnErrorIsOff() {
+        missingRegionShouldResultInFailureWhenInFailOnErrorIs(false);
+    }
+
+    private void missingRegionShouldResultInFailureWhenInFailOnErrorIs(boolean failOnError) {
+        Properties properties = getDefaultProperties();
+        properties.remove(AWS_REGION);
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError, properties, "region must not be null.");
+    }
+
+    @Test
+    void noURIEndpointShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noURIEndpointShouldResultInFailureWhenInFailOnErrorIs(true);
+    }
+
+    @Test
+    void noURIEndpointShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noURIEndpointShouldResultInFailureWhenInFailOnErrorIs(false);
+    }
+
+    private void noURIEndpointShouldResultInFailureWhenInFailOnErrorIs(boolean failOnError) {
+        Properties properties = getDefaultProperties();
+        properties.setProperty(AWS_ENDPOINT, "bad-endpoint-no-uri");
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError, properties, "The URI scheme of endpointOverride must not be null.");
+    }
+
+    @Test
+    void badEndpointShouldResultInFailureWhenInFailOnErrorIsOn() {
+        badEndpointShouldResultInFailureWhenInFailOnErrorIs(true);
+    }
+
+    @Test
+    void badEndpointShouldResultInFailureWhenInFailOnErrorIsOff() {
+        badEndpointShouldResultInFailureWhenInFailOnErrorIs(false);
+    }
+
+    private void badEndpointShouldResultInFailureWhenInFailOnErrorIs(boolean failOnError) {
+        Properties properties = getDefaultProperties();
+        properties.setProperty(AWS_ENDPOINT, "https://bad-endpoint-with-uri");
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError,
+                properties,
+                "UnknownHostException when attempting to interact with a service.");
+    }
+
+    @Test
+    void envVarWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                true,
+                AWSConfigConstants.CredentialProvider.ENV_VAR.toString(),
+                "Access key must be specified either via environment variable");
+    }
+
+    @Test
+    void envVarWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                false,
+                AWSConfigConstants.CredentialProvider.ENV_VAR.toString(),
+                "Access key must be specified either via environment variable");
+    }
+
+    @Test
+    void sysPropWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                true,
+                AWSConfigConstants.CredentialProvider.SYS_PROP.toString(),
+                "Unable to load credentials from system settings");
+    }
+
+    @Test
+    void sysPropWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                false,
+                AWSConfigConstants.CredentialProvider.SYS_PROP.toString(),
+                "Unable to load credentials from system settings");
+    }
+
+    @Test
+    void basicWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                true,
+                AWSConfigConstants.CredentialProvider.BASIC.toString(),
+                "Please set values for AWS Access Key ID ('aws.credentials.provider.basic.accesskeyid') and Secret Key ('aws.credentials.provider.basic.secretkey') when using the BASIC AWS credential provider type.");
+    }
+
+    @Test
+    void basicWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                false,
+                AWSConfigConstants.CredentialProvider.BASIC.toString(),
+                "Please set values for AWS Access Key ID ('aws.credentials.provider.basic.accesskeyid') and Secret Key ('aws.credentials.provider.basic.secretkey') when using the BASIC AWS credential provider type.");
+    }
+
+    @Test
+    void webIdentityTokenWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                true,
+                AWSConfigConstants.CredentialProvider.WEB_IDENTITY_TOKEN.toString(),
+                "Either the environment variable AWS_WEB_IDENTITY_TOKEN_FILE or the javaproperty aws.webIdentityTokenFile must be set");
+    }
+
+    @Test
+    void webIdentityTokenWithNoCredentialsShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                false,
+                AWSConfigConstants.CredentialProvider.WEB_IDENTITY_TOKEN.toString(),
+                "Either the environment variable AWS_WEB_IDENTITY_TOKEN_FILE or the javaproperty aws.webIdentityTokenFile must be set");
+    }
+
+    @Test
+    void wrongCredentialProviderNameShouldResultInFailureWhenInFailOnErrorIsOn() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                true, "WRONG", "Invalid AWS Credential Provider Type");
+    }
+
+    @Test
+    void wrongCredentialProviderNameShouldResultInFailureWhenInFailOnErrorIsOff() {
+        noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+                false, "WRONG", "Invalid AWS Credential Provider Type");
+    }
+
+    private void credentialsProvidedThroughProfilePathShouldResultInFailure(
+            boolean failOnError,
+            String credentialsProvider,
+            String credentialsProfileLocation,
+            String expectedMessage) {
+        Properties properties =
+                getDefaultPropertiesWithoutCredentialsSetAndCredentialProvider(credentialsProvider);
+        properties.put(
+                AWSConfigConstants.profilePath(AWS_CREDENTIALS_PROVIDER),
+                credentialsProfileLocation);
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError, properties, expectedMessage);
+    }
+
+    private void noCredentialsProvidedAndCredentialsProviderSpecifiedShouldResultInFailure(
+            boolean failOnError, String credentialsProvider, String expectedMessage) {
+        assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+                failOnError,
+                getDefaultPropertiesWithoutCredentialsSetAndCredentialProvider(credentialsProvider),
+                expectedMessage);
+    }
+
+    private void assertRunWithPropertiesAndStreamShouldFailWithExceptionOfType(
+            boolean failOnError, Properties properties, String expectedMessage) {
+        Assertions.assertThatExceptionOfType(JobExecutionException.class)
+                .isThrownBy(
+                        () ->
+                                new Scenario()
+                                        .withSinkConnectionStreamName("default-stream-name")
+                                        .withFailOnError(failOnError)
+                                        .withProperties(properties)
+                                        .runScenario())
+                .havingCause()
+                .havingCause()
+                .withMessageContaining(expectedMessage);
+    }
+
+    private Properties getDefaultPropertiesWithoutCredentialsSetAndCredentialProvider(
+            String credentialsProvider) {
+        Properties properties = getDefaultProperties();
+        properties.setProperty(AWS_CREDENTIALS_PROVIDER, credentialsProvider);
+        properties.remove(AWS_SECRET_ACCESS_KEY);
+        properties.remove(AWS_ACCESS_KEY_ID);
+        return properties;
+    }
+
+    private Properties getDefaultProperties() {
+        Properties properties = new Properties();
+        properties.setProperty(AWS_ENDPOINT, KINESALITE.getHostEndpointUrl());
+        properties.setProperty(AWS_ACCESS_KEY_ID, KINESALITE.getAccessKey());
+        properties.setProperty(AWS_SECRET_ACCESS_KEY, KINESALITE.getSecretKey());
+        properties.setProperty(AWS_REGION, KINESALITE.getRegion().toString());
+        return properties;
+    }
+
+    private class Scenario {
+        private int numberOfElementsToSend = 50;
+        private int sizeOfMessageBytes = 25;
+        private int bufferMaxTimeMS = 1000;
+        private int maxInflightReqs = 1;
+        private int maxBatchSize = 50;
+        private int expectedElements = 50;
+        private boolean failOnError = false;
+        private String kinesaliteStreamName = null;
+        private String sinkConnectionStreamName;
+        private SerializationSchema<String> serializationSchema =
+                KinesisStreamsSinkITCase.this.serializationSchema;
+        private PartitionKeyGenerator<String> partitionKeyGenerator =
+                KinesisStreamsSinkITCase.this.partitionKeyGenerator;
+        private Properties properties = KinesisStreamsSinkITCase.this.getDefaultProperties();
+
+        public void runScenario() throws Exception {
+            if (kinesaliteStreamName != null) {
+                prepareStream(kinesaliteStreamName);
+            }
+
+            properties.setProperty(TRUST_ALL_CERTIFICATES, "true");
+            properties.setProperty(HTTP_PROTOCOL_VERSION, "HTTP1_1");
+
+            DataStream<String> stream =
+                    env.addSource(
+                                    new DataGeneratorSource<String>(
+                                            RandomGenerator.stringGenerator(sizeOfMessageBytes),
+                                            100,
+                                            (long) numberOfElementsToSend))
+                            .returns(String.class);
+
+            KinesisStreamsSink<String> kdsSink =
+                    KinesisStreamsSink.<String>builder()
+                            .setSerializationSchema(serializationSchema)
+                            .setPartitionKeyGenerator(partitionKeyGenerator)
+                            .setMaxTimeInBufferMS(bufferMaxTimeMS)
+                            .setMaxInFlightRequests(maxInflightReqs)
+                            .setMaxBatchSize(maxBatchSize)
+                            .setFailOnError(failOnError)
+                            .setMaxBufferedRequests(1000)
+                            .setStreamName(sinkConnectionStreamName)
+                            .setKinesisClientProperties(properties)
+                            .setFailOnError(true)
+                            .build();
+
+            stream.sinkTo(kdsSink);
+
+            env.execute("KDS Async Sink Example Program");
+
+            String shardIterator =
+                    kinesisClient
+                            .getShardIterator(
+                                    GetShardIteratorRequest.builder()
+                                            .shardId(DEFAULT_FIRST_SHARD_NAME)
+                                            .shardIteratorType(ShardIteratorType.TRIM_HORIZON)
+                                            .streamName(kinesaliteStreamName)
+                                            .build())
+                            .shardIterator();
+
+            Assertions.assertThat(
+                            kinesisClient
+                                    .getRecords(
+                                            GetRecordsRequest.builder()
+                                                    .shardIterator(shardIterator)
+                                                    .build())
+                                    .records()
+                                    .size())
+                    .isEqualTo(expectedElements);
+        }
+
+        public Scenario withNumberOfElementsToSend(int numberOfElementsToSend) {
+            this.numberOfElementsToSend = numberOfElementsToSend;
+            return this;
+        }
+
+        public Scenario withSizeOfMessageBytes(int sizeOfMessageBytes) {
+            this.sizeOfMessageBytes = sizeOfMessageBytes;
+            return this;
+        }
+
+        public Scenario withBufferMaxTimeMS(int bufferMaxTimeMS) {
+            this.bufferMaxTimeMS = bufferMaxTimeMS;
+            return this;
+        }
+
+        public Scenario withMaxInflightReqs(int maxInflightReqs) {
+            this.maxInflightReqs = maxInflightReqs;
+            return this;
+        }
+
+        public Scenario withMaxBatchSize(int maxBatchSize) {
+            this.maxBatchSize = maxBatchSize;
+            return this;
+        }
+
+        public Scenario withExpectedElements(int expectedElements) {
+            this.expectedElements = expectedElements;
+            return this;
+        }
+
+        public Scenario withFailOnError(boolean failOnError) {
+            this.failOnError = failOnError;
+            return this;
+        }
+
+        public Scenario withSinkConnectionStreamName(String sinkConnectionStreamName) {
+            this.sinkConnectionStreamName = sinkConnectionStreamName;
+            return this;
+        }
+
+        public Scenario withKinesaliteStreamName(String kinesaliteStreamName) {
+            this.kinesaliteStreamName = kinesaliteStreamName;
+            return this;
+        }
+
+        public Scenario withSerializationSchema(SerializationSchema<String> serializationSchema) {
+            this.serializationSchema = serializationSchema;
+            return this;
+        }
+
+        public Scenario withPartitionKeyGenerator(
+                PartitionKeyGenerator<String> partitionKeyGenerator) {
+            this.partitionKeyGenerator = partitionKeyGenerator;
+            return this;
+        }
+
+        public Scenario withProperties(Properties properties) {
+            this.properties = properties;
+            return this;
+        }
+
+        private void prepareStream(String streamName) throws Exception {
+            final RateLimiter rateLimiter =
+                    RateLimiterBuilder.newBuilder()
+                            .withRate(1, SECONDS)
+                            .withConstantThroughput()
+                            .build();
+
+            kinesisClient.createStream(
+                    CreateStreamRequest.builder().streamName(streamName).shardCount(1).build());
+
+            Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
+            while (!rateLimiter.getWhenReady(() -> streamExists(streamName))) {
+                if (deadline.isOverdue()) {
+                    throw new RuntimeException("Failed to create stream within time");
+                }
+            }
+        }
+
+        private boolean streamExists(final String streamName) {
+            try {
+                return kinesisClient
+                                .describeStream(
+                                        DescribeStreamRequest.builder()
+                                                .streamName(streamName)
+                                                .build())
+                                .streamDescription()
+                                .streamStatus()
+                        == StreamStatus.ACTIVE;
+            } catch (Exception e) {
+                return false;
+            }
+        }
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializerTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializerTest.java
new file mode 100644
index 0000000..9c47520
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/KinesisStreamsStateSerializerTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
+
+import java.io.IOException;
+
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkWriterTestUtils.assertThatBufferStatesAreEqual;
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkWriterTestUtils.getTestState;
+
+/** Test class for {@link KinesisStreamsStateSerializer}. */
+class KinesisStreamsStateSerializerTest {
+
+    private static final ElementConverter<String, PutRecordsRequestEntry> ELEMENT_CONVERTER =
+            KinesisStreamsSinkElementConverter.<String>builder()
+                    .setSerializationSchema(new SimpleStringSchema())
+                    .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
+                    .build();
+
+    @Test
+    void testSerializeAndDeserialize() throws IOException {
+        BufferedRequestState<PutRecordsRequestEntry> expectedState =
+                getTestState(ELEMENT_CONVERTER, this::getRequestSize);
+
+        KinesisStreamsStateSerializer serializer = new KinesisStreamsStateSerializer();
+        BufferedRequestState<PutRecordsRequestEntry> actualState =
+                serializer.deserialize(1, serializer.serialize(expectedState));
+        assertThatBufferStatesAreEqual(actualState, expectedState);
+    }
+
+    private int getRequestSize(PutRecordsRequestEntry requestEntry) {
+        return requestEntry.data().asByteArrayUnsafe().length;
+    }
+}
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/examples/SinkIntoKinesis.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/examples/SinkIntoKinesis.java
new file mode 100644
index 0000000..c861f44
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/sink/examples/SinkIntoKinesis.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.sink.examples;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.util.jackson.JacksonMapperFactory;
+
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.utils.ImmutableMap;
+
+import java.util.Properties;
+
+/**
+ * An example application demonstrating how to use the {@link KinesisStreamsSink} to sink into KDS.
+ *
+ * <p>The {@link KinesisAsyncClient} used here may be configured in the standard way for the AWS
+ * SDK 2.x, e.g. by providing {@code AWS_ACCESS_KEY_ID} and {@code AWS_SECRET_ACCESS_KEY} through
+ * environment variables.
+ */
+public class SinkIntoKinesis {
+
+    private static final ObjectMapper OBJECT_MAPPER = JacksonMapperFactory.createObjectMapper();
+
+    public static void main(String[] args) throws Exception {
+        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+        env.enableCheckpointing(10_000);
+
+        DataStream<String> fromGen =
+                env.fromSequence(1, 10_000_000L)
+                        .map(Object::toString)
+                        .returns(String.class)
+                        .map(
+                                data ->
+                                        OBJECT_MAPPER.writeValueAsString(
+                                                ImmutableMap.of("data", data)));
+
+        Properties sinkProperties = new Properties();
+        sinkProperties.put(AWSConfigConstants.AWS_REGION, "your-region-here");
+
+        KinesisStreamsSink<String> kdsSink =
+                KinesisStreamsSink.<String>builder()
+                        .setSerializationSchema(new SimpleStringSchema())
+                        .setPartitionKeyGenerator(element -> String.valueOf(element.hashCode()))
+                        .setStreamName("your-stream-name")
+                        .setMaxBatchSize(20)
+                        .setKinesisClientProperties(sinkProperties)
+                        .build();
+
+        fromGen.sinkTo(kdsSink);
+
+        env.execute("KDS Async Sink Example Program");
+    }
+}
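
As an alternative to environment variables, the IT case earlier in this commit configures the client entirely through properties. A matching sketch for the example above is shown below; the key and secret values are placeholders only.

import org.apache.flink.connector.aws.config.AWSConfigConstants;

import java.util.Properties;

/** Sketch: supplying region and credentials to the sink via client properties. */
public class PropertyBasedClientConfigSketch {
    public static void main(String[] args) {
        Properties sinkProperties = new Properties();
        sinkProperties.put(AWSConfigConstants.AWS_REGION, "your-region-here");
        // These keys are resolved by the connector's credential handling (see AWSGeneralUtil
        // in this commit); the values are placeholders for illustration.
        sinkProperties.put(AWSConfigConstants.AWS_ACCESS_KEY_ID, "your-access-key-id");
        sinkProperties.put(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "your-secret-access-key");
        System.out.println(sinkProperties);
    }
}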
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactoryTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactoryTest.java
new file mode 100644
index 0000000..ef939cd
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/KinesisDynamicTableSinkFactoryTest.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.api.ValidationException;
+import org.apache.flink.table.catalog.Column;
+import org.apache.flink.table.catalog.ResolvedSchema;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.connector.sink.SinkV2Provider;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.factories.TableOptionsBuilder;
+import org.apache.flink.table.factories.TestFormatFactory;
+import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.FLUSH_BUFFER_SIZE;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.FLUSH_BUFFER_TIMEOUT;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_BATCH_SIZE;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_BUFFERED_REQUESTS;
+import static org.apache.flink.connector.base.table.AsyncSinkConnectorOptions.MAX_IN_FLIGHT_REQUESTS;
+import static org.apache.flink.connector.kinesis.table.KinesisConnectorOptions.SINK_FAIL_ON_ERROR;
+import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink;
+
+/** Test for {@link KinesisDynamicSink} created by {@link KinesisDynamicTableSinkFactory}. */
+class KinesisDynamicTableSinkFactoryTest {
+    private static final String STREAM_NAME = "myStream";
+
+    @Test
+    void testGoodTableSinkForPartitionedTable() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        DataType physicalDataType = sinkSchema.toPhysicalRowDataType();
+        Map<String, String> sinkOptions = defaultTableOptions().build();
+        List<String> sinkPartitionKeys = Arrays.asList("name", "curr_id");
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisDynamicSink actualSink =
+                (KinesisDynamicSink) createTableSink(sinkSchema, sinkPartitionKeys, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisDynamicSink expectedSink =
+                (KinesisDynamicSink)
+                        new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
+                                .setConsumedDataType(physicalDataType)
+                                .setStream(STREAM_NAME)
+                                .setKinesisClientProperties(defaultProducerProperties())
+                                .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                                .setPartitioner(
+                                        new RowDataFieldsKinesisPartitionKeyGenerator(
+                                                (RowType) physicalDataType.getLogicalType(),
+                                                sinkPartitionKeys))
+                                .build();
+        // verify that the constructed DynamicTableSink is as expected
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisStreamsSink.class);
+    }
+
+    @Test
+    void testGoodTableSinkCopyForPartitionedTable() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        DataType physicalDataType = sinkSchema.toPhysicalRowDataType();
+        Map<String, String> sinkOptions = defaultTableOptions().build();
+        List<String> sinkPartitionKeys = Arrays.asList("name", "curr_id");
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisDynamicSink actualSink =
+                (KinesisDynamicSink) createTableSink(sinkSchema, sinkPartitionKeys, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisDynamicSink expectedSink =
+                (KinesisDynamicSink)
+                        new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
+                                .setConsumedDataType(physicalDataType)
+                                .setStream(STREAM_NAME)
+                                .setKinesisClientProperties(defaultProducerProperties())
+                                .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                                .setPartitioner(
+                                        new RowDataFieldsKinesisPartitionKeyGenerator(
+                                                (RowType) physicalDataType.getLogicalType(),
+                                                sinkPartitionKeys))
+                                .build();
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink.copy());
+        Assertions.assertThat(expectedSink).isNotSameAs(expectedSink.copy());
+    }
+
+    @Test
+    void testGoodTableSinkForNonPartitionedTable() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions = defaultTableOptions().build();
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisDynamicSink actualSink =
+                (KinesisDynamicSink) createTableSink(sinkSchema, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisDynamicSink expectedSink =
+                (KinesisDynamicSink)
+                        new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
+                                .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
+                                .setStream(STREAM_NAME)
+                                .setKinesisClientProperties(defaultProducerProperties())
+                                .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                                .setPartitioner(new RandomKinesisPartitionKeyGenerator<>())
+                                .build();
+
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisStreamsSink.class);
+    }
+
+    @Test
+    void testGoodTableSinkForNonPartitionedTableWithSinkOptions() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions = defaultTableOptionsWithSinkOptions().build();
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisDynamicSink actualSink =
+                (KinesisDynamicSink) createTableSink(sinkSchema, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisDynamicSink expectedSink =
+                (KinesisDynamicSink)
+                        getDefaultSinkBuilder()
+                                .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
+                                .setStream(STREAM_NAME)
+                                .setKinesisClientProperties(defaultProducerProperties())
+                                .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                                .setPartitioner(new RandomKinesisPartitionKeyGenerator<>())
+                                .build();
+
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisStreamsSink.class);
+    }
+
+    @Test
+    void testGoodTableSinkForNonPartitionedTableWithProducerOptions() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions = defaultTableOptionsWithDeprecatedOptions().build();
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisDynamicSink actualSink =
+                (KinesisDynamicSink) createTableSink(sinkSchema, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisDynamicSink expectedSink =
+                (KinesisDynamicSink)
+                        new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
+                                .setFailOnError(true)
+                                .setMaxBatchSize(100)
+                                .setMaxInFlightRequests(100)
+                                .setMaxTimeInBufferMS(1000)
+                                .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
+                                .setStream(STREAM_NAME)
+                                .setKinesisClientProperties(defaultProducerProperties())
+                                .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                                .setPartitioner(new RandomKinesisPartitionKeyGenerator<>())
+                                .build();
+
+        // verify that the constructed DynamicTableSink is as expected
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisStreamsSink.class);
+    }
+
+    @Test
+    void testBadTableSinkForCustomPartitionerForPartitionedTable() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions =
+                defaultTableOptions()
+                        .withTableOption(KinesisConnectorOptions.SINK_PARTITIONER, "random")
+                        .build();
+
+        Assertions.assertThatExceptionOfType(ValidationException.class)
+                .isThrownBy(
+                        () ->
+                                createTableSink(
+                                        sinkSchema, Arrays.asList("name", "curr_id"), sinkOptions))
+                .havingCause()
+                .withMessageContaining(
+                        String.format(
+                                "Cannot set %s option for a table defined with a PARTITIONED BY clause",
+                                KinesisConnectorOptions.SINK_PARTITIONER.key()));
+    }
+
+    @Test
+    void testBadTableSinkForNonExistingPartitionerClass() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions =
+                defaultTableOptions()
+                        .withTableOption(KinesisConnectorOptions.SINK_PARTITIONER, "abc")
+                        .build();
+
+        Assertions.assertThatExceptionOfType(ValidationException.class)
+                .isThrownBy(() -> createTableSink(sinkSchema, sinkOptions))
+                .havingCause()
+                .withMessageContaining("Could not find and instantiate partitioner class 'abc'");
+    }
+
+    private ResolvedSchema defaultSinkSchema() {
+        return ResolvedSchema.of(
+                Column.physical("name", DataTypes.STRING()),
+                Column.physical("curr_id", DataTypes.BIGINT()),
+                Column.physical("time", DataTypes.TIMESTAMP(3)));
+    }
+
+    private TableOptionsBuilder defaultTableOptionsWithSinkOptions() {
+        return defaultTableOptions()
+                .withTableOption(SINK_FAIL_ON_ERROR.key(), "true")
+                .withTableOption(MAX_BATCH_SIZE.key(), "100")
+                .withTableOption(MAX_IN_FLIGHT_REQUESTS.key(), "100")
+                .withTableOption(MAX_BUFFERED_REQUESTS.key(), "100")
+                .withTableOption(FLUSH_BUFFER_SIZE.key(), "1000")
+                .withTableOption(FLUSH_BUFFER_TIMEOUT.key(), "1000");
+    }
+
+    private TableOptionsBuilder defaultTableOptionsWithDeprecatedOptions() {
+        return defaultTableOptions()
+                .withTableOption("sink.producer.record-max-buffered-time", "1000")
+                .withTableOption("sink.producer.collection-max-size", "100")
+                .withTableOption("sink.producer.collection-max-count", "100")
+                .withTableOption("sink.producer.fail-on-error", "true");
+    }
+
+    private TableOptionsBuilder defaultTableOptions() {
+        String connector = KinesisDynamicTableSinkFactory.IDENTIFIER;
+        String format = TestFormatFactory.IDENTIFIER;
+        return new TableOptionsBuilder(connector, format)
+                // default table options
+                .withTableOption(KinesisConnectorOptions.STREAM, STREAM_NAME)
+                .withTableOption("aws.region", "us-west-2")
+                .withTableOption("aws.credentials.provider", "BASIC")
+                .withTableOption("aws.credentials.basic.accesskeyid", "ververicka")
+                .withTableOption(
+                        "aws.credentials.basic.secretkey",
+                        "SuperSecretSecretSquirrel") // default format options
+                .withFormatOption(TestFormatFactory.DELIMITER, ",")
+                .withFormatOption(TestFormatFactory.FAIL_ON_MISSING, "true");
+    }
+
+    private KinesisDynamicSink.KinesisDynamicTableSinkBuilder getDefaultSinkBuilder() {
+        return new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
+                .setFailOnError(true)
+                .setMaxBatchSize(100)
+                .setMaxInFlightRequests(100)
+                .setMaxBufferSizeInBytes(1000)
+                .setMaxBufferedRequests(100)
+                .setMaxTimeInBufferMS(1000);
+    }
+
+    private Properties defaultProducerProperties() {
+        return new Properties() {
+            {
+                setProperty("aws.region", "us-west-2");
+                setProperty("aws.credentials.provider", "BASIC");
+                setProperty("aws.credentials.provider.basic.accesskeyid", "ververicka");
+                setProperty(
+                        "aws.credentials.provider.basic.secretkey", "SuperSecretSecretSquirrel");
+            }
+        };
+    }
+}
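
For context: the factory tests above assert that the table factory ultimately produces a KinesisStreamsSink. Below is a minimal sketch of building a roughly equivalent sink directly with the DataStream API, mirroring the table options exercised in the tests. The builder method names are assumed from the KinesisDynamicTableSinkBuilder calls shown in this diff and from the shared async-sink builder, so treat this as an illustrative sketch rather than the exact API of this commit.

    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.connector.kinesis.sink.KinesisStreamsSink;

    import java.util.Properties;

    public class KinesisStreamsSinkSketch {
        public static void main(String[] args) {
            // Client properties corresponding to the 'aws.*' table options used above.
            Properties clientProperties = new Properties();
            clientProperties.setProperty("aws.region", "us-west-2");

            KinesisStreamsSink<String> sink =
                    KinesisStreamsSink.<String>builder()
                            .setStreamName("myStream")                        // KinesisConnectorOptions.STREAM
                            .setKinesisClientProperties(clientProperties)
                            .setSerializationSchema(new SimpleStringSchema())
                            .setPartitionKeyGenerator(e -> String.valueOf(e.hashCode()))
                            .setFailOnError(true)                             // SINK_FAIL_ON_ERROR
                            .setMaxBatchSize(100)                             // AsyncSinkConnectorOptions.MAX_BATCH_SIZE
                            .setMaxInFlightRequests(100)                      // MAX_IN_FLIGHT_REQUESTS
                            .setMaxBufferedRequests(100)                      // MAX_BUFFERED_REQUESTS
                            .setMaxTimeInBufferMS(1000)                       // FLUSH_BUFFER_TIMEOUT
                            .build();
        }
    }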
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGeneratorTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGeneratorTest.java
new file mode 100644
index 0000000..7305436
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/RowDataFieldsKinesisPartitionKeyGeneratorTest.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table;
+
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.data.GenericRowData;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.StringData;
+import org.apache.flink.table.data.TimestampData;
+import org.apache.flink.table.factories.TableOptionsBuilder;
+import org.apache.flink.table.factories.TestFormatFactory;
+import org.apache.flink.table.types.logical.RowType;
+
+import org.junit.jupiter.api.Test;
+
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.temporal.ChronoField;
+import java.time.temporal.ChronoUnit;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.apache.flink.connector.kinesis.table.RowDataFieldsKinesisPartitionKeyGenerator.MAX_PARTITION_KEY_LENGTH;
+import static org.apache.flink.table.utils.EncodingUtils.repeat;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+/** Test for {@link RowDataFieldsKinesisPartitionKeyGenerator}. */
+class RowDataFieldsKinesisPartitionKeyGeneratorTest {
+
+    /** Table name to use for the tests. */
+    private static final String TABLE_NAME = "click_stream";
+
+    /** Row type to use for the tests. */
+    private static final RowType ROW_TYPE =
+            (RowType)
+                    DataTypes.ROW(
+                                    DataTypes.FIELD("time", DataTypes.TIMESTAMP(3)),
+                                    DataTypes.FIELD("ip", DataTypes.VARCHAR(16)),
+                                    DataTypes.FIELD("route", DataTypes.STRING()),
+                                    DataTypes.FIELD("date", DataTypes.STRING()),
+                                    DataTypes.FIELD("year", DataTypes.STRING()),
+                                    DataTypes.FIELD("month", DataTypes.STRING()),
+                                    DataTypes.FIELD("day", DataTypes.STRING()))
+                            .getLogicalType();
+
+    /** A list of field delimiters to use in the tests. */
+    private static final List<String> FIELD_DELIMITERS = Arrays.asList("", "|", ",", "--");
+
+    /** A {@code PARTITION BY(date, ip)} clause to use for the positive tests. */
+    private static final List<String> PARTITION_BY_DATE_AND_IP = Arrays.asList("date", "ip");
+
+    /** A {@code PARTITION BY(year, month, day)} clause to use for the positive tests. */
+    private static final List<String> PARTITION_BY_DATE = Arrays.asList("year", "month", "day");
+
+    /** A {@code PARTITION BY(route)} clause to use for the positive tests. */
+    private static final List<String> PARTITION_BY_ROUTE = Collections.singletonList("route");
+
+    /**
+     * Some not-so-random {@link LocalDateTime} instances to use for sample {@link RowData} elements
+     * in the tests.
+     */
+    private static final List<LocalDateTime> DATE_TIMES =
+            Arrays.asList(
+                    LocalDateTime.of(2014, 10, 22, 12, 0),
+                    LocalDateTime.of(2015, 11, 13, 10, 0),
+                    LocalDateTime.of(2015, 12, 14, 14, 0),
+                    LocalDateTime.of(2018, 10, 31, 15, 0));
+
+    /** A default IP to use for sample {@link RowData} elements in the tests. */
+    private static final String IP = "255.255.255.255";
+
+    // --------------------------------------------------------------------------------------------
+    // Positive tests
+    // --------------------------------------------------------------------------------------------
+
+    @Test
+    void testGoodPartitioner() {
+        for (String delimiter : FIELD_DELIMITERS) {
+            RowDataFieldsKinesisPartitionKeyGenerator partitioner =
+                    new RowDataFieldsKinesisPartitionKeyGenerator(
+                            ROW_TYPE, PARTITION_BY_DATE_AND_IP, delimiter);
+
+            for (LocalDateTime time : DATE_TIMES) {
+                String expectedKey = String.join(delimiter, String.valueOf(days(time)), IP);
+                String actualKey = partitioner.apply(createElement(time, IP));
+
+                assertThat(actualKey).isEqualTo(expectedKey);
+            }
+        }
+    }
+
+    @Test
+    void testGoodPartitionerExceedingMaxLength() {
+        RowDataFieldsKinesisPartitionKeyGenerator partitioner =
+                new RowDataFieldsKinesisPartitionKeyGenerator(ROW_TYPE, PARTITION_BY_ROUTE);
+
+        String ip = "255.255.255.255";
+        String route = "http://www.very-" + repeat("long-", 50) + "address.com/home";
+        String expectedKey = route.substring(0, MAX_PARTITION_KEY_LENGTH);
+
+        for (LocalDateTime time : DATE_TIMES) {
+            String actualKey = partitioner.apply(createElement(time, ip, route));
+            assertThat(actualKey).isEqualTo(expectedKey);
+        }
+    }
+
+    @Test
+    void testGoodPartitionerWithStaticPrefix() {
+        // fixed prefix
+        String year = String.valueOf(year(DATE_TIMES.get(0)));
+        String month = String.valueOf(monthOfYear(DATE_TIMES.get(0)));
+
+        for (String delimiter : FIELD_DELIMITERS) {
+            RowDataFieldsKinesisPartitionKeyGenerator partitioner =
+                    new RowDataFieldsKinesisPartitionKeyGenerator(
+                            ROW_TYPE, PARTITION_BY_DATE, delimiter);
+
+            partitioner.setStaticFields(
+                    new HashMap<String, String>() {
+                        {
+                            put("year", year);
+                            put("month", month);
+                        }
+                    });
+
+            for (LocalDateTime time : DATE_TIMES) {
+                String day = String.valueOf(dayOfMonth(time));
+                String expectedKey = String.join(delimiter, year, month, day);
+                String actualKey = partitioner.apply(createElement(time, IP));
+
+                assertThat(actualKey).isEqualTo(expectedKey);
+            }
+        }
+    }
+
+    @Test
+    void testGoodPartitionerWithStaticSuffix() {
+        // fixed suffix
+        String month = String.valueOf(monthOfYear(DATE_TIMES.get(0)));
+        String day = String.valueOf(dayOfMonth(DATE_TIMES.get(0)));
+
+        for (String delimiter : FIELD_DELIMITERS) {
+            RowDataFieldsKinesisPartitionKeyGenerator partitioner =
+                    new RowDataFieldsKinesisPartitionKeyGenerator(
+                            ROW_TYPE, PARTITION_BY_DATE, delimiter);
+
+            partitioner.setStaticFields(
+                    new HashMap<String, String>() {
+                        {
+                            put("month", month);
+                            put("day", day);
+                        }
+                    });
+
+            for (LocalDateTime time : DATE_TIMES) {
+                String year = String.valueOf(year(time));
+                String expectedKey = String.join(delimiter, year, month, day);
+                String actualKey = partitioner.apply(createElement(time, IP));
+
+                assertThat(actualKey).isEqualTo(expectedKey);
+            }
+        }
+    }
+
+    @Test
+    void testGoodPartitionerWithStaticInfix() {
+        // fixed infix
+        String month = String.valueOf(monthOfYear(DATE_TIMES.get(0)));
+
+        for (String delimiter : FIELD_DELIMITERS) {
+            RowDataFieldsKinesisPartitionKeyGenerator partitioner =
+                    new RowDataFieldsKinesisPartitionKeyGenerator(
+                            ROW_TYPE, PARTITION_BY_DATE, delimiter);
+
+            partitioner.setStaticFields(
+                    new HashMap<String, String>() {
+                        {
+                            put("month", month);
+                        }
+                    });
+
+            for (LocalDateTime time : DATE_TIMES) {
+                String year = String.valueOf(year(time));
+                String day = String.valueOf(dayOfMonth(time));
+                String expectedKey = String.join(delimiter, year, month, day);
+                String actualKey = partitioner.apply(createElement(time, IP));
+
+                assertThat(actualKey).isEqualTo(expectedKey);
+            }
+        }
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // Negative tests
+    // --------------------------------------------------------------------------------------------
+
+    @Test
+    void testBadPartitionerWithEmptyPrefix() {
+        assertThatThrownBy(
+                        () ->
+                                new RowDataFieldsKinesisPartitionKeyGenerator(
+                                        ROW_TYPE, Collections.emptyList()))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining(
+                        "Cannot create a RowDataFieldsKinesisPartitioner for a non-partitioned table");
+    }
+
+    @Test
+    void testBadPartitionerWithDuplicatePartitionKeys() {
+        assertThatThrownBy(
+                        () ->
+                                new RowDataFieldsKinesisPartitionKeyGenerator(
+                                        ROW_TYPE, Arrays.asList("ip", "ip")))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining("The sequence of partition keys cannot contain duplicates");
+    }
+
+    @Test
+    void testBadPartitionerWithBadFieldFieldNames() {
+        assertThatThrownBy(
+                        () ->
+                                new RowDataFieldsKinesisPartitionKeyGenerator(
+                                        ROW_TYPE, Arrays.asList("ip", "abc")))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining(
+                        "The following partition keys are not present in the table: abc");
+    }
+
+    @Test
+    void testBadPartitionerWithBadFieldFieldTypes() {
+        assertThatThrownBy(
+                        () ->
+                                new RowDataFieldsKinesisPartitionKeyGenerator(
+                                        ROW_TYPE, Arrays.asList("time", "ip")))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining(
+                        "The following partition keys have types that are not supported by Kinesis: time");
+    }
+
+    // --------------------------------------------------------------------------------------------
+    // Utilities
+    // --------------------------------------------------------------------------------------------
+
+    private RowData createElement(LocalDateTime time, String ip) {
+        return createElement(time, ip, "https://flink.apache.org/home");
+    }
+
+    private RowData createElement(LocalDateTime time, String ip, String route) {
+        GenericRowData element = new GenericRowData(ROW_TYPE.getFieldCount());
+        element.setField(0, TimestampData.fromLocalDateTime(time));
+        element.setField(1, StringData.fromString(ip));
+        element.setField(2, StringData.fromString(route));
+        element.setField(3, StringData.fromString(String.valueOf(days(time))));
+        element.setField(4, StringData.fromString(String.valueOf(year(time))));
+        element.setField(5, StringData.fromString(String.valueOf(monthOfYear(time))));
+        element.setField(6, StringData.fromString(String.valueOf(dayOfMonth(time))));
+        return element;
+    }
+
+    private int days(LocalDateTime time) {
+        return (int) ChronoUnit.DAYS.between(LocalDate.ofEpochDay(0), time);
+    }
+
+    private int year(LocalDateTime time) {
+        return time.get(ChronoField.YEAR);
+    }
+
+    private int monthOfYear(LocalDateTime time) {
+        return time.get(ChronoField.MONTH_OF_YEAR);
+    }
+
+    private int dayOfMonth(LocalDateTime time) {
+        return time.get(ChronoField.DAY_OF_MONTH);
+    }
+
+    private TableOptionsBuilder defaultTableOptions() {
+        String connector = KinesisDynamicTableSinkFactory.IDENTIFIER;
+        String format = TestFormatFactory.IDENTIFIER;
+        return new TableOptionsBuilder(connector, format)
+                // default table options
+                .withTableOption(KinesisConnectorOptions.STREAM, TABLE_NAME)
+                .withTableOption("properties.aws.region", "us-west-2")
+                // default format options
+                .withFormatOption(TestFormatFactory.DELIMITER, ",");
+    }
+}
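
A minimal sketch of what the generator under test produces, written as if it lived inside the test class above (it reuses ROW_TYPE, createElement and the imports defined there); the concrete key value follows from the days() and createElement() helpers:

    // PARTITIONED BY (date, ip) with "|" as the field delimiter.
    RowDataFieldsKinesisPartitionKeyGenerator generator =
            new RowDataFieldsKinesisPartitionKeyGenerator(
                    ROW_TYPE, Arrays.asList("date", "ip"), "|");

    String key =
            generator.apply(createElement(LocalDateTime.of(2014, 10, 22, 12, 0), "255.255.255.255"));
    // "date" is the epoch-day string set by createElement(), "ip" is the ip column, joined by "|":
    // key == "16365|255.255.255.255"   (16365 days between 1970-01-01 and 2014-10-22)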
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/util/KinesisProducerOptionsMapperTest.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/util/KinesisProducerOptionsMapperTest.java
new file mode 100644
index 0000000..141e870
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connector/kinesis/table/util/KinesisProducerOptionsMapperTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.kinesis.table.util;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.kinesis.table.util.KinesisStreamsConnectorOptionsUtils.KinesisProducerOptionsMapper;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/** Unit tests for {@link KinesisProducerOptionsMapper}. */
+class KinesisProducerOptionsMapperTest {
+
+    @Test
+    void testProducerVerifyCertificateOptionsMapping() {
+        Map<String, String> deprecatedOptions = new HashMap<>();
+        deprecatedOptions.put("sink.producer.verify-certificate", "false");
+        Map<String, String> expectedOptions = new HashMap<>();
+        expectedOptions.put(AWSConfigConstants.TRUST_ALL_CERTIFICATES, "true");
+
+        KinesisProducerOptionsMapper producerOptionsMapper =
+                new KinesisStreamsConnectorOptionsUtils.KinesisProducerOptionsMapper(
+                        deprecatedOptions);
+        Map<String, String> actualMappedProperties =
+                producerOptionsMapper.mapDeprecatedClientOptions();
+
+        Assertions.assertThat(actualMappedProperties).isEqualTo(expectedOptions);
+    }
+
+    @Test
+    void testProducerEndpointExtraction() {
+        Map<String, String> deprecatedOptions = new HashMap<>();
+        Map<String, String> expectedOptions = new HashMap<>();
+        deprecatedOptions.put("sink.producer.kinesis-endpoint", "some-end-point.kinesis");
+        expectedOptions.put(AWSConfigConstants.AWS_ENDPOINT, "https://some-end-point.kinesis");
+
+        KinesisProducerOptionsMapper producerOptionsMapper =
+                new KinesisProducerOptionsMapper(deprecatedOptions);
+        Map<String, String> actualMappedProperties =
+                producerOptionsMapper.mapDeprecatedClientOptions();
+
+        Assertions.assertThat(actualMappedProperties).isEqualTo(expectedOptions);
+    }
+
+    @Test
+    void testProducerEndpointAndPortExtraction() {
+        Map<String, String> deprecatedOptions = new HashMap<>();
+        Map<String, String> expectedOptions = new HashMap<>();
+        deprecatedOptions.put("sink.producer.kinesis-endpoint", "some-end-point.kinesis");
+        deprecatedOptions.put("sink.producer.kinesis-port", "1234");
+        expectedOptions.put(AWSConfigConstants.AWS_ENDPOINT, "https://some-end-point.kinesis:1234");
+
+        KinesisProducerOptionsMapper producerOptionsMapper =
+                new KinesisProducerOptionsMapper(deprecatedOptions);
+        Map<String, String> actualMappedProperties =
+                producerOptionsMapper.mapDeprecatedClientOptions();
+
+        Assertions.assertThat(actualMappedProperties).isEqualTo(expectedOptions);
+    }
+}
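
Putting the three mappings above together, a minimal sketch of how the mapper would typically be invoked, assuming the combined behaviour matches the individual cases tested above:

    Map<String, String> deprecatedOptions = new HashMap<>();
    deprecatedOptions.put("sink.producer.kinesis-endpoint", "some-end-point.kinesis");
    deprecatedOptions.put("sink.producer.kinesis-port", "1234");
    deprecatedOptions.put("sink.producer.verify-certificate", "false");

    Map<String, String> mapped =
            new KinesisProducerOptionsMapper(deprecatedOptions).mapDeprecatedClientOptions();
    // Expected, per the tests above:
    //   AWSConfigConstants.AWS_ENDPOINT            -> "https://some-end-point.kinesis:1234"
    //   AWSConfigConstants.TRUST_ALL_CERTIFICATES  -> "true"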
diff --git a/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connectors/kinesis/testutils/KinesaliteContainer.java b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connectors/kinesis/testutils/KinesaliteContainer.java
new file mode 100644
index 0000000..42593e5
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/java/org/apache/flink/connectors/kinesis/testutils/KinesaliteContainer.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connectors.kinesis.testutils;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.testutils.AWSServicesTestUtils;
+
+import org.rnorth.ducttape.ratelimits.RateLimiter;
+import org.rnorth.ducttape.ratelimits.RateLimiterBuilder;
+import org.rnorth.ducttape.unreliables.Unreliables;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.core.SdkSystemSetting;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.ListStreamsResponse;
+
+import java.util.Properties;
+import java.util.concurrent.Callable;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * A testcontainer based on Kinesalite.
+ *
+ * <p>Note that the more obvious localstack container with Kinesis took 1 minute to start vs 10
+ * seconds of Kinesalite.
+ */
+public class KinesaliteContainer extends GenericContainer<KinesaliteContainer> {
+    private static final String ACCESS_KEY = "access key";
+    private static final String SECRET_KEY = "secret key";
+    private static final int PORT = 4567;
+    private static final Region REGION = Region.US_EAST_1;
+    private static final String URL_FORMAT = "https://%s:%s";
+
+    public KinesaliteContainer(DockerImageName imageName) {
+        super(imageName);
+
+        System.setProperty(SdkSystemSetting.CBOR_ENABLED.property(), "false");
+
+        withExposedPorts(PORT);
+        waitingFor(new ListStreamsWaitStrategy());
+        startContainer();
+    }
+
+    /** Returns the endpoint url to access the container from outside the docker network. */
+    public String getContainerEndpointUrl() {
+        return String.format(URL_FORMAT, getContainerIpAddress(), getMappedPort(PORT));
+    }
+
+    /** Returns the endpoint url to access the host from inside the docker network. */
+    public String getHostEndpointUrl() {
+        return String.format(URL_FORMAT, getHost(), getMappedPort(PORT));
+    }
+
+    public String getAccessKey() {
+        return ACCESS_KEY;
+    }
+
+    public String getSecretKey() {
+        return SECRET_KEY;
+    }
+
+    public Region getRegion() {
+        return REGION;
+    }
+
+    /** Returns the properties to access the container from outside the docker network. */
+    public Properties getContainerProperties() {
+        return getProperties(getContainerEndpointUrl());
+    }
+
+    /** Returns the properties to access the host from inside the docker network. */
+    public Properties getHostProperties() {
+        return getProperties(getHostEndpointUrl());
+    }
+
+    /** Returns the client to access the container from outside the docker network. */
+    public KinesisClient createContainerClient(SdkHttpClient httpClient) {
+        return AWSServicesTestUtils.createAwsSyncClient(
+                getContainerEndpointUrl(), httpClient, KinesisClient.builder());
+    }
+
+    /** Returns the client to access the host from inside the docker network. */
+    public KinesisClient createHostClient(SdkHttpClient httpClient) {
+        return AWSServicesTestUtils.createAwsSyncClient(
+                getHostEndpointUrl(), httpClient, KinesisClient.builder());
+    }
+
+    private void startContainer() {
+        withCreateContainerCmdModifier(
+                cmd ->
+                        cmd.withEntrypoint(
+                                "/tini",
+                                "--",
+                                "/usr/src/app/node_modules/kinesalite/cli.js",
+                                "--path",
+                                "/var/lib/kinesalite",
+                                "--ssl"));
+    }
+
+    private Properties getProperties(String endpointUrl) {
+        Properties config = new Properties();
+        config.setProperty(AWSConfigConstants.AWS_REGION, REGION.toString());
+        config.setProperty(AWSConfigConstants.AWS_ENDPOINT, endpointUrl);
+        config.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, getAccessKey());
+        config.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, getSecretKey());
+        return config;
+    }
+
+    private class ListStreamsWaitStrategy extends AbstractWaitStrategy {
+        private static final int TRANSACTIONS_PER_SECOND = 1;
+
+        private final RateLimiter rateLimiter =
+                RateLimiterBuilder.newBuilder()
+                        .withRate(TRANSACTIONS_PER_SECOND, SECONDS)
+                        .withConstantThroughput()
+                        .build();
+
+        @Override
+        protected void waitUntilReady() {
+            try {
+                Thread.sleep(10000);
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+                throw new IllegalStateException("Kinesalite Container startup was interrupted");
+            }
+
+            retryUntilSuccessRunner(this::list);
+        }
+
+        protected <T> void retryUntilSuccessRunner(final Callable<T> lambda) {
+            Unreliables.retryUntilSuccess(
+                    (int) startupTimeout.getSeconds(),
+                    SECONDS,
+                    () -> rateLimiter.getWhenReady(lambda));
+        }
+
+        private ListStreamsResponse list() {
+            try (SdkHttpClient httpClient = AWSServicesTestUtils.createHttpClient();
+                    KinesisClient containerClient = createContainerClient(httpClient)) {
+                return containerClient.listStreams();
+            }
+        }
+    }
+}
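
A minimal usage sketch for this container, reusing the imports of the class above. The Docker image name is an assumption (any kinesalite image should work); the wait strategy defined in the class runs as part of start():

    KinesaliteContainer kinesalite =
            new KinesaliteContainer(DockerImageName.parse("instructure/kinesalite:latest")); // image assumed
    kinesalite.start();
    try (SdkHttpClient httpClient = AWSServicesTestUtils.createHttpClient();
            KinesisClient kinesis = kinesalite.createContainerClient(httpClient)) {
        // Create a stream, then pass kinesalite.getContainerProperties() to the sink under test.
        kinesis.createStream(b -> b.streamName("test-stream").shardCount(1));
    } finally {
        kinesalite.stop();
    }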
diff --git a/flink-connector-aws-kinesis-streams/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/flink-connector-aws-kinesis-streams/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
new file mode 100644
index 0000000..2899913
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.util.TestLoggerExtension
\ No newline at end of file
diff --git a/flink-connector-aws-kinesis-streams/src/test/resources/archunit.properties b/flink-connector-aws-kinesis-streams/src/test/resources/archunit.properties
new file mode 100644
index 0000000..15be88c
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/resources/archunit.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# By default we allow removing existing violations, but fail when new violations are added.
+freeze.store.default.allowStoreUpdate=true
+
+# Enable this if a new (frozen) rule has been added in order to create the initial store and record the existing violations.
+#freeze.store.default.allowStoreCreation=true
+
+# Enable this to allow new violations to be recorded.
+# NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new
+#       violation, please try to avoid creating the violation. If the violation was created due to a
+#       shortcoming of the rule, file a JIRA issue so the rule can be improved.
+#freeze.refreeze=true
+
+freeze.store.default.path=archunit-violations
diff --git a/flink-connector-aws-kinesis-streams/src/test/resources/log4j2-test.properties b/flink-connector-aws-kinesis-streams/src/test/resources/log4j2-test.properties
new file mode 100644
index 0000000..c4fa187
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/resources/log4j2-test.properties
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set root logger level to OFF to not flood build logs
+# set manually to INFO for debugging purposes
+rootLogger.level = OFF
+rootLogger.appenderRef.test.ref = TestLogger
+
+appender.testlogger.name = TestLogger
+appender.testlogger.type = CONSOLE
+appender.testlogger.target = SYSTEM_ERR
+appender.testlogger.layout.type = PatternLayout
+appender.testlogger.layout.pattern = %-4r [%t] %-5p %c %x - %m%n
diff --git a/flink-connector-aws-kinesis-streams/src/test/resources/profile b/flink-connector-aws-kinesis-streams/src/test/resources/profile
new file mode 100644
index 0000000..2573fd6
--- /dev/null
+++ b/flink-connector-aws-kinesis-streams/src/test/resources/profile
@@ -0,0 +1,7 @@
+[default]
+aws_access_key_id=11111111111111111111
+aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCY1111111111
+
+[foo]
+aws_access_key_id=22222222222222222222
+aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCY2222222222
diff --git a/flink-sql-connector-aws-kinesis-streams/pom.xml b/flink-sql-connector-aws-kinesis-streams/pom.xml
new file mode 100644
index 0000000..cad5922
--- /dev/null
+++ b/flink-sql-connector-aws-kinesis-streams/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>flink-sql-connector-aws-kinesis-streams</artifactId>
+    <name>Flink : Connectors : AWS : SQL : Amazon Kinesis Data Streams</name>
+
+    <properties>
+        <japicmp.skip>true</japicmp.skip>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-kinesis-streams</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>shade-flink</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <artifactSet>
+                                <includes>
+                                    <include>org.apache.flink:flink-connector-base</include>
+                                    <include>org.apache.flink:flink-connector-aws-base</include>
+                                    <include>org.apache.flink:flink-connector-aws-kinesis-streams</include>
+                                    <include>software.amazon.awssdk:*</include>
+                                    <include>org.reactivestreams:*</include>
+                                    <include>com.typesafe.netty:*</include>
+                                    <include>org.apache.httpcomponents:*</include>
+                                    <include>io.netty:*</include>
+                                    <include>commons-logging:commons-logging</include>
+                                </includes>
+                            </artifactSet>
+                            <relocations>
+                                <relocation>
+                                    <pattern>software.amazon</pattern>
+                                    <shadedPattern>org.apache.flink.connector.kinesis.sink.shaded.software.amazon
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.reactivestreams</pattern>
+                                    <shadedPattern>org.apache.flink.connector.kinesis.sink.shaded.org.reactivestreams
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>com.typesafe.netty</pattern>
+                                    <shadedPattern>org.apache.flink.connector.kinesis.sink.shaded.com.typesafe.netty
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.http</pattern>
+                                    <shadedPattern>org.apache.flink.connector.kinesis.sink.shaded.org.apache.http
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>io.netty</pattern>
+                                    <shadedPattern>org.apache.flink.connector.kinesis.sink.shaded.io.netty
+                                    </shadedPattern>
+                                </relocation>
+                            </relocations>
+                            <filters>
+                                <filter>
+                                    <artifact>org.apache.flink:flink-connector-aws-kinesis-streams:*</artifact>
+                                    <excludes>
+                                        <exclude>profile</exclude>
+                                    </excludes>
+                                </filter>
+                                <filter>
+                                    <artifact>org.apache.flink:flink-connector-aws-base:*</artifact>
+                                    <excludes>
+                                        <exclude>profile</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE b/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE
new file mode 100644
index 0000000..b3a38b8
--- /dev/null
+++ b/flink-sql-connector-aws-kinesis-streams/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,50 @@
+flink-sql-connector-aws-kinesis-streams
+
+Copyright 2014-2022 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This project bundles the following dependencies under the Apache Software License 2.0. (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+- software.amazon.awssdk:kinesis:2.17.247
+- software.amazon.awssdk:aws-cbor-protocol:2.17.247
+- software.amazon.awssdk:aws-json-protocol:2.17.247
+- software.amazon.awssdk:protocol-core:2.17.247
+- software.amazon.awssdk:profiles:2.17.247
+- software.amazon.awssdk:sdk-core:2.17.247
+- software.amazon.awssdk:auth:2.17.247
+- software.amazon.awssdk:http-client-spi:2.17.247
+- software.amazon.awssdk:regions:2.17.247
+- software.amazon.awssdk:annotations:2.17.247
+- software.amazon.awssdk:utils:2.17.247
+- software.amazon.awssdk:aws-core:2.17.247
+- software.amazon.awssdk:metrics-spi:2.17.247
+- software.amazon.awssdk:apache-client:2.17.247
+- software.amazon.awssdk:netty-nio-client:2.17.247
+- software.amazon.awssdk:sts:2.17.247
+- software.amazon.awssdk:aws-query-protocol:2.17.247
+- software.amazon.awssdk:json-utils:2.17.247
+- software.amazon.awssdk:third-party-jackson-core:2.17.247
+- software.amazon.awssdk:third-party-jackson-dataformat-cbor:2.17.247
+- io.netty:netty-codec-http:4.1.70.Final
+- io.netty:netty-codec-http2:4.1.70.Final
+- io.netty:netty-codec:4.1.70.Final
+- io.netty:netty-transport:4.1.70.Final
+- io.netty:netty-resolver:4.1.70.Final
+- io.netty:netty-common:4.1.70.Final
+- io.netty:netty-buffer:4.1.70.Final
+- io.netty:netty-handler:4.1.70.Final
+- io.netty:netty-transport-classes-epoll:4.1.70.Final
+- io.netty:netty-transport-native-epoll:linux-x86_64:4.1.70.Final
+- io.netty:netty-transport-native-unix-common:4.1.70.Final
+- com.typesafe.netty:netty-reactive-streams-http:2.0.5
+- com.typesafe.netty:netty-reactive-streams:2.0.5
+- org.apache.httpcomponents:httpclient:4.5.13
+- org.apache.httpcomponents:httpcore:4.4.14
+- commons-logging:commons-logging:1.1.3
+
+
+This project bundles the following dependencies under the Creative Commons Zero license (https://creativecommons.org/publicdomain/zero/1.0/).
+
+- org.reactivestreams:reactive-streams:1.0.3
diff --git a/pom.xml b/pom.xml
index d701652..b3e7ec6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -69,10 +69,14 @@ under the License.
 
     <modules>
         <module>flink-connector-aws-base</module>
+
         <module>flink-connector-dynamodb</module>
-        <module>flink-sql-connector-dynamodb</module>
         <module>flink-connector-aws-kinesis-firehose</module>
+        <module>flink-connector-aws-kinesis-streams</module>
+        
+        <module>flink-sql-connector-dynamodb</module>
         <module>flink-sql-connector-aws-kinesis-firehose</module>
+        <module>flink-sql-connector-aws-kinesis-streams</module>
     </modules>
 
     <dependencies>


[flink-connector-aws] 02/08: [FLINK-29907][Connectors/AWS] Externalize AWS Base from Flink repo

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit 89d4a559c3219b3c1b4e7d6bc2847c4f34c8105e
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 09:25:34 2022 +0000

    [FLINK-29907][Connectors/AWS] Externalize AWS Base from Flink repo
---
 .gitignore                                         |   3 +-
 .../33252236-fc9f-4f63-b537-39e2322f7ccd           |   0
 .../733a854b-2487-43da-a5fa-9b089af5fb4e           |   0
 .../archunit-violations/stored.rules               |   4 +
 flink-connector-aws-base/pom.xml                   | 110 +++
 .../connector/aws/config/AWSConfigConstants.java   | 176 +++++
 .../connector/aws/table/util/AWSOptionUtils.java   |  87 +++
 .../aws/table/util/AsyncClientOptionsUtils.java    | 109 +++
 .../flink/connector/aws/util/AWSAsyncSinkUtil.java | 164 +++++
 .../aws/util/AWSAuthenticationException.java       |  38 +
 .../AWSCredentialFatalExceptionClassifiers.java    |  43 ++
 .../flink/connector/aws/util/AWSGeneralUtil.java   | 396 +++++++++++
 .../src/main/resources/log4j2.properties           |  25 +
 .../architecture/TestCodeArchitectureTest.java     |  40 ++
 .../aws/table/util/AWSOptionsUtilTest.java         | 137 ++++
 .../table/util/AsyncClientOptionsUtilsTest.java    | 146 ++++
 .../aws/testutils/AWSServicesTestUtils.java        | 146 ++++
 .../aws/testutils/LocalstackContainer.java         |  85 +++
 .../connector/aws/util/AWSAsyncSinkUtilTest.java   | 253 +++++++
 .../connector/aws/util/AWSGeneralUtilTest.java     | 792 +++++++++++++++++++++
 .../apache/flink/connector/aws/util/TestUtil.java  |  54 ++
 .../org.junit.jupiter.api.extension.Extension      |  16 +
 .../src/test/resources/archunit.properties         |  31 +
 .../src/test/resources/log4j2-test.properties      |  28 +
 .../src/test/resources/profile                     |   7 +
 pom.xml                                            |  40 +-
 26 files changed, 2920 insertions(+), 10 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5f0068c..973e8d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,4 +35,5 @@ out/
 tools/flink
 tools/flink-*
 tools/releasing/release
-tools/japicmp-output
\ No newline at end of file
+tools/japicmp-output
+*/.idea/
\ No newline at end of file
diff --git a/flink-connector-aws-base/archunit-violations/33252236-fc9f-4f63-b537-39e2322f7ccd b/flink-connector-aws-base/archunit-violations/33252236-fc9f-4f63-b537-39e2322f7ccd
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-aws-base/archunit-violations/733a854b-2487-43da-a5fa-9b089af5fb4e b/flink-connector-aws-base/archunit-violations/733a854b-2487-43da-a5fa-9b089af5fb4e
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-aws-base/archunit-violations/stored.rules b/flink-connector-aws-base/archunit-violations/stored.rules
new file mode 100644
index 0000000..ef2e628
--- /dev/null
+++ b/flink-connector-aws-base/archunit-violations/stored.rules
@@ -0,0 +1,4 @@
+#
+#Tue Feb 22 12:16:40 CET 2022
+Tests\ inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=733a854b-2487-43da-a5fa-9b089af5fb4e
+ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ extension=33252236-fc9f-4f63-b537-39e2322f7ccd
diff --git a/flink-connector-aws-base/pom.xml b/flink-connector-aws-base/pom.xml
new file mode 100644
index 0000000..0249d61
--- /dev/null
+++ b/flink-connector-aws-base/pom.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>flink-connector-aws-base</artifactId>
+    <name>Flink : Connectors : AWS : Base</name>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-streaming-java</artifactId>
+            <version>${flink.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-base</artifactId>
+            <version>${flink.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>netty-nio-client</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>sts</artifactId>
+        </dependency>
+
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>iam</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- ArchUit test dependencies -->
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-architecture-tests-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                        <configuration>
+                            <excludes>
+                                <!-- test-jar is still used by JUnit4 modules -->
+                                <exclude>META-INF/services/org.junit.jupiter.api.extension.Extension</exclude>
+                            </excludes>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/config/AWSConfigConstants.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/config/AWSConfigConstants.java
new file mode 100644
index 0000000..b244b2c
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/config/AWSConfigConstants.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.config;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+
+/** Configuration keys for AWS service usage. */
+@PublicEvolving
+public class AWSConfigConstants {
+
+    /**
+     * Possible configuration values for the type of credential provider to use when accessing AWS.
+     * Internally, a corresponding implementation of {@link AwsCredentialsProvider} will be used.
+     */
+    public enum CredentialProvider {
+
+        /**
+         * Look for the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to create
+         * AWS credentials.
+         */
+        ENV_VAR,
+
+        /**
+         * Look for Java system properties aws.accessKeyId and aws.secretKey to create AWS
+         * credentials.
+         */
+        SYS_PROP,
+
+        /** Use an AWS credentials profile file to create the AWS credentials. */
+        PROFILE,
+
+        /**
+         * Simply create AWS credentials by supplying the AWS access key ID and AWS secret key in
+         * the configuration properties.
+         */
+        BASIC,
+
+        /**
+         * Create AWS credentials by assuming a role. The credentials for assuming the role must be
+         * supplied.
+         */
+        ASSUME_ROLE,
+
+        /**
+         * Use AWS WebIdentityToken in order to assume a role. A token file and role details can be
+         * supplied as configuration or environment variables.
+         */
+        WEB_IDENTITY_TOKEN,
+
+        /**
+         * A credentials provider chain will be used that searches for credentials in this order:
+         * ENV_VARS, SYS_PROPS, WEB_IDENTITY_TOKEN, PROFILE, and the credentials from the AWS instance metadata.
+         */
+        AUTO,
+    }
+
+    /** The AWS region of the service ("us-east-1" is used if not set). */
+    public static final String AWS_REGION = "aws.region";
+
+    /**
+     * The credential provider type to use when AWS credentials are required (BASIC is used if not
+     * set).
+     */
+    public static final String AWS_CREDENTIALS_PROVIDER = "aws.credentials.provider";
+
+    /** The AWS access key ID to use when setting credentials provider type to BASIC. */
+    public static final String AWS_ACCESS_KEY_ID = accessKeyId(AWS_CREDENTIALS_PROVIDER);
+
+    /** The AWS secret key to use when setting credentials provider type to BASIC. */
+    public static final String AWS_SECRET_ACCESS_KEY = secretKey(AWS_CREDENTIALS_PROVIDER);
+
+    /** Optional configuration for profile path if the credential provider type is set to PROFILE. */
+    public static final String AWS_PROFILE_PATH = profilePath(AWS_CREDENTIALS_PROVIDER);
+
+    /** Optional configuration for profile name if the credential provider type is set to PROFILE. */
+    public static final String AWS_PROFILE_NAME = profileName(AWS_CREDENTIALS_PROVIDER);
+
+    /**
+     * The role ARN to use when credential provider type is set to ASSUME_ROLE or
+     * WEB_IDENTITY_TOKEN.
+     */
+    public static final String AWS_ROLE_ARN = roleArn(AWS_CREDENTIALS_PROVIDER);
+
+    /**
+     * The role session name to use when credential provider type is set to ASSUME_ROLE or
+     * WEB_IDENTITY_TOKEN.
+     */
+    public static final String AWS_ROLE_SESSION_NAME = roleSessionName(AWS_CREDENTIALS_PROVIDER);
+
+    /** The external ID to use when credential provider type is set to ASSUME_ROLE. */
+    public static final String AWS_ROLE_EXTERNAL_ID = externalId(AWS_CREDENTIALS_PROVIDER);
+
+    /**
+     * The absolute path to the web identity token file that should be used if provider type is set
+     * to WEB_IDENTITY_TOKEN.
+     */
+    public static final String AWS_WEB_IDENTITY_TOKEN_FILE =
+            webIdentityTokenFile(AWS_CREDENTIALS_PROVIDER);
+
+    /**
+     * The credentials provider that provides credentials for assuming the role when credential
+     * provider type is set to ASSUME_ROLE. Roles can be nested, so AWS_ROLE_CREDENTIALS_PROVIDER
+     * can again be set to "ASSUME_ROLE".
+     */
+    public static final String AWS_ROLE_CREDENTIALS_PROVIDER =
+            roleCredentialsProvider(AWS_CREDENTIALS_PROVIDER);
+
+    /** The AWS endpoint for the service (derived from the AWS region setting if not set). */
+    public static final String AWS_ENDPOINT = "aws.endpoint";
+
+    /** Whether to trust all SSL certificates. */
+    public static final String TRUST_ALL_CERTIFICATES = "aws.trust.all.certificates";
+
+    /** The HTTP protocol version to use. */
+    public static final String HTTP_PROTOCOL_VERSION = "aws.http.protocol.version";
+
+    /** Maximum request concurrency for {@link SdkAsyncHttpClient}. */
+    public static final String HTTP_CLIENT_MAX_CONCURRENCY = "aws.http-client.max-concurrency";
+
+    /** Read Request timeout for {@link SdkAsyncHttpClient}. */
+    public static final String HTTP_CLIENT_READ_TIMEOUT_MILLIS = "aws.http-client.read-timeout";
+
+    public static String accessKeyId(String prefix) {
+        return prefix + ".basic.accesskeyid";
+    }
+
+    public static String secretKey(String prefix) {
+        return prefix + ".basic.secretkey";
+    }
+
+    public static String profilePath(String prefix) {
+        return prefix + ".profile.path";
+    }
+
+    public static String profileName(String prefix) {
+        return prefix + ".profile.name";
+    }
+
+    public static String roleArn(String prefix) {
+        return prefix + ".role.arn";
+    }
+
+    public static String roleSessionName(String prefix) {
+        return prefix + ".role.sessionName";
+    }
+
+    public static String externalId(String prefix) {
+        return prefix + ".role.externalId";
+    }
+
+    public static String roleCredentialsProvider(String prefix) {
+        return prefix + ".role.provider";
+    }
+
+    public static String webIdentityTokenFile(String prefix) {
+        return prefix + ".webIdentityToken.file";
+    }
+}
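
For reference, a minimal sketch of how these keys are typically populated before being handed to the utilities added later in this change. The region, access key id and secret values are placeholders for illustration only:

    import java.util.Properties;

    import org.apache.flink.connector.aws.config.AWSConfigConstants;

    public class AwsConfigExample {
        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-1");
            config.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "BASIC");
            // BASIC reads the derived keys aws.credentials.provider.basic.accesskeyid / .basic.secretkey
            config.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "my-access-key-id");
            config.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "my-secret-key");
            System.out.println(config);
        }
    }
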
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AWSOptionUtils.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AWSOptionUtils.java
new file mode 100644
index 0000000..c9a7f58
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AWSOptionUtils.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.table.util;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+import org.apache.flink.connector.base.table.options.ConfigurationValidator;
+import org.apache.flink.connector.base.table.options.TableOptionsUtils;
+import org.apache.flink.connector.base.table.util.ConfigurationValidatorUtil;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/** Handler for AWS specific table options. */
+@PublicEvolving
+public class AWSOptionUtils implements TableOptionsUtils, ConfigurationValidator {
+    /** Prefix for properties defined in {@link AWSConfigConstants}. */
+    public static final String AWS_PROPERTIES_PREFIX = "aws.";
+
+    private final Map<String, String> resolvedOptions;
+
+    public AWSOptionUtils(Map<String, String> resolvedOptions) {
+        this.resolvedOptions = resolvedOptions;
+    }
+
+    @Override
+    public Map<String, String> getProcessedResolvedOptions() {
+        Map<String, String> mappedResolvedOptions = new HashMap<>();
+        for (String key : resolvedOptions.keySet()) {
+            if (key.startsWith(AWS_PROPERTIES_PREFIX)) {
+                mappedResolvedOptions.put(translateAwsKey(key), resolvedOptions.get(key));
+            }
+        }
+        return mappedResolvedOptions;
+    }
+
+    @Override
+    public List<String> getNonValidatedPrefixes() {
+        return Collections.singletonList(AWS_PROPERTIES_PREFIX);
+    }
+
+    @Override
+    public Properties getValidatedConfigurations() {
+        Properties awsConfigurations = new Properties();
+        Map<String, String> mappedProperties = getProcessedResolvedOptions();
+        for (Map.Entry<String, String> entry : mappedProperties.entrySet()) {
+            awsConfigurations.setProperty(entry.getKey(), entry.getValue());
+        }
+        AWSGeneralUtil.validateAwsConfiguration(awsConfigurations);
+        ConfigurationValidatorUtil.validateOptionalBooleanProperty(
+                awsConfigurations,
+                AWSConfigConstants.TRUST_ALL_CERTIFICATES,
+                String.format(
+                        "Invalid %s value, must be a boolean.",
+                        AWSConfigConstants.TRUST_ALL_CERTIFICATES));
+        return awsConfigurations;
+    }
+
+    /** Map {@code aws.credentials.xxx} to {@code aws.credentials.provider.xxx}. */
+    private static String translateAwsKey(String key) {
+        if (!key.endsWith("credentials.provider")) {
+            return key.replace("credentials.", "credentials.provider.");
+        } else {
+            return key;
+        }
+    }
+}
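
A minimal sketch of the key translation performed by AWSOptionUtils, assuming it is driven directly with a resolved table-option map (the option values are placeholders): keys under the "aws." prefix are retained, "aws.credentials.*" is rewritten to "aws.credentials.provider.*", and everything else is dropped.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.flink.connector.aws.table.util.AWSOptionUtils;

    public class AwsOptionUtilsExample {
        public static void main(String[] args) {
            Map<String, String> tableOptions = new HashMap<>();
            tableOptions.put("aws.region", "us-west-2");
            tableOptions.put("aws.credentials.provider", "BASIC");
            tableOptions.put("aws.credentials.basic.accesskeyid", "my-access-key-id");
            tableOptions.put("aws.credentials.basic.secretkey", "my-secret-key");
            tableOptions.put("some.other.option", "ignored"); // non-"aws." keys are filtered out

            Properties clientProperties = new AWSOptionUtils(tableOptions).getValidatedConfigurations();
            // prints aws.credentials.provider.basic.accesskeyid, aws.credentials.provider.basic.secretkey, ...
            clientProperties.forEach((k, v) -> System.out.println(k + " = " + v));
        }
    }
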
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtils.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtils.java
new file mode 100644
index 0000000..b347a01
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtils.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.table.util;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.base.table.util.ConfigurationValidatorUtil;
+
+import software.amazon.awssdk.http.Protocol;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/** Class for handling AWS async client specific options. */
+@PublicEvolving
+public class AsyncClientOptionsUtils extends AWSOptionUtils {
+    /** Prefix for the sink async HTTP client table options. */
+    public static final String SINK_CLIENT_PREFIX = "sink.http-client.";
+
+    private static final String CLIENT_MAX_CONCURRENCY_OPTION = "max-concurrency";
+    private static final String CLIENT_MAX_TIMEOUT_OPTION = "read-timeout";
+    private static final String CLIENT_HTTP_PROTOCOL_VERSION_OPTION = "protocol.version";
+
+    private final Map<String, String> resolvedOptions;
+
+    public AsyncClientOptionsUtils(Map<String, String> resolvedOptions) {
+        super(resolvedOptions);
+        this.resolvedOptions = resolvedOptions;
+    }
+
+    @Override
+    public Map<String, String> getProcessedResolvedOptions() {
+        Map<String, String> mappedResolvedOptions = super.getProcessedResolvedOptions();
+        for (String key : resolvedOptions.keySet()) {
+            if (key.startsWith(SINK_CLIENT_PREFIX)) {
+                mappedResolvedOptions.put(translateClientKeys(key), resolvedOptions.get(key));
+            }
+        }
+        return mappedResolvedOptions;
+    }
+
+    @Override
+    public List<String> getNonValidatedPrefixes() {
+        return Arrays.asList(AWS_PROPERTIES_PREFIX, SINK_CLIENT_PREFIX);
+    }
+
+    @Override
+    public Properties getValidatedConfigurations() {
+        Properties clientConfigurations = super.getValidatedConfigurations();
+        clientConfigurations.putAll(getProcessedResolvedOptions());
+        validatedConfigurations(clientConfigurations);
+        return clientConfigurations;
+    }
+
+    private static String translateClientKeys(String key) {
+        String truncatedKey = key.substring(SINK_CLIENT_PREFIX.length());
+        switch (truncatedKey) {
+            case CLIENT_MAX_CONCURRENCY_OPTION:
+                return AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY;
+            case CLIENT_MAX_TIMEOUT_OPTION:
+                return AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS;
+            case CLIENT_HTTP_PROTOCOL_VERSION_OPTION:
+                return AWSConfigConstants.HTTP_PROTOCOL_VERSION;
+            default:
+                return truncatedKey;
+        }
+    }
+
+    private void validatedConfigurations(Properties config) {
+        ConfigurationValidatorUtil.validateOptionalPositiveIntProperty(
+                config,
+                AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY,
+                "Invalid value given for HTTP client max concurrency. Must be positive integer.");
+        ConfigurationValidatorUtil.validateOptionalPositiveIntProperty(
+                config,
+                AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS,
+                "Invalid value given for HTTP read timeout. Must be positive integer.");
+        validateOptionalHttpProtocolProperty(config);
+    }
+
+    private void validateOptionalHttpProtocolProperty(Properties config) {
+        if (config.containsKey(AWSConfigConstants.HTTP_PROTOCOL_VERSION)) {
+            try {
+                Protocol.valueOf(config.getProperty(AWSConfigConstants.HTTP_PROTOCOL_VERSION));
+            } catch (IllegalArgumentException e) {
+                throw new IllegalArgumentException(
+                        "Invalid value given for HTTP protocol. Must be HTTP1_1 or HTTP2.");
+            }
+        }
+    }
+}
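
Similarly, a sketch of how the "sink.http-client." table options are mapped onto the AWS HTTP client keys from AWSConfigConstants; the option values are illustrative placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.flink.connector.aws.table.util.AsyncClientOptionsUtils;

    public class AsyncClientOptionsExample {
        public static void main(String[] args) {
            Map<String, String> tableOptions = new HashMap<>();
            tableOptions.put("aws.region", "us-east-1");
            tableOptions.put("sink.http-client.max-concurrency", "10000");
            tableOptions.put("sink.http-client.read-timeout", "360000");
            tableOptions.put("sink.http-client.protocol.version", "HTTP2");

            Properties clientProperties =
                    new AsyncClientOptionsUtils(tableOptions).getValidatedConfigurations();
            // prints aws.http-client.max-concurrency, aws.http-client.read-timeout and
            // aws.http.protocol.version alongside the validated aws.* options
            clientProperties.forEach((k, v) -> System.out.println(k + " = " + v));
        }
    }
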
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtil.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtil.java
new file mode 100644
index 0000000..1256142
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtil.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.runtime.util.EnvironmentInformation;
+
+import software.amazon.awssdk.awscore.client.builder.AwsAsyncClientBuilder;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.core.SdkClient;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption;
+import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
+import software.amazon.awssdk.core.client.config.SdkClientOption;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+
+import java.net.URI;
+import java.util.Optional;
+import java.util.Properties;
+
+/** Some utilities specific to Amazon Web Service. */
+@Internal
+public class AWSAsyncSinkUtil extends AWSGeneralUtil {
+
+    /** V2 suffix to denote the unified sinks. V1 sinks are based on KPL etc. */
+    static final String V2_USER_AGENT_SUFFIX = " V2";
+
+    /**
+     * Creates a user agent prefix for Flink. This can be used by HTTP Clients.
+     *
+     * @param userAgentFormat flink user agent prefix format with placeholders for version and
+     *     commit id.
+     * @return a user agent prefix for Flink
+     */
+    public static String formatFlinkUserAgentPrefix(String userAgentFormat) {
+        return String.format(
+                userAgentFormat,
+                EnvironmentInformation.getVersion(),
+                EnvironmentInformation.getRevisionInformation().commitId);
+    }
+
+    /**
+     * @param configProps configuration properties
+     * @param httpClient the underlying HTTP client used to talk to AWS
+     * @return a new AWS Client
+     */
+    public static <
+                    S extends SdkClient,
+                    T extends
+                            AwsAsyncClientBuilder<? extends T, S>
+                                    & AwsClientBuilder<? extends T, S>>
+            S createAwsAsyncClient(
+                    final Properties configProps,
+                    final SdkAsyncHttpClient httpClient,
+                    final T clientBuilder,
+                    final String awsUserAgentPrefixFormat,
+                    final String awsClientUserAgentPrefix) {
+        SdkClientConfiguration clientConfiguration = SdkClientConfiguration.builder().build();
+        return createAwsAsyncClient(
+                configProps,
+                clientConfiguration,
+                httpClient,
+                clientBuilder,
+                awsUserAgentPrefixFormat,
+                awsClientUserAgentPrefix);
+    }
+
+    /**
+     * @param configProps configuration properties
+     * @param clientConfiguration the AWS SDK v2 config to instantiate the client
+     * @param httpClient the underlying HTTP client used to talk to AWS
+     * @return a new AWS Client
+     */
+    public static <
+                    S extends SdkClient,
+                    T extends
+                            AwsAsyncClientBuilder<? extends T, S>
+                                    & AwsClientBuilder<? extends T, S>>
+            S createAwsAsyncClient(
+                    final Properties configProps,
+                    final SdkClientConfiguration clientConfiguration,
+                    final SdkAsyncHttpClient httpClient,
+                    final T clientBuilder,
+                    final String awsUserAgentPrefixFormat,
+                    final String awsClientUserAgentPrefix) {
+        String flinkUserAgentPrefix =
+                Optional.ofNullable(configProps.getProperty(awsClientUserAgentPrefix))
+                        .orElse(
+                                formatFlinkUserAgentPrefix(
+                                        awsUserAgentPrefixFormat + V2_USER_AGENT_SUFFIX));
+
+        final ClientOverrideConfiguration overrideConfiguration =
+                createClientOverrideConfiguration(
+                        clientConfiguration,
+                        ClientOverrideConfiguration.builder(),
+                        flinkUserAgentPrefix);
+
+        return createAwsAsyncClient(configProps, clientBuilder, httpClient, overrideConfiguration);
+    }
+
+    @VisibleForTesting
+    static ClientOverrideConfiguration createClientOverrideConfiguration(
+            final SdkClientConfiguration config,
+            final ClientOverrideConfiguration.Builder overrideConfigurationBuilder,
+            String flinkUserAgentPrefix) {
+
+        overrideConfigurationBuilder
+                .putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX, flinkUserAgentPrefix)
+                .putAdvancedOption(
+                        SdkAdvancedClientOption.USER_AGENT_SUFFIX,
+                        config.option(SdkAdvancedClientOption.USER_AGENT_SUFFIX));
+
+        Optional.ofNullable(config.option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT))
+                .ifPresent(overrideConfigurationBuilder::apiCallAttemptTimeout);
+
+        Optional.ofNullable(config.option(SdkClientOption.API_CALL_TIMEOUT))
+                .ifPresent(overrideConfigurationBuilder::apiCallTimeout);
+
+        return overrideConfigurationBuilder.build();
+    }
+
+    @VisibleForTesting
+    static <
+                    S extends SdkClient,
+                    T extends
+                            AwsAsyncClientBuilder<? extends T, S>
+                                    & AwsClientBuilder<? extends T, S>>
+            S createAwsAsyncClient(
+                    final Properties configProps,
+                    final T clientBuilder,
+                    final SdkAsyncHttpClient httpClient,
+                    final ClientOverrideConfiguration overrideConfiguration) {
+
+        if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
+            final URI endpointOverride =
+                    URI.create(configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT));
+            clientBuilder.endpointOverride(endpointOverride);
+        }
+
+        return clientBuilder
+                .httpClient(httpClient)
+                .overrideConfiguration(overrideConfiguration)
+                .credentialsProvider(getCredentialsProvider(configProps))
+                .region(getRegion(configProps))
+                .build();
+    }
+}
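
A hedged sketch of how a service client can be created through this utility. FirehoseAsyncClient is used purely for illustration (its SDK artifact ships with the Firehose connector module, not with flink-connector-aws-base), and the user-agent format string and override property key are hypothetical values rather than constants defined in this repository:

    import java.util.Properties;

    import org.apache.flink.connector.aws.config.AWSConfigConstants;
    import org.apache.flink.connector.aws.util.AWSAsyncSinkUtil;
    import org.apache.flink.connector.aws.util.AWSGeneralUtil;

    import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
    import software.amazon.awssdk.services.firehose.FirehoseAsyncClient;

    public class AsyncClientCreationExample {
        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-1");

            // Defaults (HTTP/2, 10k max connections, 6 minute read timeout) come from AWSGeneralUtil.
            SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(config);

            FirehoseAsyncClient client =
                    AWSAsyncSinkUtil.createAwsAsyncClient(
                            config,
                            httpClient,
                            FirehoseAsyncClient.builder(),
                            // %s placeholders are filled with the Flink version and commit id
                            "Apache Flink %s (%s) Firehose Connector",
                            // hypothetical property key that would override the user agent prefix
                            "aws.firehose.client.user-agent-prefix");
            System.out.println(client.serviceName());
        }
    }
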
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAuthenticationException.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAuthenticationException.java
new file mode 100644
index 0000000..e5527b3
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSAuthenticationException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.annotation.Internal;
+
+/**
+ * Exception thrown when authentication with the provided AWS credentials fails. This includes
+ * missing configuration, illegal access and unreachable endpoints. An {@code
+ * AWSAuthenticationException} is always non-retryable.
+ */
+@Internal
+public class AWSAuthenticationException extends RuntimeException {
+
+    public AWSAuthenticationException(final String message) {
+        super(message);
+    }
+
+    public AWSAuthenticationException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSCredentialFatalExceptionClassifiers.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSCredentialFatalExceptionClassifiers.java
new file mode 100644
index 0000000..713e11d
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSCredentialFatalExceptionClassifiers.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.base.sink.throwable.FatalExceptionClassifier;
+
+import software.amazon.awssdk.core.exception.SdkClientException;
+import software.amazon.awssdk.services.sts.model.StsException;
+
+/** Class containing set of {@link FatalExceptionClassifier} for AWS credential failures. */
+@Internal
+public class AWSCredentialFatalExceptionClassifiers {
+    public static FatalExceptionClassifier getInvalidCredentialsExceptionClassifier() {
+        return FatalExceptionClassifier.withRootCauseOfType(
+                StsException.class,
+                err ->
+                        new AWSAuthenticationException(
+                                "Encountered non-recoverable exception relating to the provided credentials.",
+                                err));
+    }
+
+    public static FatalExceptionClassifier getSdkClientMisconfiguredExceptionClassifier() {
+        return FatalExceptionClassifier.withRootCauseOfType(
+                SdkClientException.class, err -> (Exception) err);
+    }
+}
diff --git a/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSGeneralUtil.java b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSGeneralUtil.java
new file mode 100644
index 0000000..e1d7f98
--- /dev/null
+++ b/flink-connector-aws-base/src/main/java/org/apache/flink/connector/aws/util/AWSGeneralUtil.java
@@ -0,0 +1,396 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider;
+import org.apache.flink.util.ExceptionUtils;
+
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.WebIdentityTokenFileCredentialsProvider;
+import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.Http2Configuration;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.profiles.ProfileFile;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.sts.StsClient;
+import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
+import software.amazon.awssdk.services.sts.model.AssumeRoleRequest;
+import software.amazon.awssdk.utils.AttributeMap;
+import software.amazon.awssdk.utils.SdkAutoCloseable;
+
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.regex.Pattern;
+
+/** Some general utilities specific to Amazon Web Service. */
+@Internal
+public class AWSGeneralUtil {
+    private static final Duration CONNECTION_ACQUISITION_TIMEOUT = Duration.ofSeconds(60);
+    private static final int INITIAL_WINDOW_SIZE_BYTES = 512 * 1024; // 512 KB
+    private static final Duration HEALTH_CHECK_PING_PERIOD = Duration.ofSeconds(60);
+
+    private static final int HTTP_CLIENT_MAX_CONCURRENCY = 10_000;
+    private static final Duration HTTP_CLIENT_READ_TIMEOUT = Duration.ofMinutes(6);
+    private static final Protocol HTTP_PROTOCOL = Protocol.HTTP2;
+    private static final boolean TRUST_ALL_CERTIFICATES = false;
+    private static final AttributeMap HTTP_CLIENT_DEFAULTS =
+            AttributeMap.builder()
+                    .put(SdkHttpConfigurationOption.MAX_CONNECTIONS, HTTP_CLIENT_MAX_CONCURRENCY)
+                    .put(SdkHttpConfigurationOption.READ_TIMEOUT, HTTP_CLIENT_READ_TIMEOUT)
+                    .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, TRUST_ALL_CERTIFICATES)
+                    .put(SdkHttpConfigurationOption.PROTOCOL, HTTP_PROTOCOL)
+                    .build();
+
+    /**
+     * Determines and returns the credential provider type from the given properties.
+     *
+     * @return the credential provider type
+     */
+    public static CredentialProvider getCredentialProviderType(
+            final Properties configProps, final String configPrefix) {
+        if (!configProps.containsKey(configPrefix)) {
+            if (configProps.containsKey(AWSConfigConstants.accessKeyId(configPrefix))
+                    && configProps.containsKey(AWSConfigConstants.secretKey(configPrefix))) {
+                // if the credential provider type is not specified, but the Access Key ID and
+                // Secret Key are given, it will default to BASIC
+                return CredentialProvider.BASIC;
+            } else {
+                // if the credential provider type is not specified, it will default to AUTO
+                return CredentialProvider.AUTO;
+            }
+        } else {
+            try {
+                return CredentialProvider.valueOf(configProps.getProperty(configPrefix));
+            } catch (IllegalArgumentException e) {
+                throw new IllegalArgumentException(
+                        String.format(
+                                "Invalid AWS Credential Provider Type %s.",
+                                configProps.getProperty(configPrefix)),
+                        e);
+            }
+        }
+    }
+
+    /**
+     * Return a {@link AwsCredentialsProvider} instance corresponding to the configuration
+     * properties.
+     *
+     * @param configProps the configuration property map
+     * @return The corresponding AWS Credentials Provider instance
+     */
+    public static AwsCredentialsProvider getCredentialsProvider(final Map<String, ?> configProps) {
+        Properties properties = new Properties();
+        properties.putAll(configProps);
+
+        return getCredentialsProvider(properties);
+    }
+
+    /**
+     * Return a {@link AwsCredentialsProvider} instance corresponding to the configuration
+     * properties.
+     *
+     * @param configProps the configuration properties
+     * @return The corresponding AWS Credentials Provider instance
+     */
+    public static AwsCredentialsProvider getCredentialsProvider(final Properties configProps) {
+        return getCredentialsProvider(configProps, AWSConfigConstants.AWS_CREDENTIALS_PROVIDER);
+    }
+
+    public static AwsCredentialsProvider getCredentialsProvider(
+            final Properties configProps, final String configPrefix) {
+        CredentialProvider credentialProviderType =
+                getCredentialProviderType(configProps, configPrefix);
+
+        switch (credentialProviderType) {
+            case ENV_VAR:
+                return EnvironmentVariableCredentialsProvider.create();
+
+            case SYS_PROP:
+                return SystemPropertyCredentialsProvider.create();
+
+            case PROFILE:
+                return getProfileCredentialProvider(configProps, configPrefix);
+
+            case BASIC:
+                return () ->
+                        AwsBasicCredentials.create(
+                                configProps.getProperty(
+                                        AWSConfigConstants.accessKeyId(configPrefix)),
+                                configProps.getProperty(
+                                        AWSConfigConstants.secretKey(configPrefix)));
+
+            case ASSUME_ROLE:
+                return getAssumeRoleCredentialProvider(configProps, configPrefix);
+
+            case WEB_IDENTITY_TOKEN:
+                return getWebIdentityTokenFileCredentialsProvider(
+                        WebIdentityTokenFileCredentialsProvider.builder(),
+                        configProps,
+                        configPrefix);
+
+            case AUTO:
+                return DefaultCredentialsProvider.create();
+
+            default:
+                throw new IllegalArgumentException(
+                        "Credential provider not supported: " + credentialProviderType);
+        }
+    }
+
+    public static AwsCredentialsProvider getProfileCredentialProvider(
+            final Properties configProps, final String configPrefix) {
+        String profileName =
+                configProps.getProperty(AWSConfigConstants.profileName(configPrefix), null);
+
+        ProfileCredentialsProvider.Builder profileBuilder =
+                ProfileCredentialsProvider.builder().profileName(profileName);
+
+        Optional.ofNullable(configProps.getProperty(AWSConfigConstants.profilePath(configPrefix)))
+                .map(Paths::get)
+                .ifPresent(
+                        path ->
+                                profileBuilder.profileFile(
+                                        ProfileFile.builder()
+                                                .type(ProfileFile.Type.CREDENTIALS)
+                                                .content(path)
+                                                .build()));
+
+        return profileBuilder.build();
+    }
+
+    private static AwsCredentialsProvider getAssumeRoleCredentialProvider(
+            final Properties configProps, final String configPrefix) {
+        return StsAssumeRoleCredentialsProvider.builder()
+                .refreshRequest(
+                        AssumeRoleRequest.builder()
+                                .roleArn(
+                                        configProps.getProperty(
+                                                AWSConfigConstants.roleArn(configPrefix)))
+                                .roleSessionName(
+                                        configProps.getProperty(
+                                                AWSConfigConstants.roleSessionName(configPrefix)))
+                                .externalId(
+                                        configProps.getProperty(
+                                                AWSConfigConstants.externalId(configPrefix)))
+                                .build())
+                .stsClient(
+                        StsClient.builder()
+                                .credentialsProvider(
+                                        getCredentialsProvider(
+                                                configProps,
+                                                AWSConfigConstants.roleCredentialsProvider(
+                                                        configPrefix)))
+                                .region(getRegion(configProps))
+                                .build())
+                .build();
+    }
+
+    @VisibleForTesting
+    static AwsCredentialsProvider getWebIdentityTokenFileCredentialsProvider(
+            final WebIdentityTokenFileCredentialsProvider.Builder webIdentityBuilder,
+            final Properties configProps,
+            final String configPrefix) {
+
+        Optional.ofNullable(configProps.getProperty(AWSConfigConstants.roleArn(configPrefix)))
+                .ifPresent(webIdentityBuilder::roleArn);
+
+        Optional.ofNullable(
+                        configProps.getProperty(AWSConfigConstants.roleSessionName(configPrefix)))
+                .ifPresent(webIdentityBuilder::roleSessionName);
+
+        Optional.ofNullable(
+                        configProps.getProperty(
+                                AWSConfigConstants.webIdentityTokenFile(configPrefix)))
+                .map(Paths::get)
+                .ifPresent(webIdentityBuilder::webIdentityTokenFile);
+
+        return webIdentityBuilder.build();
+    }
+
+    public static SdkAsyncHttpClient createAsyncHttpClient(final Properties configProperties) {
+        return createAsyncHttpClient(configProperties, NettyNioAsyncHttpClient.builder());
+    }
+
+    public static SdkAsyncHttpClient createAsyncHttpClient(
+            final Properties configProperties,
+            final NettyNioAsyncHttpClient.Builder httpClientBuilder) {
+        final AttributeMap.Builder clientConfiguration =
+                AttributeMap.builder().put(SdkHttpConfigurationOption.TCP_KEEPALIVE, true);
+
+        Optional.ofNullable(
+                        configProperties.getProperty(
+                                AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY))
+                .map(Integer::parseInt)
+                .ifPresent(
+                        integer ->
+                                clientConfiguration.put(
+                                        SdkHttpConfigurationOption.MAX_CONNECTIONS, integer));
+
+        Optional.ofNullable(
+                        configProperties.getProperty(
+                                AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS))
+                .map(Integer::parseInt)
+                .map(Duration::ofMillis)
+                .ifPresent(
+                        timeout ->
+                                clientConfiguration.put(
+                                        SdkHttpConfigurationOption.READ_TIMEOUT, timeout));
+
+        Optional.ofNullable(configProperties.getProperty(AWSConfigConstants.TRUST_ALL_CERTIFICATES))
+                .map(Boolean::parseBoolean)
+                .ifPresent(
+                        bool ->
+                                clientConfiguration.put(
+                                        SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, bool));
+
+        Optional.ofNullable(configProperties.getProperty(AWSConfigConstants.HTTP_PROTOCOL_VERSION))
+                .map(Protocol::valueOf)
+                .ifPresent(
+                        protocol ->
+                                clientConfiguration.put(
+                                        SdkHttpConfigurationOption.PROTOCOL, protocol));
+        return createAsyncHttpClient(clientConfiguration.build(), httpClientBuilder);
+    }
+
+    public static SdkAsyncHttpClient createAsyncHttpClient(
+            final NettyNioAsyncHttpClient.Builder httpClientBuilder) {
+        return createAsyncHttpClient(AttributeMap.empty(), httpClientBuilder);
+    }
+
+    public static SdkAsyncHttpClient createAsyncHttpClient(
+            final AttributeMap config, final NettyNioAsyncHttpClient.Builder httpClientBuilder) {
+        httpClientBuilder
+                .connectionAcquisitionTimeout(CONNECTION_ACQUISITION_TIMEOUT)
+                .http2Configuration(
+                        Http2Configuration.builder()
+                                .healthCheckPingPeriod(HEALTH_CHECK_PING_PERIOD)
+                                .initialWindowSize(INITIAL_WINDOW_SIZE_BYTES)
+                                .build());
+        return httpClientBuilder.buildWithDefaults(config.merge(HTTP_CLIENT_DEFAULTS));
+    }
+
+    /**
+     * Creates a {@link Region} object from the given Properties.
+     *
+     * @param configProps the properties containing the region
+     * @return the region specified by the properties
+     */
+    public static Region getRegion(final Properties configProps) {
+        return Region.of(configProps.getProperty(AWSConfigConstants.AWS_REGION));
+    }
+
+    /**
+     * Checks whether or not a region is valid.
+     *
+     * @param region The AWS region to check
+     * @return true if the supplied region is valid, false otherwise
+     */
+    public static boolean isValidRegion(Region region) {
+        return Pattern.matches(
+                "^[a-z]+-([a-z]+[-]{0,1}[a-z]+-([0-9]|global)|global)$", region.id());
+    }
+
+    /**
+     * Validates configuration properties related to Amazon AWS service.
+     *
+     * @param config the properties to setup credentials and region
+     */
+    public static void validateAwsConfiguration(Properties config) {
+        if (config.containsKey(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER)) {
+
+            validateCredentialProvider(config);
+            // if BASIC type is used, also check that the Access Key ID and Secret Key is supplied
+            CredentialProvider credentialsProviderType =
+                    getCredentialProviderType(config, AWSConfigConstants.AWS_CREDENTIALS_PROVIDER);
+            if (credentialsProviderType == CredentialProvider.BASIC) {
+                if (!config.containsKey(AWSConfigConstants.AWS_ACCESS_KEY_ID)
+                        || !config.containsKey(AWSConfigConstants.AWS_SECRET_ACCESS_KEY)) {
+                    throw new IllegalArgumentException(
+                            "Please set values for AWS Access Key ID ('"
+                                    + AWSConfigConstants.AWS_ACCESS_KEY_ID
+                                    + "') "
+                                    + "and Secret Key ('"
+                                    + AWSConfigConstants.AWS_SECRET_ACCESS_KEY
+                                    + "') when using the BASIC AWS credential provider type.");
+                }
+            }
+        }
+
+        if (config.containsKey(AWSConfigConstants.AWS_REGION)) {
+            // specified AWS Region name must be recognizable
+            if (!isValidRegion(getRegion(config))) {
+                StringBuilder sb = new StringBuilder();
+                for (Region region : Region.regions()) {
+                    sb.append(region).append(", ");
+                }
+                throw new IllegalArgumentException(
+                        "Invalid AWS region set in config. Valid values are: " + sb.toString());
+            }
+        }
+    }
+
+    public static void closeResources(SdkAutoCloseable... resources) {
+        RuntimeException exception = null;
+        for (SdkAutoCloseable resource : resources) {
+            if (resource != null) {
+                try {
+                    resource.close();
+                } catch (RuntimeException e) {
+                    exception = ExceptionUtils.firstOrSuppressed(e, exception);
+                }
+            }
+        }
+        if (exception != null) {
+            throw exception;
+        }
+    }
+
+    public static void validateAwsCredentials(Properties config) {
+        validateAwsConfiguration(config);
+        getCredentialsProvider(config).resolveCredentials();
+    }
+
+    private static void validateCredentialProvider(Properties config) {
+        // value specified for AWSConfigConstants.AWS_CREDENTIALS_PROVIDER needs to be
+        // recognizable
+        try {
+            getCredentialsProvider(config);
+        } catch (IllegalArgumentException e) {
+            StringBuilder sb = new StringBuilder();
+            for (CredentialProvider type : CredentialProvider.values()) {
+                sb.append(type.toString()).append(", ");
+            }
+            throw new IllegalArgumentException(
+                    "Invalid AWS Credential Provider Type set in config. Valid values are: "
+                            + sb.toString());
+        }
+    }
+}
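
To round this off, a small sketch of validating a configuration and resolving credentials and region with AWSGeneralUtil; the property values are placeholders:

    import java.util.Properties;

    import org.apache.flink.connector.aws.config.AWSConfigConstants;
    import org.apache.flink.connector.aws.util.AWSGeneralUtil;

    import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
    import software.amazon.awssdk.regions.Region;

    public class GeneralUtilExample {
        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty(AWSConfigConstants.AWS_REGION, "ap-southeast-1");
            config.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "BASIC");
            config.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "my-access-key-id");
            config.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "my-secret-key");

            // Throws IllegalArgumentException for an unknown region or provider type, or when
            // BASIC is selected without an access key id and secret key.
            AWSGeneralUtil.validateAwsConfiguration(config);

            AwsCredentialsProvider credentials = AWSGeneralUtil.getCredentialsProvider(config);
            Region region = AWSGeneralUtil.getRegion(config);
            System.out.println(region + " / " + credentials.resolveCredentials().accessKeyId());
        }
    }
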
diff --git a/flink-connector-aws-base/src/main/resources/log4j2.properties b/flink-connector-aws-base/src/main/resources/log4j2.properties
new file mode 100644
index 0000000..c64a340
--- /dev/null
+++ b/flink-connector-aws-base/src/main/resources/log4j2.properties
@@ -0,0 +1,25 @@
+################################################################################
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+################################################################################
+
+rootLogger.level = OFF
+rootLogger.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java b/flink-connector-aws-base/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
new file mode 100644
index 0000000..8d533a6
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.architecture;
+
+import org.apache.flink.architecture.common.ImportOptions;
+
+import com.tngtech.archunit.core.importer.ImportOption;
+import com.tngtech.archunit.junit.AnalyzeClasses;
+import com.tngtech.archunit.junit.ArchTest;
+import com.tngtech.archunit.junit.ArchTests;
+
+/** Architecture tests for test code. */
+@AnalyzeClasses(
+        packages = "org.apache.flink.connector.aws",
+        importOptions = {
+            ImportOption.OnlyIncludeTests.class,
+            ImportOptions.ExcludeScalaImportOption.class,
+            ImportOptions.ExcludeShadedImportOption.class
+        })
+public class TestCodeArchitectureTest {
+
+    @ArchTest
+    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AWSOptionsUtilTest.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AWSOptionsUtilTest.java
new file mode 100644
index 0000000..6786317
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AWSOptionsUtilTest.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.table.util;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/** Unit tests for {@link AWSOptionUtils}. */
+class AWSOptionsUtilTest {
+
+    @Test
+    void testAWSKeyMapper() {
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(getDefaultAWSConfigurations());
+        Map<String, String> expectedProperties = getDefaultExpectedAWSConfigurations();
+
+        // process default aws options.
+        Map<String, String> actualMappedProperties = awsOptionUtils.getProcessedResolvedOptions();
+
+        Assertions.assertThat(actualMappedProperties).isEqualTo(expectedProperties);
+    }
+
+    @Test
+    void testAWSKeySelectionAndMapping() {
+        Map<String, String> resolvedTableOptions = getDefaultAWSConfigurations();
+        Map<String, String> expectedProperties = getDefaultExpectedAWSConfigurations();
+        // adding irrelevant configurations
+        resolvedTableOptions.put("non.aws.key1", "value1");
+        resolvedTableOptions.put("non.aws.key2", "value2");
+        resolvedTableOptions.put("non.aws.key3", "value3");
+        resolvedTableOptions.put("non.aws.key4", "value4");
+
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(resolvedTableOptions);
+        Map<String, String> actualMappedProperties = awsOptionUtils.getProcessedResolvedOptions();
+
+        Assertions.assertThat(actualMappedProperties).isEqualTo(expectedProperties);
+    }
+
+    @Test
+    void testGoodAWSProperties() {
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(getDefaultAWSConfigurations());
+        Properties expectedProperties = new Properties();
+        expectedProperties.putAll(getDefaultExpectedAWSConfigurations());
+        // extract aws configuration from properties
+        Properties actualProperties = awsOptionUtils.getValidatedConfigurations();
+
+        Assertions.assertThat(actualProperties).isEqualTo(expectedProperties);
+    }
+
+    @Test
+    void testBadAWSRegion() {
+        Map<String, String> defaultProperties = getDefaultAWSConfigurations();
+        defaultProperties.put("aws.region", "invalid-aws-region");
+
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(defaultProperties);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(awsOptionUtils::getValidatedConfigurations)
+                .withMessageContaining("Invalid AWS region set in config.");
+    }
+
+    @Test
+    void testMissingAWSCredentials() {
+        Map<String, String> defaultProperties = getDefaultAWSConfigurations();
+        defaultProperties.remove("aws.credentials.basic.accesskeyid");
+
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(defaultProperties);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(awsOptionUtils::getValidatedConfigurations)
+                .withMessageContaining(
+                        String.format(
+                                "Please set values for AWS Access Key ID ('%s') "
+                                        + "and Secret Key ('%s') when using the BASIC AWS credential provider type.",
+                                AWSConfigConstants.AWS_ACCESS_KEY_ID,
+                                AWSConfigConstants.AWS_SECRET_ACCESS_KEY));
+    }
+
+    @Test
+    void testInvalidTrustAllCertificatesOption() {
+        Map<String, String> defaultProperties = getDefaultAWSConfigurations();
+        defaultProperties.put("aws.trust.all.certificates", "invalid-boolean");
+
+        AWSOptionUtils awsOptionUtils = new AWSOptionUtils(defaultProperties);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(awsOptionUtils::getValidatedConfigurations)
+                .withMessageContaining(
+                        String.format(
+                                "Invalid %s value, must be a boolean.",
+                                AWSConfigConstants.TRUST_ALL_CERTIFICATES));
+    }
+
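+    /** AWS table options as resolved from the DDL, using the short credential keys. */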
+    private Map<String, String> getDefaultAWSConfigurations() {
+        Map<String, String> defaultAWSConfigurations = new HashMap<>();
+        defaultAWSConfigurations.put("aws.region", "us-west-2");
+        defaultAWSConfigurations.put("aws.credentials.provider", "BASIC");
+        defaultAWSConfigurations.put("aws.credentials.basic.accesskeyid", "ververicka");
+        defaultAWSConfigurations.put(
+                "aws.credentials.basic.secretkey", "SuperSecretSecretSquirrel");
+        defaultAWSConfigurations.put("aws.trust.all.certificates", "true");
+        return defaultAWSConfigurations;
+    }
+
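+    /** The same options after mapping, with credential keys in their provider-prefixed form. */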
+    private Map<String, String> getDefaultExpectedAWSConfigurations() {
+        Map<String, String> defaultExpectedAWSConfigurations = new HashMap<>();
+        defaultExpectedAWSConfigurations.put("aws.region", "us-west-2");
+        defaultExpectedAWSConfigurations.put("aws.credentials.provider", "BASIC");
+        defaultExpectedAWSConfigurations.put(
+                "aws.credentials.provider.basic.accesskeyid", "ververicka");
+        defaultExpectedAWSConfigurations.put(
+                "aws.credentials.provider.basic.secretkey", "SuperSecretSecretSquirrel");
+        defaultExpectedAWSConfigurations.put("aws.trust.all.certificates", "true");
+        return defaultExpectedAWSConfigurations;
+    }
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtilsTest.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtilsTest.java
new file mode 100644
index 0000000..afc35e2
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/table/util/AsyncClientOptionsUtilsTest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.table.util;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/** Unit tests for {@link AsyncClientOptionsUtils}. */
+class AsyncClientOptionsUtilsTest {
+
+    @Test
+    void testGoodAsyncClientOptionsMapping() {
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(getDefaultClientOptions());
+
+        Map<String, String> expectedConfigurations = getDefaultExpectedClientOptions();
+        Map<String, String> actualConfigurations =
+                asyncClientOptionsUtils.getProcessedResolvedOptions();
+
+        Assertions.assertThat(actualConfigurations).isEqualTo(expectedConfigurations);
+    }
+
+    @Test
+    void testAsyncClientOptionsUtilsFilteringNonPrefixedOptions() {
+        Map<String, String> defaultClientOptions = getDefaultClientOptions();
+        defaultClientOptions.put("sink.not.http-client.some.option", "someValue");
+
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(defaultClientOptions);
+
+        Map<String, String> expectedConfigurations = getDefaultExpectedClientOptions();
+        Map<String, String> actualConfigurations =
+                asyncClientOptionsUtils.getProcessedResolvedOptions();
+
+        Assertions.assertThat(actualConfigurations).isEqualTo(expectedConfigurations);
+    }
+
+    @Test
+    void testAsyncClientOptionsUtilsExtractingCorrectConfiguration() {
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(getDefaultClientOptions());
+
+        Properties expectedConfigurations = getDefaultExpectedClientConfigs();
+        Properties actualConfigurations = asyncClientOptionsUtils.getValidatedConfigurations();
+
+        Assertions.assertThat(actualConfigurations).isEqualTo(expectedConfigurations);
+    }
+
+    @Test
+    void testAsyncClientOptionsUtilsFailOnInvalidMaxConcurrency() {
+        Map<String, String> defaultClientOptions = getDefaultClientOptions();
+        defaultClientOptions.put("sink.http-client.max-concurrency", "invalid-integer");
+
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(defaultClientOptions);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(asyncClientOptionsUtils::getValidatedConfigurations)
+                .withMessageContaining(
+                        "Invalid value given for HTTP client max concurrency. Must be positive integer.");
+    }
+
+    @Test
+    void testAsyncClientOptionsUtilsFailOnInvalidReadTimeout() {
+        Map<String, String> defaultClientOptions = getDefaultClientOptions();
+        defaultClientOptions.put("sink.http-client.read-timeout", "invalid-integer");
+
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(defaultClientOptions);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(asyncClientOptionsUtils::getValidatedConfigurations)
+                .withMessageContaining(
+                        "Invalid value given for HTTP read timeout. Must be positive integer.");
+    }
+
+    @Test
+    void testAsyncClientOptionsUtilsFailOnInvalidHttpProtocol() {
+        Map<String, String> defaultProperties = getDefaultClientOptions();
+        defaultProperties.put("sink.http-client.protocol.version", "invalid-http-protocol");
+
+        AsyncClientOptionsUtils asyncClientOptionsUtils =
+                new AsyncClientOptionsUtils(defaultProperties);
+
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(asyncClientOptionsUtils::getValidatedConfigurations)
+                .withMessageContaining(
+                        "Invalid value given for HTTP protocol. Must be HTTP1_1 or HTTP2.");
+    }
+
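+    /** Async client table options as given in the DDL, under the sink.http-client prefix. */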
+    private static Map<String, String> getDefaultClientOptions() {
+        Map<String, String> defaultKinesisClientOptions = new HashMap<>();
+        defaultKinesisClientOptions.put("aws.region", "us-east-1");
+        defaultKinesisClientOptions.put("sink.http-client.max-concurrency", "10000");
+        defaultKinesisClientOptions.put("sink.http-client.read-timeout", "360000");
+        defaultKinesisClientOptions.put("sink.http-client.protocol.version", "HTTP2");
+        return defaultKinesisClientOptions;
+    }
+
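+    /** The same options after mapping onto the corresponding AWSConfigConstants keys. */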
+    private static Map<String, String> getDefaultExpectedClientOptions() {
+        Map<String, String> defaultExpectedKinesisClientConfigurations = new HashMap<>();
+        defaultExpectedKinesisClientConfigurations.put(AWSConfigConstants.AWS_REGION, "us-east-1");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY, "10000");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS, "360000");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_PROTOCOL_VERSION, "HTTP2");
+        return defaultExpectedKinesisClientConfigurations;
+    }
+
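+    /** The validated configuration expected from getValidatedConfigurations(), as Properties. */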
+    private static Properties getDefaultExpectedClientConfigs() {
+        Properties defaultExpectedKinesisClientConfigurations = new Properties();
+        defaultExpectedKinesisClientConfigurations.put(AWSConfigConstants.AWS_REGION, "us-east-1");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY, "10000");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS, "360000");
+        defaultExpectedKinesisClientConfigurations.put(
+                AWSConfigConstants.HTTP_PROTOCOL_VERSION, "HTTP2");
+        return defaultExpectedKinesisClientConfigurations;
+    }
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/AWSServicesTestUtils.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/AWSServicesTestUtils.java
new file mode 100644
index 0000000..7d54c39
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/AWSServicesTestUtils.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.testutils;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder;
+import software.amazon.awssdk.core.ResponseBytes;
+import software.amazon.awssdk.core.SdkClient;
+import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.iam.IamClient;
+import software.amazon.awssdk.services.iam.model.CreateRoleRequest;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.S3Object;
+import software.amazon.awssdk.services.s3.waiters.S3Waiter;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import java.net.URI;
+import java.util.List;
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_ENDPOINT;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.TRUST_ALL_CERTIFICATES;
+
+/**
+ * A set of static methods that can be used to call common AWS services on the Localstack container.
+ */
+public class AWSServicesTestUtils {
+
+    private static final String ACCESS_KEY_ID = "accessKeyId";
+    private static final String SECRET_ACCESS_KEY = "secretAccessKey";
+
+    public static S3Client createS3Client(String endpoint, SdkHttpClient httpClient) {
+        return createAwsSyncClient(endpoint, httpClient, S3Client.builder());
+    }
+
+    public static IamClient createIamClient(String endpoint, SdkHttpClient httpClient) {
+        return createAwsSyncClient(endpoint, httpClient, IamClient.builder());
+    }
+
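+    /** Builds a synchronous AWS SDK v2 client against the given endpoint and HTTP client. */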
+    public static <
+                    S extends SdkClient,
+                    T extends
+                            AwsSyncClientBuilder<? extends T, S> & AwsClientBuilder<? extends T, S>>
+            S createAwsSyncClient(String endpoint, SdkHttpClient httpClient, T clientBuilder) {
+        Properties config = createConfig(endpoint);
+        return clientBuilder
+                .httpClient(httpClient)
+                .endpointOverride(URI.create(endpoint))
+                .credentialsProvider(AWSGeneralUtil.getCredentialsProvider(config))
+                .region(AWSGeneralUtil.getRegion(config))
+                .build();
+    }
+
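+    /** Connector properties for the given endpoint, using dummy static credentials. */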
+    public static Properties createConfig(String endpoint) {
+        Properties config = new Properties();
+        config.setProperty(AWS_REGION, Region.AP_SOUTHEAST_1.toString());
+        config.setProperty(AWS_ENDPOINT, endpoint);
+        config.setProperty(AWSConfigConstants.accessKeyId(AWS_CREDENTIALS_PROVIDER), ACCESS_KEY_ID);
+        config.setProperty(
+                AWSConfigConstants.secretKey(AWS_CREDENTIALS_PROVIDER), SECRET_ACCESS_KEY);
+        config.setProperty(TRUST_ALL_CERTIFICATES, "true");
+        return config;
+    }
+
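+    /** Synchronous Apache HTTP client configured for HTTP/1.1 and trusting all certificates. */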
+    public static SdkHttpClient createHttpClient() {
+        AttributeMap.Builder attributeMapBuilder = AttributeMap.builder();
+        attributeMapBuilder.put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true);
+        attributeMapBuilder.put(SdkHttpConfigurationOption.PROTOCOL, Protocol.HTTP1_1);
+        return ApacheHttpClient.builder().buildWithDefaults(attributeMapBuilder.build());
+    }
+
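+    /** Creates the bucket and blocks until S3 reports that it exists. */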
+    public static void createBucket(S3Client s3Client, String bucketName) {
+        CreateBucketRequest bucketRequest =
+                CreateBucketRequest.builder().bucket(bucketName).build();
+        s3Client.createBucket(bucketRequest);
+
+        HeadBucketRequest bucketRequestWait =
+                HeadBucketRequest.builder().bucket(bucketName).build();
+
+        try (final S3Waiter waiter = s3Client.waiter()) {
+            waiter.waitUntilBucketExists(bucketRequestWait);
+        }
+    }
+
+    public static void createIAMRole(IamClient iam, String roleName) {
+        CreateRoleRequest request = CreateRoleRequest.builder().roleName(roleName).build();
+
+        iam.createRole(request);
+    }
+
+    public static List<S3Object> listBucketObjects(S3Client s3, String bucketName) {
+        ListObjectsV2Request listObjects =
+                ListObjectsV2Request.builder().bucket(bucketName).build();
+        ListObjectsV2Response res = s3.listObjectsV2(listObjects);
+        return res.contents();
+    }
+
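+    /** Downloads each listed object and converts the raw bytes using the supplied deserializer. */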
+    public static <T> List<T> readObjectsFromS3Bucket(
+            S3Client s3Client,
+            List<S3Object> objects,
+            String bucketName,
+            Function<ResponseBytes<GetObjectResponse>, T> deserializer) {
+        return objects.stream()
+                .map(
+                        object ->
+                                GetObjectRequest.builder()
+                                        .bucket(bucketName)
+                                        .key(object.key())
+                                        .build())
+                .map(s3Client::getObjectAsBytes)
+                .map(deserializer)
+                .collect(Collectors.toList());
+    }
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/LocalstackContainer.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/LocalstackContainer.java
new file mode 100644
index 0000000..e11ec27
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/testutils/LocalstackContainer.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.testutils;
+
+import org.rnorth.ducttape.ratelimits.RateLimiter;
+import org.rnorth.ducttape.ratelimits.RateLimiterBuilder;
+import org.rnorth.ducttape.unreliables.Unreliables;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.S3Object;
+
+import java.util.List;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * A class wrapping the Localstack container that provides mock implementations of many common AWS
+ * services.
+ */
+public class LocalstackContainer extends GenericContainer<LocalstackContainer> {
+
+    private static final int CONTAINER_PORT = 4566;
+
+    public LocalstackContainer(DockerImageName imageName) {
+        super(imageName);
+        withExposedPorts(CONTAINER_PORT);
+        waitingFor(new ListBucketObjectsWaitStrategy());
+    }
+
+    public String getEndpoint() {
+        return String.format("https://%s:%s", getHost(), getMappedPort(CONTAINER_PORT));
+    }
+
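+    /** Waits for readiness by polling S3: create a throw-away bucket, then list its contents. */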
+    private class ListBucketObjectsWaitStrategy extends AbstractWaitStrategy {
+        private static final int TRANSACTIONS_PER_SECOND = 1;
+
+        private final RateLimiter rateLimiter =
+                RateLimiterBuilder.newBuilder()
+                        .withRate(TRANSACTIONS_PER_SECOND, SECONDS)
+                        .withConstantThroughput()
+                        .build();
+
+        @Override
+        protected void waitUntilReady() {
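+            // Fixed initial delay before the S3 readiness probe below starts polling.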
+            try {
+                Thread.sleep(30_000);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new IllegalStateException(
+                        "Localstack Container startup was interrupted", e);
+            }
+            Unreliables.retryUntilSuccess(
+                    (int) startupTimeout.getSeconds(),
+                    SECONDS,
+                    () -> rateLimiter.getWhenReady(this::list));
+        }
+
+        private List<S3Object> list() {
+            final String bucketName = "bucket-name-not-to-be-used";
+            try (final SdkHttpClient httpClient = AWSServicesTestUtils.createHttpClient();
+                    final S3Client client =
+                            AWSServicesTestUtils.createS3Client(getEndpoint(), httpClient)) {
+                AWSServicesTestUtils.createBucket(client, bucketName);
+                return AWSServicesTestUtils.listBucketObjects(client, bucketName);
+            }
+        }
+    }
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtilTest.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtilTest.java
new file mode 100644
index 0000000..cfc12bf
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSAsyncSinkUtilTest.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsAsyncClientBuilder;
+import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
+import software.amazon.awssdk.core.SdkClient;
+import software.amazon.awssdk.core.client.config.ClientAsyncConfiguration;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption;
+import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
+import software.amazon.awssdk.core.client.config.SdkClientOption;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.regions.Region;
+
+import java.net.URI;
+import java.time.Duration;
+import java.util.Properties;
+
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_ENDPOINT;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.connector.aws.util.AWSAsyncSinkUtil.formatFlinkUserAgentPrefix;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/** Tests for {@link AWSAsyncSinkUtil}. */
+class AWSAsyncSinkUtilTest {
+
+    private static final String DEFAULT_USER_AGENT_PREFIX_FORMAT =
+            "Apache Flink %s (%s) *Destination* Connector";
+    private static final String DEFAULT_USER_AGENT_PREFIX_FORMAT_V2 =
+            "Apache Flink %s (%s) *Destination* Connector V2";
+
+    @Test
+    void testCreateKinesisAsyncClient() {
+        Properties properties = TestUtil.properties(AWS_REGION, "eu-west-2");
+        MockAsyncClientBuilder builder = mockKinesisAsyncClientBuilder();
+        ClientOverrideConfiguration clientOverrideConfiguration =
+                ClientOverrideConfiguration.builder().build();
+        SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder().build();
+
+        AWSAsyncSinkUtil.createAwsAsyncClient(
+                properties, builder, httpClient, clientOverrideConfiguration);
+
+        verify(builder).overrideConfiguration(clientOverrideConfiguration);
+        verify(builder).httpClient(httpClient);
+        verify(builder).region(Region.of("eu-west-2"));
+        verify(builder)
+                .credentialsProvider(argThat(cp -> cp instanceof DefaultCredentialsProvider));
+        verify(builder, never()).endpointOverride(any());
+    }
+
+    @Test
+    void testCreateKinesisAsyncClientWithEndpointOverride() {
+        Properties properties = TestUtil.properties(AWS_REGION, "eu-west-2");
+        properties.setProperty(AWS_ENDPOINT, "https://localhost");
+
+        MockAsyncClientBuilder builder = mockKinesisAsyncClientBuilder();
+        ClientOverrideConfiguration clientOverrideConfiguration =
+                ClientOverrideConfiguration.builder().build();
+        SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder().build();
+
+        AWSAsyncSinkUtil.createAwsAsyncClient(
+                properties, builder, httpClient, clientOverrideConfiguration);
+
+        verify(builder).endpointOverride(URI.create("https://localhost"));
+    }
+
+    @Test
+    void testClientOverrideConfigurationWithDefaults() {
+        SdkClientConfiguration clientConfiguration = SdkClientConfiguration.builder().build();
+
+        ClientOverrideConfiguration.Builder builder = mockClientOverrideConfigurationBuilder();
+
+        AWSAsyncSinkUtil.createClientOverrideConfiguration(
+                clientConfiguration,
+                builder,
+                formatFlinkUserAgentPrefix(
+                        DEFAULT_USER_AGENT_PREFIX_FORMAT + AWSAsyncSinkUtil.V2_USER_AGENT_SUFFIX));
+
+        verify(builder).build();
+        verify(builder)
+                .putAdvancedOption(
+                        SdkAdvancedClientOption.USER_AGENT_PREFIX,
+                        formatFlinkUserAgentPrefix(DEFAULT_USER_AGENT_PREFIX_FORMAT_V2));
+        verify(builder).putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_SUFFIX, null);
+        verify(builder, never()).apiCallAttemptTimeout(any());
+        verify(builder, never()).apiCallTimeout(any());
+    }
+
+    @Test
+    void testClientOverrideConfigurationUserAgentSuffix() {
+        SdkClientConfiguration clientConfiguration =
+                SdkClientConfiguration.builder()
+                        .option(SdkAdvancedClientOption.USER_AGENT_SUFFIX, "suffix")
+                        .build();
+
+        ClientOverrideConfiguration.Builder builder = mockClientOverrideConfigurationBuilder();
+
+        AWSAsyncSinkUtil.createClientOverrideConfiguration(
+                clientConfiguration,
+                builder,
+                formatFlinkUserAgentPrefix(
+                        DEFAULT_USER_AGENT_PREFIX_FORMAT + AWSAsyncSinkUtil.V2_USER_AGENT_SUFFIX));
+
+        verify(builder).putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_SUFFIX, "suffix");
+    }
+
+    @Test
+    void testClientOverrideConfigurationApiCallAttemptTimeout() {
+        SdkClientConfiguration clientConfiguration =
+                SdkClientConfiguration.builder()
+                        .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, Duration.ofMillis(500))
+                        .build();
+
+        ClientOverrideConfiguration.Builder builder = mockClientOverrideConfigurationBuilder();
+
+        AWSAsyncSinkUtil.createClientOverrideConfiguration(
+                clientConfiguration,
+                builder,
+                formatFlinkUserAgentPrefix(
+                        DEFAULT_USER_AGENT_PREFIX_FORMAT_V2
+                                + AWSAsyncSinkUtil.V2_USER_AGENT_SUFFIX));
+
+        verify(builder).apiCallAttemptTimeout(Duration.ofMillis(500));
+    }
+
+    @Test
+    void testClientOverrideConfigurationApiCallTimeout() {
+        SdkClientConfiguration clientConfiguration =
+                SdkClientConfiguration.builder()
+                        .option(SdkClientOption.API_CALL_TIMEOUT, Duration.ofMillis(600))
+                        .build();
+
+        ClientOverrideConfiguration.Builder builder = mockClientOverrideConfigurationBuilder();
+
+        AWSAsyncSinkUtil.createClientOverrideConfiguration(
+                clientConfiguration,
+                builder,
+                formatFlinkUserAgentPrefix(
+                        DEFAULT_USER_AGENT_PREFIX_FORMAT_V2
+                                + AWSAsyncSinkUtil.V2_USER_AGENT_SUFFIX));
+
+        verify(builder).apiCallTimeout(Duration.ofMillis(600));
+    }
+
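+    /** Mockito mock of the async client builder, with fluent setters stubbed to return it. */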
+    private MockAsyncClientBuilder mockKinesisAsyncClientBuilder() {
+        MockAsyncClientBuilder builder = mock(MockAsyncClientBuilder.class);
+        when(builder.overrideConfiguration(any(ClientOverrideConfiguration.class)))
+                .thenReturn(builder);
+        when(builder.httpClient(any())).thenReturn(builder);
+        when(builder.credentialsProvider(any())).thenReturn(builder);
+        when(builder.region(any())).thenReturn(builder);
+
+        return builder;
+    }
+
+    private ClientOverrideConfiguration.Builder mockClientOverrideConfigurationBuilder() {
+        ClientOverrideConfiguration.Builder builder =
+                mock(ClientOverrideConfiguration.Builder.class);
+        when(builder.putAdvancedOption(any(), any())).thenReturn(builder);
+        when(builder.apiCallAttemptTimeout(any())).thenReturn(builder);
+        when(builder.apiCallTimeout(any())).thenReturn(builder);
+
+        return builder;
+    }
+
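+    /** Minimal builder type used purely as a Mockito mock target; every method returns null. */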
+    private static class MockAsyncClientBuilder
+            implements AwsAsyncClientBuilder<MockAsyncClientBuilder, SdkClient>,
+                    AwsClientBuilder<MockAsyncClientBuilder, SdkClient> {
+
+        @Override
+        public MockAsyncClientBuilder asyncConfiguration(
+                ClientAsyncConfiguration clientAsyncConfiguration) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder httpClient(SdkAsyncHttpClient sdkAsyncHttpClient) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder httpClientBuilder(SdkAsyncHttpClient.Builder builder) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder credentialsProvider(
+                AwsCredentialsProvider awsCredentialsProvider) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder region(Region region) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder dualstackEnabled(Boolean aBoolean) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder fipsEnabled(Boolean aBoolean) {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder overrideConfiguration(
+                ClientOverrideConfiguration clientOverrideConfiguration) {
+            return null;
+        }
+
+        @Override
+        public ClientOverrideConfiguration overrideConfiguration() {
+            return null;
+        }
+
+        @Override
+        public MockAsyncClientBuilder endpointOverride(URI uri) {
+            return null;
+        }
+
+        @Override
+        public SdkClient build() {
+            return null;
+        }
+    }
+}
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSGeneralUtilTest.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSGeneralUtilTest.java
new file mode 100644
index 0000000..ca5613a
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/AWSGeneralUtilTest.java
@@ -0,0 +1,792 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider;
+
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.WebIdentityTokenFileCredentialsProvider;
+import software.amazon.awssdk.core.exception.SdkClientException;
+import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
+import software.amazon.awssdk.utils.AttributeMap;
+import software.amazon.awssdk.utils.ImmutableMap;
+
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider.ASSUME_ROLE;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider.AUTO;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider.BASIC;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.CredentialProvider.WEB_IDENTITY_TOKEN;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.roleArn;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.roleSessionName;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.webIdentityTokenFile;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static software.amazon.awssdk.http.Protocol.HTTP1_1;
+import static software.amazon.awssdk.http.Protocol.HTTP2;
+
+/** Tests for {@link AWSGeneralUtil}. */
+class AWSGeneralUtilTest {
+
+    @Test
+    void testGetCredentialsProviderTypeDefaultsAuto() {
+        assertThat(
+                        AWSGeneralUtil.getCredentialProviderType(
+                                new Properties(), AWS_CREDENTIALS_PROVIDER))
+                .isEqualTo(AUTO);
+    }
+
+    @Test
+    void testGetCredentialsProviderTypeBasic() {
+        Properties testConfig =
+                TestUtil.properties(AWSConfigConstants.accessKeyId(AWS_CREDENTIALS_PROVIDER), "ak");
+        testConfig.setProperty(AWSConfigConstants.secretKey(AWS_CREDENTIALS_PROVIDER), "sk");
+
+        assertThat(AWSGeneralUtil.getCredentialProviderType(testConfig, AWS_CREDENTIALS_PROVIDER))
+                .isEqualTo(BASIC);
+    }
+
+    @Test
+    void testGetCredentialsProviderTypeWebIdentityToken() {
+        Properties testConfig = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN");
+
+        CredentialProvider type =
+                AWSGeneralUtil.getCredentialProviderType(testConfig, AWS_CREDENTIALS_PROVIDER);
+        assertThat(type).isEqualTo(WEB_IDENTITY_TOKEN);
+    }
+
+    @Test
+    void testGetCredentialsProviderTypeAssumeRole() {
+        Properties testConfig = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "ASSUME_ROLE");
+
+        CredentialProvider type =
+                AWSGeneralUtil.getCredentialProviderType(testConfig, AWS_CREDENTIALS_PROVIDER);
+        assertThat(type).isEqualTo(ASSUME_ROLE);
+    }
+
+    @Test
+    void testGetCredentialsProviderEnvironmentVariables() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "ENV_VAR");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(EnvironmentVariableCredentialsProvider.class);
+    }
+
+    @Test
+    void testGetCredentialsProviderSystemProperties() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "SYS_PROP");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(SystemPropertyCredentialsProvider.class);
+    }
+
+    @Test
+    void testGetCredentialsProviderWebIdentityTokenFileCredentialsProvider() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(WebIdentityTokenFileCredentialsProvider.class);
+    }
+
+    @Test
+    void testGetWebIdentityTokenFileCredentialsProvider() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN");
+        properties.setProperty(roleArn(AWS_CREDENTIALS_PROVIDER), "roleArn");
+        properties.setProperty(roleSessionName(AWS_CREDENTIALS_PROVIDER), "roleSessionName");
+
+        WebIdentityTokenFileCredentialsProvider.Builder builder =
+                mockWebIdentityTokenFileCredentialsProviderBuilder();
+
+        AWSGeneralUtil.getWebIdentityTokenFileCredentialsProvider(
+                builder, properties, AWS_CREDENTIALS_PROVIDER);
+
+        verify(builder).roleArn("roleArn");
+        verify(builder).roleSessionName("roleSessionName");
+        verify(builder, never()).webIdentityTokenFile(any());
+    }
+
+    @Test
+    void testGetWebIdentityTokenFileCredentialsProviderWithWebIdentityFile() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN");
+        properties.setProperty(
+                webIdentityTokenFile(AWS_CREDENTIALS_PROVIDER), "webIdentityTokenFile");
+
+        WebIdentityTokenFileCredentialsProvider.Builder builder =
+                mockWebIdentityTokenFileCredentialsProviderBuilder();
+
+        AWSGeneralUtil.getWebIdentityTokenFileCredentialsProvider(
+                builder, properties, AWS_CREDENTIALS_PROVIDER);
+
+        verify(builder).webIdentityTokenFile(Paths.get("webIdentityTokenFile"));
+    }
+
+    @Test
+    void testGetCredentialsProviderAuto() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "AUTO");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(DefaultCredentialsProvider.class);
+    }
+
+    @Test
+    void testGetCredentialsProviderFromMap() {
+        Map<String, Object> config = ImmutableMap.of(AWS_CREDENTIALS_PROVIDER, "AUTO");
+
+        AwsCredentialsProvider credentialsProvider = AWSGeneralUtil.getCredentialsProvider(config);
+
+        assertThat(credentialsProvider).isInstanceOf(DefaultCredentialsProvider.class);
+    }
+
+    @Test
+    void testGetCredentialsProviderAssumeRole() {
+        Properties properties = spy(TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "ASSUME_ROLE"));
+        properties.setProperty(AWS_REGION, "eu-west-2");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(StsAssumeRoleCredentialsProvider.class);
+
+        verify(properties).getProperty(AWSConfigConstants.roleArn(AWS_CREDENTIALS_PROVIDER));
+        verify(properties)
+                .getProperty(AWSConfigConstants.roleSessionName(AWS_CREDENTIALS_PROVIDER));
+        verify(properties).getProperty(AWSConfigConstants.externalId(AWS_CREDENTIALS_PROVIDER));
+        verify(properties).getProperty(AWS_REGION);
+    }
+
+    @Test
+    void testGetCredentialsProviderBasic() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "BASIC");
+        properties.setProperty(AWSConfigConstants.accessKeyId(AWS_CREDENTIALS_PROVIDER), "ak");
+        properties.setProperty(AWSConfigConstants.secretKey(AWS_CREDENTIALS_PROVIDER), "sk");
+
+        AwsCredentials credentials =
+                AWSGeneralUtil.getCredentialsProvider(properties).resolveCredentials();
+
+        assertThat(credentials.accessKeyId()).isEqualTo("ak");
+        assertThat(credentials.secretAccessKey()).isEqualTo("sk");
+    }
+
+    @Test
+    void testGetCredentialsProviderProfile() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "PROFILE");
+        properties.put(AWSConfigConstants.profileName(AWS_CREDENTIALS_PROVIDER), "default");
+        properties.put(
+                AWSConfigConstants.profilePath(AWS_CREDENTIALS_PROVIDER),
+                "src/test/resources/profile");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(ProfileCredentialsProvider.class);
+
+        AwsCredentials credentials = credentialsProvider.resolveCredentials();
+        assertThat(credentials.accessKeyId()).isEqualTo("11111111111111111111");
+        assertThat(credentials.secretAccessKey())
+                .isEqualTo("wJalrXUtnFEMI/K7MDENG/bPxRfiCY1111111111");
+    }
+
+    @Test
+    void testGetCredentialsProviderNamedProfile() {
+        Properties properties = TestUtil.properties(AWS_CREDENTIALS_PROVIDER, "PROFILE");
+        properties.setProperty(AWSConfigConstants.profileName(AWS_CREDENTIALS_PROVIDER), "foo");
+        properties.setProperty(
+                AWSConfigConstants.profilePath(AWS_CREDENTIALS_PROVIDER),
+                "src/test/resources/profile");
+
+        AwsCredentialsProvider credentialsProvider =
+                AWSGeneralUtil.getCredentialsProvider(properties);
+
+        assertThat(credentialsProvider).isInstanceOf(ProfileCredentialsProvider.class);
+
+        AwsCredentials credentials = credentialsProvider.resolveCredentials();
+        assertThat(credentials.accessKeyId()).isEqualTo("22222222222222222222");
+        assertThat(credentials.secretAccessKey())
+                .isEqualTo("wJalrXUtnFEMI/K7MDENG/bPxRfiCY2222222222");
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithPropertyTcpKeepAlive() throws Exception {
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(new Properties());
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.tcpKeepAlive()).isTrue();
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithPropertyMaxConcurrency() throws Exception {
+        int maxConnections = 45678;
+        Properties properties = new Properties();
+        properties.setProperty(
+                AWSConfigConstants.HTTP_CLIENT_MAX_CONCURRENCY, String.valueOf(maxConnections));
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(properties);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.maxConnections()).isEqualTo(maxConnections);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithPropertyReadTimeout() throws Exception {
+        int readTimeoutMillis = 45678;
+        Properties properties = new Properties();
+        properties.setProperty(
+                AWSConfigConstants.HTTP_CLIENT_READ_TIMEOUT_MILLIS,
+                String.valueOf(readTimeoutMillis));
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(properties);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.readTimeoutMillis()).isEqualTo(readTimeoutMillis);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithPropertyTrustAllCertificates() throws Exception {
+        boolean trustAllCerts = true;
+        Properties properties = new Properties();
+        properties.setProperty(
+                AWSConfigConstants.TRUST_ALL_CERTIFICATES, String.valueOf(trustAllCerts));
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(properties);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.trustAllCertificates()).isEqualTo(trustAllCerts);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithPropertyProtocol() throws Exception {
+        Protocol httpVersion = HTTP1_1;
+        Properties properties = new Properties();
+        properties.setProperty(
+                AWSConfigConstants.HTTP_PROTOCOL_VERSION, String.valueOf(httpVersion));
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(properties);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.attribute(SdkHttpConfigurationOption.PROTOCOL))
+                .isEqualTo(httpVersion);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsConnectionAcquireTimeout() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.connectionAcquireTimeoutMillis()).isEqualTo(60_000);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsConnectionTtl() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.connectionTtlMillis())
+                .isEqualTo(nettyDefaultConfiguration.connectionTtlMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsConnectionTimeout() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyDefaultConfiguration.connectTimeoutMillis())
+                .isEqualTo(nettyConfiguration.connectTimeoutMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsIdleTimeout() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.idleTimeoutMillis())
+                .isEqualTo(nettyDefaultConfiguration.idleTimeoutMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsMaxConnections() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.maxConnections()).isEqualTo(10_000);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsMaxPendingConnectionAcquires() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.maxPendingConnectionAcquires())
+                .isEqualTo(nettyDefaultConfiguration.maxPendingConnectionAcquires());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsReadTimeout() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.readTimeoutMillis()).isEqualTo(360_000);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsReapIdleConnections() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.reapIdleConnections())
+                .isEqualTo(nettyDefaultConfiguration.reapIdleConnections());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsTcpKeepAlive() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.tcpKeepAlive())
+                .isEqualTo(nettyDefaultConfiguration.tcpKeepAlive());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsTlsKeyManagersProvider() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.tlsKeyManagersProvider())
+                .isEqualTo(nettyDefaultConfiguration.tlsKeyManagersProvider());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsTlsTrustManagersProvider() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.tlsTrustManagersProvider())
+                .isEqualTo(nettyDefaultConfiguration.tlsTrustManagersProvider());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsTrustAllCertificates() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.trustAllCertificates()).isFalse();
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsWriteTimeout() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        SdkAsyncHttpClient httpDefaultClient = NettyNioAsyncHttpClient.create();
+        NettyConfiguration nettyDefaultConfiguration =
+                TestUtil.getNettyConfiguration(httpDefaultClient);
+
+        assertThat(nettyConfiguration.writeTimeoutMillis())
+                .isEqualTo(nettyDefaultConfiguration.writeTimeoutMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWithDefaultsProtocol() throws Exception {
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+
+        SdkAsyncHttpClient httpClient = AWSGeneralUtil.createAsyncHttpClient(builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.attribute(SdkHttpConfigurationOption.PROTOCOL))
+                .isEqualTo(HTTP2);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientReadTimeout() throws Exception {
+        Duration readTimeout = Duration.ofMillis(1234);
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.READ_TIMEOUT, readTimeout)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.readTimeoutMillis()).isEqualTo(readTimeout.toMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientTcpKeepAlive() throws Exception {
+        boolean tcpKeepAlive = true;
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.TCP_KEEPALIVE, tcpKeepAlive)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.tcpKeepAlive()).isEqualTo(tcpKeepAlive);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientConnectionTimeout() throws Exception {
+        Duration connectionTimeout = Duration.ofMillis(1000);
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.CONNECTION_TIMEOUT, connectionTimeout)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.connectTimeoutMillis())
+                .isEqualTo(connectionTimeout.toMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientMaxConcurrency() throws Exception {
+        int maxConnections = 123;
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.MAX_CONNECTIONS, maxConnections)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.maxConnections()).isEqualTo(maxConnections);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientWriteTimeout() throws Exception {
+        Duration writeTimeout = Duration.ofMillis(3000);
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.WRITE_TIMEOUT, writeTimeout)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.writeTimeoutMillis()).isEqualTo(writeTimeout.toMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientConnectionMaxIdleTime() throws Exception {
+        Duration maxIdleTime = Duration.ofMillis(2000);
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT, maxIdleTime)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.idleTimeoutMillis()).isEqualTo(maxIdleTime.toMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientIdleConnectionReaper() throws Exception {
+        boolean reapIdleConnections = false;
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.REAP_IDLE_CONNECTIONS, reapIdleConnections)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.reapIdleConnections()).isEqualTo(reapIdleConnections);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientIdleConnectionTtl() throws Exception {
+        Duration connectionTtl = Duration.ofMillis(5000);
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.CONNECTION_TIME_TO_LIVE, connectionTtl)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.connectionTtlMillis()).isEqualTo(connectionTtl.toMillis());
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientTrustAllCertificates() throws Exception {
+        boolean trustAllCertificates = true;
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(
+                                SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
+                                trustAllCertificates)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.trustAllCertificates()).isEqualTo(trustAllCertificates);
+    }
+
+    @Test
+    void testCreateNettyAsyncHttpClientHttpVersion() throws Exception {
+        Protocol httpVersion = HTTP1_1;
+
+        AttributeMap clientConfiguration =
+                AttributeMap.builder()
+                        .put(SdkHttpConfigurationOption.PROTOCOL, httpVersion)
+                        .build();
+
+        NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder();
+        SdkAsyncHttpClient httpClient =
+                AWSGeneralUtil.createAsyncHttpClient(clientConfiguration, builder);
+        NettyConfiguration nettyConfiguration = TestUtil.getNettyConfiguration(httpClient);
+
+        assertThat(nettyConfiguration.attribute(SdkHttpConfigurationOption.PROTOCOL))
+                .isEqualTo(httpVersion);
+    }
+
+    @Test
+    void testGetRegion() {
+        Region region = AWSGeneralUtil.getRegion(TestUtil.properties(AWS_REGION, "eu-west-2"));
+
+        assertThat(region).isEqualTo(Region.EU_WEST_2);
+    }
+
+    @Test
+    void testValidRegion() {
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("us-east-1"))).isTrue();
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("us-gov-west-1"))).isTrue();
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("us-isob-east-1"))).isTrue();
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("aws-global"))).isTrue();
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("aws-iso-global"))).isTrue();
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("aws-iso-b-global"))).isTrue();
+    }
+
+    @Test
+    void testInvalidRegion() {
+        assertThat(AWSGeneralUtil.isValidRegion(Region.of("unstructured-string"))).isFalse();
+    }
+
+    @Test
+    void testUnrecognizableAwsRegionInConfig() {
+
+        Properties testConfig = TestUtil.properties(AWSConfigConstants.AWS_REGION, "wrongRegionId");
+        testConfig.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKeyId");
+        testConfig.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsConfiguration(testConfig))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining("Invalid AWS region");
+    }
+
+    @Test
+    void testCredentialProviderTypeSetToBasicButNoCredentialSetInConfig() {
+        Properties testConfig = TestUtil.properties(AWSConfigConstants.AWS_REGION, "us-east-1");
+        testConfig.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "BASIC");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsConfiguration(testConfig))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessage(
+                        "Please set values for AWS Access Key ID ('"
+                                + AWSConfigConstants.AWS_ACCESS_KEY_ID
+                                + "') "
+                                + "and Secret Key ('"
+                                + AWSConfigConstants.AWS_SECRET_ACCESS_KEY
+                                + "') when using the BASIC AWS credential provider type.");
+    }
+
+    @Test
+    void testUnrecognizableCredentialProviderTypeInConfig() {
+        Properties testConfig = TestUtil.getStandardProperties();
+        testConfig.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "wrongProviderType");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsConfiguration(testConfig))
+                .isInstanceOf(IllegalArgumentException.class)
+                .hasMessageContaining("Invalid AWS Credential Provider Type");
+    }
+
+    @Test
+    void testMissingWebIdentityTokenFileInCredentials() {
+        Properties properties = TestUtil.getStandardProperties();
+        properties.setProperty(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsCredentials(properties))
+                .isInstanceOf(IllegalStateException.class)
+                .hasMessage(
+                        "Either the environment variable AWS_WEB_IDENTITY_TOKEN_FILE or the javaproperty aws.webIdentityTokenFile must be set.");
+    }
+
+    @Test
+    void testMissingEnvironmentVariableCredentials() {
+        Properties properties = TestUtil.getStandardProperties();
+        properties.setProperty(AWS_CREDENTIALS_PROVIDER, "ENV_VAR");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsCredentials(properties))
+                .isInstanceOf(SdkClientException.class)
+                .hasMessageContaining(
+                        "Access key must be specified either via environment variable");
+    }
+
+    @Test
+    void testFailedSystemPropertiesCredentialsValidationsOnMissingAccessKey() {
+        Properties properties = TestUtil.getStandardProperties();
+        properties.setProperty(AWS_CREDENTIALS_PROVIDER, "SYS_PROP");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsCredentials(properties))
+                .isInstanceOf(SdkClientException.class)
+                .hasMessageContaining(
+                        "Access key must be specified either via environment variable (AWS_ACCESS_KEY_ID) or system property (aws.accessKeyId)");
+    }
+
+    @Test
+    void testFailedSystemPropertiesCredentialsValidationsOnMissingSecretKey() {
+        System.setProperty("aws.accessKeyId", "accesKeyId");
+        Properties properties = TestUtil.getStandardProperties();
+        properties.setProperty(AWS_CREDENTIALS_PROVIDER, "SYS_PROP");
+
+        assertThatThrownBy(() -> AWSGeneralUtil.validateAwsCredentials(properties))
+                .isInstanceOf(SdkClientException.class)
+                .hasMessageContaining(
+                        "Secret key must be specified either via environment variable (AWS_SECRET_ACCESS_KEY) or system property (aws.secretAccessKey)");
+    }
+
+    private WebIdentityTokenFileCredentialsProvider.Builder
+            mockWebIdentityTokenFileCredentialsProviderBuilder() {
+        WebIdentityTokenFileCredentialsProvider.Builder builder =
+                mock(WebIdentityTokenFileCredentialsProvider.Builder.class);
+        when(builder.roleArn(any())).thenReturn(builder);
+        when(builder.roleSessionName(any())).thenReturn(builder);
+        when(builder.webIdentityTokenFile(any())).thenReturn(builder);
+
+        return builder;
+    }
+}
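
Note: the validation and region helpers exercised above are driven purely by java.util.Properties. A minimal sketch of that configuration pattern, using only the constants and methods that appear in this commit (imports as in the test class above), would be:

    Properties config = new Properties();
    config.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-2");
    config.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "BASIC");
    config.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKeyId");
    config.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");

    // Both helpers throw IllegalArgumentException for invalid values, as asserted above.
    AWSGeneralUtil.validateAwsConfiguration(config);
    Region region = AWSGeneralUtil.getRegion(config);  // Region.EU_WEST_2
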
diff --git a/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/TestUtil.java b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/TestUtil.java
new file mode 100644
index 0000000..cc52d17
--- /dev/null
+++ b/flink-connector-aws-base/src/test/java/org/apache/flink/connector/aws/util/TestUtil.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.aws.util;
+
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration;
+
+import java.lang.reflect.Field;
+import java.util.Properties;
+
+/** Utilities for tests in the package. */
+public class TestUtil {
+    public static Properties properties(final String key, final String value) {
+        Properties properties = new Properties();
+        properties.setProperty(key, value);
+        return properties;
+    }
+
+    public static Properties getStandardProperties() {
+        Properties config = properties(AWSConfigConstants.AWS_REGION, "us-east-1");
+        config.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, "accessKeyId");
+        config.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, "secretKey");
+
+        return config;
+    }
+
+    public static NettyConfiguration getNettyConfiguration(final SdkAsyncHttpClient httpClient)
+            throws Exception {
+        return getField("configuration", httpClient);
+    }
+
+    public static <T> T getField(String fieldName, Object obj) throws Exception {
+        Field field = obj.getClass().getDeclaredField(fieldName);
+        field.setAccessible(true);
+        return (T) field.get(obj);
+    }
+}
diff --git a/flink-connector-aws-base/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/flink-connector-aws-base/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
new file mode 100644
index 0000000..2899913
--- /dev/null
+++ b/flink-connector-aws-base/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.util.TestLoggerExtension
\ No newline at end of file
diff --git a/flink-connector-aws-base/src/test/resources/archunit.properties b/flink-connector-aws-base/src/test/resources/archunit.properties
new file mode 100644
index 0000000..15be88c
--- /dev/null
+++ b/flink-connector-aws-base/src/test/resources/archunit.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# By default we allow removing existing violations, but fail when new violations are added.
+freeze.store.default.allowStoreUpdate=true
+
+# Enable this if a new (frozen) rule has been added in order to create the initial store and record the existing violations.
+#freeze.store.default.allowStoreCreation=true
+
+# Enable this to allow new violations to be recorded.
+# NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new
+#       violation, please try to avoid creating the violation. If the violation was created due to a
+#       shortcoming of the rule, file a JIRA issue so the rule can be improved.
+#freeze.refreeze=true
+
+freeze.store.default.path=archunit-violations
diff --git a/flink-connector-aws-base/src/test/resources/log4j2-test.properties b/flink-connector-aws-base/src/test/resources/log4j2-test.properties
new file mode 100644
index 0000000..c4fa187
--- /dev/null
+++ b/flink-connector-aws-base/src/test/resources/log4j2-test.properties
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set the root logger level to OFF to avoid flooding build logs;
+# set it manually to INFO for debugging purposes.
+rootLogger.level = OFF
+rootLogger.appenderRef.test.ref = TestLogger
+
+appender.testlogger.name = TestLogger
+appender.testlogger.type = CONSOLE
+appender.testlogger.target = SYSTEM_ERR
+appender.testlogger.layout.type = PatternLayout
+appender.testlogger.layout.pattern = %-4r [%t] %-5p %c %x - %m%n
diff --git a/flink-connector-aws-base/src/test/resources/profile b/flink-connector-aws-base/src/test/resources/profile
new file mode 100644
index 0000000..2573fd6
--- /dev/null
+++ b/flink-connector-aws-base/src/test/resources/profile
@@ -0,0 +1,7 @@
+[default]
+aws_access_key_id=11111111111111111111
+aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCY1111111111
+
+[foo]
+aws_access_key_id=22222222222222222222
+aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCY2222222222
diff --git a/pom.xml b/pom.xml
index fbf9545..0dc0342 100644
--- a/pom.xml
+++ b/pom.xml
@@ -62,17 +62,13 @@ under the License.
         <assertj.version>3.21.0</assertj.version>
         <archunit.version>0.22.0</archunit.version>
         <testcontainers.version>1.17.2</testcontainers.version>
-        <mockito.version>2.21.0</mockito.version>
-
-        <!-- For dependency convergence -->
-        <kryo.version>2.24.0</kryo.version>
-        <slf4j.version>1.7.36</slf4j.version>
-        <snappy.version>1.1.8.3</snappy.version>
+        <mockito.version>3.4.6</mockito.version>
 
         <flink.parent.artifactId>flink-connector-aws-parent</flink.parent.artifactId>
     </properties>
 
     <modules>
+        <module>flink-connector-aws-base</module>
         <module>flink-connector-dynamodb</module>
         <module>flink-sql-connector-dynamodb</module>
     </modules>
@@ -102,6 +98,20 @@ under the License.
             <scope>test</scope>
         </dependency>
 
+        <dependency>
+            <groupId>org.assertj</groupId>
+            <artifactId>assertj-core</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <version>${mockito.version}</version>
+            <type>jar</type>
+            <scope>test</scope>
+        </dependency>
+
         <dependency>
             <groupId>org.apache.flink</groupId>
             <artifactId>flink-test-utils</artifactId>
@@ -141,6 +151,13 @@ under the License.
                 <scope>test</scope>
             </dependency>
 
+            <dependency>
+                <groupId>org.apache.flink</groupId>
+                <artifactId>flink-architecture-tests-test</artifactId>
+                <version>${flink.version}</version>
+                <scope>test</scope>
+            </dependency>
+
             <dependency>
                 <groupId>org.junit</groupId>
                 <artifactId>junit-bom</artifactId>
@@ -168,23 +185,28 @@ under the License.
             <dependency>
                 <groupId>org.slf4j</groupId>
                 <artifactId>slf4j-api</artifactId>
-                <version>${slf4j.version}</version>
+                <version>1.7.36</version>
             </dependency>
             <dependency>
                 <groupId>com.esotericsoftware.kryo</groupId>
                 <artifactId>kryo</artifactId>
-                <version>${kryo.version}</version>
+                <version>2.24.0</version>
             </dependency>
             <dependency>
                 <groupId>org.xerial.snappy</groupId>
                 <artifactId>snappy-java</artifactId>
-                <version>${snappy.version}</version>
+                <version>1.1.8.3</version>
             </dependency>
             <dependency>
                 <groupId>com.google.code.findbugs</groupId>
                 <artifactId>jsr305</artifactId>
                 <version>1.3.9</version>
             </dependency>
+            <dependency>
+                <groupId>org.objenesis</groupId>
+                <artifactId>objenesis</artifactId>
+                <version>2.1</version>
+            </dependency>
         </dependencies>
     </dependencyManagement>
 


[flink-connector-aws] 03/08: [FLINK-29907][Connectors/Firehose] Externalize Amazon Firehose connectors from Flink repo

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit 5993e34901152396a937cee399ced3beef602f0f
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 09:31:35 2022 +0000

    [FLINK-29907][Connectors/Firehose] Externalize Amazon Firehose connectors from Flink repo
---
 .../54da9a7d-14d2-4632-a045-1dd8fc665c8f           |   0
 .../a6cbd99c-b115-447a-8f19-43c1094db549           |   6 +
 .../archunit-violations/stored.rules               |   4 +
 flink-connector-aws-kinesis-firehose/pom.xml       | 153 ++++++++++++
 .../sink/KinesisFirehoseConfigConstants.java       |  32 +++
 .../firehose/sink/KinesisFirehoseException.java    |  54 +++++
 .../firehose/sink/KinesisFirehoseSink.java         | 135 +++++++++++
 .../firehose/sink/KinesisFirehoseSinkBuilder.java  | 164 +++++++++++++
 .../sink/KinesisFirehoseSinkElementConverter.java  | 104 ++++++++
 .../firehose/sink/KinesisFirehoseSinkWriter.java   | 264 +++++++++++++++++++++
 .../sink/KinesisFirehoseStateSerializer.java       |  52 ++++
 .../table/KinesisFirehoseConnectorOptions.java     |  43 ++++
 .../firehose/table/KinesisFirehoseDynamicSink.java | 183 ++++++++++++++
 .../table/KinesisFirehoseDynamicTableFactory.java  |  89 +++++++
 .../util/KinesisFirehoseConnectorOptionUtils.java  |  67 ++++++
 .../org.apache.flink.table.factories.Factory       |  16 ++
 .../src/main/resources/log4j2.properties           |  25 ++
 .../architecture/TestCodeArchitectureTest.java     |  40 ++++
 .../sink/KinesisFirehoseSinkBuilderTest.java       |  81 +++++++
 .../KinesisFirehoseSinkElementConverterTest.java   |  54 +++++
 .../firehose/sink/KinesisFirehoseSinkITCase.java   | 125 ++++++++++
 .../firehose/sink/KinesisFirehoseSinkTest.java     | 132 +++++++++++
 .../sink/KinesisFirehoseSinkWriterTest.java        | 106 +++++++++
 .../sink/KinesisFirehoseStateSerializerTest.java   |  56 +++++
 .../sink/testutils/KinesisFirehoseTestUtils.java   |  86 +++++++
 .../KinesisFirehoseDynamicTableFactoryTest.java    | 157 ++++++++++++
 .../org.junit.jupiter.api.extension.Extension      |  16 ++
 .../src/test/resources/archunit.properties         |  31 +++
 .../src/test/resources/log4j2-test.properties      |  28 +++
 flink-sql-connector-aws-kinesis-firehose/pom.xml   | 107 +++++++++
 .../src/main/resources/META-INF/NOTICE             |  48 ++++
 pom.xml                                            |   2 +
 32 files changed, 2460 insertions(+)

diff --git a/flink-connector-aws-kinesis-firehose/archunit-violations/54da9a7d-14d2-4632-a045-1dd8fc665c8f b/flink-connector-aws-kinesis-firehose/archunit-violations/54da9a7d-14d2-4632-a045-1dd8fc665c8f
new file mode 100644
index 0000000..e69de29
diff --git a/flink-connector-aws-kinesis-firehose/archunit-violations/a6cbd99c-b115-447a-8f19-43c1094db549 b/flink-connector-aws-kinesis-firehose/archunit-violations/a6cbd99c-b115-447a-8f19-43c1094db549
new file mode 100644
index 0000000..5ad7b14
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/archunit-violations/a6cbd99c-b115-447a-8f19-43c1094db549
@@ -0,0 +1,6 @@
+org.apache.flink.connector.firehose.sink.KinesisFirehoseSinkITCase does not satisfy: only one of the following predicates match:\
+* reside in a package 'org.apache.flink.runtime.*' and contain any fields that are static, final, and of type InternalMiniClusterExtension and annotated with @RegisterExtension\
+* reside outside of package 'org.apache.flink.runtime.*' and contain any fields that are static, final, and of type MiniClusterExtension and annotated with @RegisterExtension\
+* reside in a package 'org.apache.flink.runtime.*' and is annotated with @ExtendWith with class InternalMiniClusterExtension\
+* reside outside of package 'org.apache.flink.runtime.*' and is annotated with @ExtendWith with class MiniClusterExtension\
+ or contain any fields that are public, static, and of type MiniClusterWithClientResource and final and annotated with @ClassRule or contain any fields that is of type MiniClusterWithClientResource and public and final and not static and annotated with @Rule
\ No newline at end of file
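
Note: the frozen violation above records that KinesisFirehoseSinkITCase does not yet follow the MiniCluster rule. A sketch of a test class that would satisfy it, assuming the standard MiniClusterExtension from flink-test-utils (not part of this diff) and a hypothetical class name, looks like:

    import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
    import org.apache.flink.test.junit5.MiniClusterExtension;
    import org.junit.jupiter.api.extension.RegisterExtension;

    class ExampleSinkITCase {
        // A static final MiniClusterExtension registered via @RegisterExtension is what the
        // ArchUnit rule looks for in classes outside org.apache.flink.runtime.*.
        @RegisterExtension
        static final MiniClusterExtension MINI_CLUSTER =
                new MiniClusterExtension(
                        new MiniClusterResourceConfiguration.Builder()
                                .setNumberTaskManagers(1)
                                .setNumberSlotsPerTaskManager(1)
                                .build());
    }
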
diff --git a/flink-connector-aws-kinesis-firehose/archunit-violations/stored.rules b/flink-connector-aws-kinesis-firehose/archunit-violations/stored.rules
new file mode 100644
index 0000000..cf8b667
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/archunit-violations/stored.rules
@@ -0,0 +1,4 @@
+#
+#Tue Feb 22 12:19:26 CET 2022
+Tests\ inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=54da9a7d-14d2-4632-a045-1dd8fc665c8f
+ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ extension=a6cbd99c-b115-447a-8f19-43c1094db549
diff --git a/flink-connector-aws-kinesis-firehose/pom.xml b/flink-connector-aws-kinesis-firehose/pom.xml
new file mode 100644
index 0000000..8fa9179
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/pom.xml
@@ -0,0 +1,153 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>flink-connector-aws-kinesis-firehose</artifactId>
+    <name>Flink : Connectors : AWS : Amazon Kinesis Data Firehose</name>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-streaming-java</artifactId>
+            <version>${flink.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-base</artifactId>
+            <version>${flink.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>firehose</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>netty-nio-client</artifactId>
+        </dependency>
+
+        <!--Table Api Dependencies-->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-common</artifactId>
+            <version>${flink.version}</version>
+        </dependency>
+
+        <!-- Test dependencies -->
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-base</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-base</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-common</artifactId>
+            <version>${flink.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-table-test-utils</artifactId>
+            <version>${flink.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>s3</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>iam</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- ArchUit test dependencies -->
+
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-architecture-tests-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseConfigConstants.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseConfigConstants.java
new file mode 100644
index 0000000..527f74a
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseConfigConstants.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/** Defaults for {@link KinesisFirehoseSinkWriter}. */
+@PublicEvolving
+public class KinesisFirehoseConfigConstants {
+
+    public static final String BASE_FIREHOSE_USER_AGENT_PREFIX_FORMAT =
+            "Apache Flink %s (%s) Firehose Connector";
+
+    /** Firehose identifier for user agent prefix. */
+    public static final String FIREHOSE_CLIENT_USER_AGENT_PREFIX =
+            "aws.firehose.client.user-agent-prefix";
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseException.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseException.java
new file mode 100644
index 0000000..e76c10b
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseException.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+
+/**
+ * A {@link RuntimeException} wrapper indicating the exception was thrown from the Kinesis Data
+ * Firehose Sink.
+ */
+@PublicEvolving
+class KinesisFirehoseException extends RuntimeException {
+
+    public KinesisFirehoseException(final String message) {
+        super(message);
+    }
+
+    public KinesisFirehoseException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    /**
+     * When the flag {@code failOnError} is set in {@link KinesisFirehoseSinkWriter}, this exception
+     * is raised as soon as any exception occurs when writing to KDF.
+     */
+    static class KinesisFirehoseFailFastException extends KinesisFirehoseException {
+
+        private static final String ERROR_MESSAGE =
+                "Encountered an exception while persisting records, not retrying due to {failOnError} being set.";
+
+        public KinesisFirehoseFailFastException() {
+            super(ERROR_MESSAGE);
+        }
+
+        public KinesisFirehoseFailFastException(final Throwable cause) {
+            super(ERROR_MESSAGE, cause);
+        }
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSink.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSink.java
new file mode 100644
index 0000000..6f9ed54
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSink.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.connector.base.sink.AsyncSinkBase;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Properties;
+
+/**
+ * A Kinesis Data Firehose (KDF) Sink that performs async requests against a destination delivery
+ * stream using the buffering protocol specified in {@link AsyncSinkBase}.
+ *
+ * <p>The sink internally uses a {@link
+ * software.amazon.awssdk.services.firehose.FirehoseAsyncClient} to communicate with the AWS
+ * endpoint.
+ *
+ * <p>Please see the writer implementation in {@link KinesisFirehoseSinkWriter}.
+ *
+ * @param <InputT> Type of the elements handled by this sink
+ */
+@PublicEvolving
+public class KinesisFirehoseSink<InputT> extends AsyncSinkBase<InputT, Record> {
+
+    private final boolean failOnError;
+    private final String deliveryStreamName;
+    private final Properties firehoseClientProperties;
+
+    KinesisFirehoseSink(
+            ElementConverter<InputT, Record> elementConverter,
+            Integer maxBatchSize,
+            Integer maxInFlightRequests,
+            Integer maxBufferedRequests,
+            Long maxBatchSizeInBytes,
+            Long maxTimeInBufferMS,
+            Long maxRecordSizeInBytes,
+            boolean failOnError,
+            String deliveryStreamName,
+            Properties firehoseClientProperties) {
+        super(
+                elementConverter,
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBatchSizeInBytes,
+                maxTimeInBufferMS,
+                maxRecordSizeInBytes);
+        this.deliveryStreamName =
+                Preconditions.checkNotNull(
+                        deliveryStreamName,
+                        "The delivery stream name must not be null when initializing the KDF Sink.");
+        Preconditions.checkArgument(
+                !this.deliveryStreamName.isEmpty(),
+                "The delivery stream name must be set when initializing the KDF Sink.");
+        this.failOnError = failOnError;
+        this.firehoseClientProperties = firehoseClientProperties;
+    }
+
+    /**
+     * Create a {@link KinesisFirehoseSinkBuilder} to allow the fluent construction of a new {@code
+     * KinesisFirehoseSink}.
+     *
+     * @param <InputT> type of incoming records
+     * @return {@link KinesisFirehoseSinkBuilder}
+     */
+    public static <InputT> KinesisFirehoseSinkBuilder<InputT> builder() {
+        return new KinesisFirehoseSinkBuilder<>();
+    }
+
+    @Override
+    public StatefulSinkWriter<InputT, BufferedRequestState<Record>> createWriter(
+            InitContext context) throws IOException {
+        return new KinesisFirehoseSinkWriter<>(
+                getElementConverter(),
+                context,
+                getMaxBatchSize(),
+                getMaxInFlightRequests(),
+                getMaxBufferedRequests(),
+                getMaxBatchSizeInBytes(),
+                getMaxTimeInBufferMS(),
+                getMaxRecordSizeInBytes(),
+                failOnError,
+                deliveryStreamName,
+                firehoseClientProperties,
+                Collections.emptyList());
+    }
+
+    @Override
+    public StatefulSinkWriter<InputT, BufferedRequestState<Record>> restoreWriter(
+            InitContext context, Collection<BufferedRequestState<Record>> recoveredState)
+            throws IOException {
+        return new KinesisFirehoseSinkWriter<>(
+                getElementConverter(),
+                context,
+                getMaxBatchSize(),
+                getMaxInFlightRequests(),
+                getMaxBufferedRequests(),
+                getMaxBatchSizeInBytes(),
+                getMaxTimeInBufferMS(),
+                getMaxRecordSizeInBytes(),
+                failOnError,
+                deliveryStreamName,
+                firehoseClientProperties,
+                recoveredState);
+    }
+
+    @Override
+    public SimpleVersionedSerializer<BufferedRequestState<Record>> getWriterStateSerializer() {
+        return new KinesisFirehoseStateSerializer();
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilder.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilder.java
new file mode 100644
index 0000000..087fd6b
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilder.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.connector.base.sink.AsyncSinkBaseBuilder;
+
+import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.util.Optional;
+import java.util.Properties;
+
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.HTTP_PROTOCOL_VERSION;
+import static software.amazon.awssdk.http.Protocol.HTTP1_1;
+
+/**
+ * Builder to construct {@link KinesisFirehoseSink}.
+ *
+ * <p>The following example shows the minimum setup to create a {@link KinesisFirehoseSink} that
+ * writes String values to a Kinesis Data Firehose delivery stream named delivery-stream-name.
+ *
+ * <pre>{@code
+ * Properties sinkProperties = new Properties();
+ * sinkProperties.put(AWSConfigConstants.AWS_REGION, "eu-west-1");
+ *
+ * KinesisFirehoseSink<String> kdfSink =
+ *         KinesisFirehoseSink.<String>builder()
+ *                 .setElementConverter(elementConverter)
+ *                 .setDeliveryStreamName("delivery-stream-name")
+ *                 .setMaxBatchSize(20)
+ *                 .setFirehoseClientProperties(sinkProperties)
+ *                 .setSerializationSchema(new SimpleStringSchema())
+ *                 .build();
+ * }</pre>
+ *
+ * <p>If the following parameters are not set in this builder, the following defaults will be used:
+ *
+ * <ul>
+ *   <li>{@code maxBatchSize} will be 500
+ *   <li>{@code maxInFlightRequests} will be 50
+ *   <li>{@code maxBufferedRequests} will be 10000
+ *   <li>{@code maxBatchSizeInBytes} will be 4 MB i.e. {@code 4 * 1024 * 1024}
+ *   <li>{@code maxTimeInBufferMS} will be 5000ms
+ *   <li>{@code maxRecordSizeInBytes} will be 1000 KB i.e. {@code 1000 * 1024}
+ *   <li>{@code failOnError} will be false
+ * </ul>
+ *
+ * @param <InputT> type of elements that should be persisted in the destination
+ */
+@PublicEvolving
+public class KinesisFirehoseSinkBuilder<InputT>
+        extends AsyncSinkBaseBuilder<InputT, Record, KinesisFirehoseSinkBuilder<InputT>> {
+
+    private static final int DEFAULT_MAX_BATCH_SIZE = 500;
+    private static final int DEFAULT_MAX_IN_FLIGHT_REQUESTS = 50;
+    private static final int DEFAULT_MAX_BUFFERED_REQUESTS = 10_000;
+    private static final long DEFAULT_MAX_BATCH_SIZE_IN_B = 4 * 1024 * 1024;
+    private static final long DEFAULT_MAX_TIME_IN_BUFFER_MS = 5000;
+    private static final long DEFAULT_MAX_RECORD_SIZE_IN_B = 1000 * 1024;
+    private static final boolean DEFAULT_FAIL_ON_ERROR = false;
+    private static final Protocol DEFAULT_HTTP_PROTOCOL = HTTP1_1;
+
+    private Boolean failOnError;
+    private String deliveryStreamName;
+    private Properties firehoseClientProperties;
+    private SerializationSchema<InputT> serializationSchema;
+
+    KinesisFirehoseSinkBuilder() {}
+
+    /**
+     * Sets the name of the KDF delivery stream that the sink will connect to. There is no default
+     * for this parameter; it must therefore be provided at sink creation time, otherwise the
+     * build will fail.
+     *
+     * @param deliveryStreamName the name of the delivery stream
+     * @return {@link KinesisFirehoseSinkBuilder} itself
+     */
+    public KinesisFirehoseSinkBuilder<InputT> setDeliveryStreamName(String deliveryStreamName) {
+        this.deliveryStreamName = deliveryStreamName;
+        return this;
+    }
+
+    /**
+     * Allows the user to specify a serialization schema to serialize each record to persist to
+     * Firehose.
+     *
+     * @param serializationSchema serialization schema to use
+     * @return {@link KinesisFirehoseSinkBuilder} itself
+     */
+    public KinesisFirehoseSinkBuilder<InputT> setSerializationSchema(
+            SerializationSchema<InputT> serializationSchema) {
+        this.serializationSchema = serializationSchema;
+        return this;
+    }
+
+    /**
+     * If failOnError is set and writing to Kinesis Data Firehose returns a partial or full
+     * failure, the job will fail immediately with a {@link KinesisFirehoseException}.
+     *
+     * @param failOnError whether to fail on error
+     * @return {@link KinesisFirehoseSinkBuilder} itself
+     */
+    public KinesisFirehoseSinkBuilder<InputT> setFailOnError(boolean failOnError) {
+        this.failOnError = failOnError;
+        return this;
+    }
+
+    /**
+     * A set of properties used by the sink to create the Firehose client. This may be used to set
+     * the AWS region, credentials, etc. See the docs for usage and syntax.
+     *
+     * @param firehoseClientProperties Firehose client properties
+     * @return {@link KinesisFirehoseSinkBuilder} itself
+     */
+    public KinesisFirehoseSinkBuilder<InputT> setFirehoseClientProperties(
+            Properties firehoseClientProperties) {
+        this.firehoseClientProperties = firehoseClientProperties;
+        return this;
+    }
+
+    @VisibleForTesting
+    Properties getClientPropertiesWithDefaultHttpProtocol() {
+        Properties clientProperties =
+                Optional.ofNullable(firehoseClientProperties).orElse(new Properties());
+        clientProperties.putIfAbsent(HTTP_PROTOCOL_VERSION, DEFAULT_HTTP_PROTOCOL.toString());
+        return clientProperties;
+    }
+
+    @Override
+    public KinesisFirehoseSink<InputT> build() {
+        return new KinesisFirehoseSink<>(
+                KinesisFirehoseSinkElementConverter.<InputT>builder()
+                        .setSerializationSchema(serializationSchema)
+                        .build(),
+                Optional.ofNullable(getMaxBatchSize()).orElse(DEFAULT_MAX_BATCH_SIZE),
+                Optional.ofNullable(getMaxInFlightRequests())
+                        .orElse(DEFAULT_MAX_IN_FLIGHT_REQUESTS),
+                Optional.ofNullable(getMaxBufferedRequests()).orElse(DEFAULT_MAX_BUFFERED_REQUESTS),
+                Optional.ofNullable(getMaxBatchSizeInBytes()).orElse(DEFAULT_MAX_BATCH_SIZE_IN_B),
+                Optional.ofNullable(getMaxTimeInBufferMS()).orElse(DEFAULT_MAX_TIME_IN_BUFFER_MS),
+                Optional.ofNullable(getMaxRecordSizeInBytes()).orElse(DEFAULT_MAX_RECORD_SIZE_IN_B),
+                Optional.ofNullable(failOnError).orElse(DEFAULT_FAIL_ON_ERROR),
+                deliveryStreamName,
+                getClientPropertiesWithDefaultHttpProtocol());
+    }
+}
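
Building on the javadoc example at the top of this builder, here is a hedged sketch of how the built sink would be attached to a job; the StreamExecutionEnvironment wiring comes from the regular Flink DataStream API and is not part of this commit:

    Properties sinkProperties = new Properties();
    sinkProperties.put(AWSConfigConstants.AWS_REGION, "eu-west-1");

    KinesisFirehoseSink<String> kdfSink =
            KinesisFirehoseSink.<String>builder()
                    .setDeliveryStreamName("delivery-stream-name")
                    .setSerializationSchema(new SimpleStringSchema())
                    .setFirehoseClientProperties(sinkProperties)
                    .build();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromElements("a", "b", "c")
            .sinkTo(kdfSink);  // KinesisFirehoseSink is a Sink via AsyncSinkBase
    env.execute("firehose-example");
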
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverter.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverter.java
new file mode 100644
index 0000000..b90db33
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverter.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.flink.util.Preconditions;
+import org.apache.flink.util.SimpleUserCodeClassLoader;
+import org.apache.flink.util.UserCodeClassLoader;
+
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+/**
+ * An implementation of the {@link ElementConverter} that uses the AWS SDK v2. The user only
+ * needs to provide a {@link SerializationSchema} of the {@code InputT} to transform it into a
+ * {@link Record} that may be persisted.
+ */
+@Internal
+public class KinesisFirehoseSinkElementConverter<InputT>
+        implements ElementConverter<InputT, Record> {
+    private boolean schemaOpened = false;
+
+    /** A serialization schema to specify how the input element should be serialized. */
+    private final SerializationSchema<InputT> serializationSchema;
+
+    private KinesisFirehoseSinkElementConverter(SerializationSchema<InputT> serializationSchema) {
+        this.serializationSchema = serializationSchema;
+    }
+
+    @Override
+    public Record apply(InputT element, SinkWriter.Context context) {
+        checkOpened();
+        return Record.builder()
+                .data(SdkBytes.fromByteArray(serializationSchema.serialize(element)))
+                .build();
+    }
+
+    private void checkOpened() {
+        if (!schemaOpened) {
+            try {
+                serializationSchema.open(
+                        new SerializationSchema.InitializationContext() {
+                            @Override
+                            public MetricGroup getMetricGroup() {
+                                return new UnregisteredMetricsGroup();
+                            }
+
+                            @Override
+                            public UserCodeClassLoader getUserCodeClassLoader() {
+                                return SimpleUserCodeClassLoader.create(
+                                        KinesisFirehoseSinkElementConverter.class.getClassLoader());
+                            }
+                        });
+                schemaOpened = true;
+            } catch (Exception e) {
+                throw new FlinkRuntimeException("Failed to initialize serialization schema.", e);
+            }
+        }
+    }
+
+    public static <InputT> Builder<InputT> builder() {
+        return new Builder<>();
+    }
+
+    /** A builder for the KinesisFirehoseSinkElementConverter. */
+    public static class Builder<InputT> {
+
+        private SerializationSchema<InputT> serializationSchema;
+
+        public Builder<InputT> setSerializationSchema(
+                SerializationSchema<InputT> serializationSchema) {
+            this.serializationSchema = serializationSchema;
+            return this;
+        }
+
+        public KinesisFirehoseSinkElementConverter<InputT> build() {
+            Preconditions.checkNotNull(
+                    serializationSchema,
+                    "No SerializationSchema was supplied to the " + "KinesisFirehoseSink builder.");
+            return new KinesisFirehoseSinkElementConverter<>(serializationSchema);
+        }
+    }
+}
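
As a small usage sketch of the converter above (SimpleStringSchema is assumed from flink-core and is not part of this commit), the builder turns a serialization schema into per-element Firehose Records:

    KinesisFirehoseSinkElementConverter<String> converter =
            KinesisFirehoseSinkElementConverter.<String>builder()
                    .setSerializationSchema(new SimpleStringSchema())
                    .build();
    // For every element handed to the sink writer, apply(element, context) serializes the
    // element to bytes and wraps them via Record.builder().data(...).build().
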
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriter.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriter.java
new file mode 100644
index 0000000..c16018d
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriter.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.connector.aws.util.AWSAsyncSinkUtil;
+import org.apache.flink.connector.aws.util.AWSGeneralUtil;
+import org.apache.flink.connector.base.sink.throwable.FatalExceptionClassifier;
+import org.apache.flink.connector.base.sink.writer.AsyncSinkWriter;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.connector.base.sink.writer.config.AsyncSinkWriterConfiguration;
+import org.apache.flink.metrics.Counter;
+import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.services.firehose.FirehoseAsyncClient;
+import software.amazon.awssdk.services.firehose.model.PutRecordBatchRequest;
+import software.amazon.awssdk.services.firehose.model.PutRecordBatchResponse;
+import software.amazon.awssdk.services.firehose.model.PutRecordBatchResponseEntry;
+import software.amazon.awssdk.services.firehose.model.Record;
+import software.amazon.awssdk.services.firehose.model.ResourceNotFoundException;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+
+import static org.apache.flink.connector.aws.util.AWSCredentialFatalExceptionClassifiers.getInvalidCredentialsExceptionClassifier;
+import static org.apache.flink.connector.aws.util.AWSCredentialFatalExceptionClassifiers.getSdkClientMisconfiguredExceptionClassifier;
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkFatalExceptionClassifiers.getInterruptedExceptionClassifier;
+
+/**
+ * Sink writer created by {@link KinesisFirehoseSink} to write to Kinesis Data Firehose. More
+ * details on the operation of this sink writer may be found in the doc for {@link
+ * KinesisFirehoseSink}. More details on the internals of this sink writer may be found in {@link
+ * AsyncSinkWriter}.
+ *
+ * <p>The {@link FirehoseAsyncClient} used here may be configured in the standard way for the AWS
+ * SDK 2.x, e.g. by providing {@code AWS_REGION}, {@code AWS_ACCESS_KEY_ID} and {@code
+ * AWS_SECRET_ACCESS_KEY} through environment variables.
+ */
+@Internal
+class KinesisFirehoseSinkWriter<InputT> extends AsyncSinkWriter<InputT, Record> {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisFirehoseSinkWriter.class);
+
+    private static SdkAsyncHttpClient createHttpClient(Properties firehoseClientProperties) {
+        return AWSGeneralUtil.createAsyncHttpClient(firehoseClientProperties);
+    }
+
+    private static FirehoseAsyncClient createFirehoseClient(
+            Properties firehoseClientProperties, SdkAsyncHttpClient httpClient) {
+        AWSGeneralUtil.validateAwsCredentials(firehoseClientProperties);
+        return AWSAsyncSinkUtil.createAwsAsyncClient(
+                firehoseClientProperties,
+                httpClient,
+                FirehoseAsyncClient.builder(),
+                KinesisFirehoseConfigConstants.BASE_FIREHOSE_USER_AGENT_PREFIX_FORMAT,
+                KinesisFirehoseConfigConstants.FIREHOSE_CLIENT_USER_AGENT_PREFIX);
+    }
+
+    private static final FatalExceptionClassifier RESOURCE_NOT_FOUND_EXCEPTION_CLASSIFIER =
+            FatalExceptionClassifier.withRootCauseOfType(
+                    ResourceNotFoundException.class,
+                    err ->
+                            new KinesisFirehoseException(
+                                    "Encountered non-recoverable exception relating to not being able to find the specified resources",
+                                    err));
+
+    private static final FatalExceptionClassifier FIREHOSE_FATAL_EXCEPTION_CLASSIFIER =
+            FatalExceptionClassifier.createChain(
+                    getInterruptedExceptionClassifier(),
+                    getInvalidCredentialsExceptionClassifier(),
+                    RESOURCE_NOT_FOUND_EXCEPTION_CLASSIFIER,
+                    getSdkClientMisconfiguredExceptionClassifier());
+
+    private final Counter numRecordsOutErrorsCounter;
+
+    /* Name of the delivery stream in Kinesis Data Firehose */
+    private final String deliveryStreamName;
+
+    /* The sink writer metric group */
+    private final SinkWriterMetricGroup metrics;
+
+    /* The asynchronous http client */
+    private final SdkAsyncHttpClient httpClient;
+
+    /* The asynchronous Firehose client */
+    private final FirehoseAsyncClient firehoseClient;
+
+    /* Flag determining whether to fatally fail any time we encounter an exception when persisting records */
+    private final boolean failOnError;
+
+    KinesisFirehoseSinkWriter(
+            ElementConverter<InputT, Record> elementConverter,
+            Sink.InitContext context,
+            int maxBatchSize,
+            int maxInFlightRequests,
+            int maxBufferedRequests,
+            long maxBatchSizeInBytes,
+            long maxTimeInBufferMS,
+            long maxRecordSizeInBytes,
+            boolean failOnError,
+            String deliveryStreamName,
+            Properties firehoseClientProperties) {
+        this(
+                elementConverter,
+                context,
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBatchSizeInBytes,
+                maxTimeInBufferMS,
+                maxRecordSizeInBytes,
+                failOnError,
+                deliveryStreamName,
+                firehoseClientProperties,
+                Collections.emptyList());
+    }
+
+    KinesisFirehoseSinkWriter(
+            ElementConverter<InputT, Record> elementConverter,
+            Sink.InitContext context,
+            int maxBatchSize,
+            int maxInFlightRequests,
+            int maxBufferedRequests,
+            long maxBatchSizeInBytes,
+            long maxTimeInBufferMS,
+            long maxRecordSizeInBytes,
+            boolean failOnError,
+            String deliveryStreamName,
+            Properties firehoseClientProperties,
+            Collection<BufferedRequestState<Record>> initialStates) {
+        super(
+                elementConverter,
+                context,
+                AsyncSinkWriterConfiguration.builder()
+                        .setMaxBatchSize(maxBatchSize)
+                        .setMaxBatchSizeInBytes(maxBatchSizeInBytes)
+                        .setMaxInFlightRequests(maxInFlightRequests)
+                        .setMaxBufferedRequests(maxBufferedRequests)
+                        .setMaxTimeInBufferMS(maxTimeInBufferMS)
+                        .setMaxRecordSizeInBytes(maxRecordSizeInBytes)
+                        .build(),
+                initialStates);
+        this.failOnError = failOnError;
+        this.deliveryStreamName = deliveryStreamName;
+        this.metrics = context.metricGroup();
+        this.numRecordsOutErrorsCounter = metrics.getNumRecordsOutErrorsCounter();
+        this.httpClient = createHttpClient(firehoseClientProperties);
+        this.firehoseClient = createFirehoseClient(firehoseClientProperties, httpClient);
+    }
+
+    @Override
+    protected void submitRequestEntries(
+            List<Record> requestEntries, Consumer<List<Record>> requestResult) {
+
+        PutRecordBatchRequest batchRequest =
+                PutRecordBatchRequest.builder()
+                        .records(requestEntries)
+                        .deliveryStreamName(deliveryStreamName)
+                        .build();
+
+        CompletableFuture<PutRecordBatchResponse> future =
+                firehoseClient.putRecordBatch(batchRequest);
+
+        future.whenComplete(
+                (response, err) -> {
+                    if (err != null) {
+                        handleFullyFailedRequest(err, requestEntries, requestResult);
+                    } else if (response.failedPutCount() > 0) {
+                        handlePartiallyFailedRequest(response, requestEntries, requestResult);
+                    } else {
+                        requestResult.accept(Collections.emptyList());
+                    }
+                });
+    }
+
+    @Override
+    protected long getSizeInBytes(Record requestEntry) {
+        return requestEntry.data().asByteArrayUnsafe().length;
+    }
+
+    @Override
+    public void close() {
+        AWSGeneralUtil.closeResources(httpClient, firehoseClient);
+    }
+
+    private void handleFullyFailedRequest(
+            Throwable err, List<Record> requestEntries, Consumer<List<Record>> requestResult) {
+        LOG.debug(
+                "KDF Sink failed to write and will retry {} entries to KDF, first request was {}",
+                requestEntries.size(),
+                requestEntries.get(0).toString(),
+                err);
+        numRecordsOutErrorsCounter.inc(requestEntries.size());
+
+        if (isRetryable(err)) {
+            requestResult.accept(requestEntries);
+        }
+    }
+
+    private void handlePartiallyFailedRequest(
+            PutRecordBatchResponse response,
+            List<Record> requestEntries,
+            Consumer<List<Record>> requestResult) {
+        LOG.debug(
+                "KDF Sink failed to write and will retry {} entries to KDF, first request was {}",
+                requestEntries.size(),
+                requestEntries.get(0).toString());
+        numRecordsOutErrorsCounter.inc(response.failedPutCount());
+
+        if (failOnError) {
+            getFatalExceptionCons()
+                    .accept(new KinesisFirehoseException.KinesisFirehoseFailFastException());
+            return;
+        }
+        List<Record> failedRequestEntries = new ArrayList<>(response.failedPutCount());
+        List<PutRecordBatchResponseEntry> records = response.requestResponses();
+
+        for (int i = 0; i < records.size(); i++) {
+            if (records.get(i).errorCode() != null) {
+                failedRequestEntries.add(requestEntries.get(i));
+            }
+        }
+
+        requestResult.accept(failedRequestEntries);
+    }
+
+    private boolean isRetryable(Throwable err) {
+        if (!FIREHOSE_FATAL_EXCEPTION_CLASSIFIER.isFatal(err, getFatalExceptionCons())) {
+            return false;
+        }
+        if (failOnError) {
+            getFatalExceptionCons()
+                    .accept(new KinesisFirehoseException.KinesisFirehoseFailFastException(err));
+            return false;
+        }
+
+        return true;
+    }
+}
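
As the Javadoc above notes, the async client is configured through standard AWS SDK v2 settings. A minimal configuration sketch passing explicit client properties through the sink builder (editor's illustration, not part of this patch; the class name, region and stream name are placeholder assumptions, and credentials may equally come from the environment):

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.aws.config.AWSConfigConstants;
import org.apache.flink.connector.firehose.sink.KinesisFirehoseSink;

public class FirehoseSinkConfigSketch {
    public static KinesisFirehoseSink<String> buildSink() {
        Properties clientProps = new Properties();
        // Region is required; AUTO lets the SDK resolve credentials from its default chain.
        clientProps.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-1");
        clientProps.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "AUTO");

        return KinesisFirehoseSink.<String>builder()
                .setSerializationSchema(new SimpleStringSchema())
                .setDeliveryStreamName("example-delivery-stream")
                .setFirehoseClientProperties(clientProps)
                .setFailOnError(false)
                .build();
    }
}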
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializer.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializer.java
new file mode 100644
index 0000000..36162e6
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializer.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.connector.base.sink.writer.AsyncSinkWriterStateSerializer;
+
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+/** Kinesis Firehose implementation of {@link AsyncSinkWriterStateSerializer}. */
+@Internal
+public class KinesisFirehoseStateSerializer extends AsyncSinkWriterStateSerializer<Record> {
+    @Override
+    protected void serializeRequestToStream(Record request, DataOutputStream out)
+            throws IOException {
+        out.write(request.data().asByteArrayUnsafe());
+    }
+
+    @Override
+    protected Record deserializeRequestFromStream(long requestSize, DataInputStream in)
+            throws IOException {
+        byte[] requestData = new byte[(int) requestSize];
+        in.readFully(requestData);
+        return Record.builder().data(SdkBytes.fromByteArray(requestData)).build();
+    }
+
+    @Override
+    public int getVersion() {
+        return 1;
+    }
+}
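
A quick round-trip sketch for the serializer above (editor's illustration, not part of this patch; the subclass exists only to reach the protected hooks and its name is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.flink.connector.firehose.sink.KinesisFirehoseStateSerializer;

import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.firehose.model.Record;

class StateSerializerRoundTripSketch extends KinesisFirehoseStateSerializer {
    static Record roundTrip(Record original) throws IOException {
        StateSerializerRoundTripSketch serializer = new StateSerializerRoundTripSketch();
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        serializer.serializeRequestToStream(original, new DataOutputStream(buffer));

        // Only the record payload is written, so the buffer size doubles as the request size.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        return serializer.deserializeRequestFromStream(buffer.size(), in);
    }

    public static void main(String[] args) throws IOException {
        Record original = Record.builder().data(SdkBytes.fromUtf8String("hello firehose")).build();
        System.out.println(roundTrip(original).data().asUtf8String()); // prints: hello firehose
    }
}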
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseConnectorOptions.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseConnectorOptions.java
new file mode 100644
index 0000000..92773c6
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseConnectorOptions.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.table;
+
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.connector.base.table.AsyncSinkConnectorOptions;
+
+/** Options for the Kinesis Firehose connector. */
+@PublicEvolving
+public class KinesisFirehoseConnectorOptions extends AsyncSinkConnectorOptions {
+
+    public static final ConfigOption<String> DELIVERY_STREAM =
+            ConfigOptions.key("delivery-stream")
+                    .stringType()
+                    .noDefaultValue()
+                    .withDescription(
+                            "Name of the Kinesis Firehose delivery stream backing this table.");
+
+    public static final ConfigOption<Boolean> SINK_FAIL_ON_ERROR =
+            ConfigOptions.key("sink.fail-on-error")
+                    .booleanType()
+                    .defaultValue(false)
+                    .withDescription(
+                            "Optional fail-on-error flag for the Kinesis Firehose sink; default is false.");
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicSink.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicSink.java
new file mode 100644
index 0000000..149dbaf
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicSink.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.connector.base.table.sink.AsyncDynamicTableSink;
+import org.apache.flink.connector.base.table.sink.AsyncDynamicTableSinkBuilder;
+import org.apache.flink.connector.firehose.sink.KinesisFirehoseSink;
+import org.apache.flink.connector.firehose.sink.KinesisFirehoseSinkBuilder;
+import org.apache.flink.table.connector.ChangelogMode;
+import org.apache.flink.table.connector.format.EncodingFormat;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.connector.sink.SinkV2Provider;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.util.Preconditions;
+
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import javax.annotation.Nullable;
+
+import java.util.Optional;
+import java.util.Properties;
+
+/** Kinesis Firehose backed {@link AsyncDynamicTableSink}. */
+@Internal
+public class KinesisFirehoseDynamicSink extends AsyncDynamicTableSink<Record> {
+
+    /** Consumed data type of the table. */
+    private final DataType consumedDataType;
+
+    /** The Firehose delivery stream to write to. */
+    private final String deliveryStream;
+
+    /** Properties for the Firehose DataStream Sink. */
+    private final Properties firehoseClientProperties;
+
+    /** Sink format for encoding records to Kinesis Data Firehose. */
+    private final EncodingFormat<SerializationSchema<RowData>> encodingFormat;
+
+    private final Boolean failOnError;
+
+    protected KinesisFirehoseDynamicSink(
+            @Nullable Integer maxBatchSize,
+            @Nullable Integer maxInFlightRequests,
+            @Nullable Integer maxBufferedRequests,
+            @Nullable Long maxBufferSizeInBytes,
+            @Nullable Long maxTimeInBufferMS,
+            @Nullable Boolean failOnError,
+            @Nullable DataType consumedDataType,
+            String deliveryStream,
+            @Nullable Properties firehoseClientProperties,
+            EncodingFormat<SerializationSchema<RowData>> encodingFormat) {
+        super(
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBufferSizeInBytes,
+                maxTimeInBufferMS);
+        this.failOnError = failOnError;
+        this.firehoseClientProperties = firehoseClientProperties;
+        this.consumedDataType =
+                Preconditions.checkNotNull(consumedDataType, "Consumed data type must not be null");
+        this.deliveryStream =
+                Preconditions.checkNotNull(
+                        deliveryStream, "Firehose Delivery stream name must not be null");
+        this.encodingFormat =
+                Preconditions.checkNotNull(encodingFormat, "Encoding format must not be null");
+    }
+
+    @Override
+    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
+        return encodingFormat.getChangelogMode();
+    }
+
+    @Override
+    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
+        SerializationSchema<RowData> serializationSchema =
+                encodingFormat.createRuntimeEncoder(context, consumedDataType);
+
+        KinesisFirehoseSinkBuilder<RowData> builder =
+                KinesisFirehoseSink.<RowData>builder()
+                        .setSerializationSchema(serializationSchema)
+                        .setFirehoseClientProperties(firehoseClientProperties)
+                        .setDeliveryStreamName(deliveryStream);
+
+        Optional.ofNullable(failOnError).ifPresent(builder::setFailOnError);
+        super.addAsyncOptionsToSinkBuilder(builder);
+
+        return SinkV2Provider.of(builder.build());
+    }
+
+    @Override
+    public DynamicTableSink copy() {
+        return new KinesisFirehoseDynamicSink(
+                maxBatchSize,
+                maxInFlightRequests,
+                maxBufferedRequests,
+                maxBufferSizeInBytes,
+                maxTimeInBufferMS,
+                failOnError,
+                consumedDataType,
+                deliveryStream,
+                firehoseClientProperties,
+                encodingFormat);
+    }
+
+    @Override
+    public String asSummaryString() {
+        return "firehose";
+    }
+
+    /** Builder class for {@link KinesisFirehoseDynamicSink}. */
+    @Internal
+    public static class KinesisFirehoseDynamicSinkBuilder
+            extends AsyncDynamicTableSinkBuilder<Record, KinesisFirehoseDynamicSinkBuilder> {
+
+        private DataType consumedDataType = null;
+        private String deliveryStream = null;
+        private Properties firehoseClientProperties = null;
+        private EncodingFormat<SerializationSchema<RowData>> encodingFormat = null;
+        private Boolean failOnError = null;
+
+        public KinesisFirehoseDynamicSinkBuilder setConsumedDataType(DataType consumedDataType) {
+            this.consumedDataType = consumedDataType;
+            return this;
+        }
+
+        public KinesisFirehoseDynamicSinkBuilder setDeliveryStream(String deliveryStream) {
+            this.deliveryStream = deliveryStream;
+            return this;
+        }
+
+        public KinesisFirehoseDynamicSinkBuilder setFirehoseClientProperties(
+                Properties firehoseClientProperties) {
+            this.firehoseClientProperties = firehoseClientProperties;
+            return this;
+        }
+
+        public KinesisFirehoseDynamicSinkBuilder setEncodingFormat(
+                EncodingFormat<SerializationSchema<RowData>> encodingFormat) {
+            this.encodingFormat = encodingFormat;
+            return this;
+        }
+
+        public KinesisFirehoseDynamicSinkBuilder setFailOnError(Boolean failOnError) {
+            this.failOnError = failOnError;
+            return this;
+        }
+
+        @Override
+        public KinesisFirehoseDynamicSink build() {
+            return new KinesisFirehoseDynamicSink(
+                    getMaxBatchSize(),
+                    getMaxInFlightRequests(),
+                    getMaxBufferedRequests(),
+                    getMaxBufferSizeInBytes(),
+                    getMaxTimeInBufferMS(),
+                    failOnError,
+                    consumedDataType,
+                    deliveryStream,
+                    firehoseClientProperties,
+                    encodingFormat);
+        }
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactory.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactory.java
new file mode 100644
index 0000000..a7ca38e
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactory.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.table;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.connector.base.table.AsyncDynamicTableSinkFactory;
+import org.apache.flink.connector.firehose.table.util.KinesisFirehoseConnectorOptionUtils;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.flink.connector.firehose.table.KinesisFirehoseConnectorOptions.DELIVERY_STREAM;
+import static org.apache.flink.connector.firehose.table.KinesisFirehoseConnectorOptions.SINK_FAIL_ON_ERROR;
+import static org.apache.flink.connector.firehose.table.util.KinesisFirehoseConnectorOptionUtils.FIREHOSE_CLIENT_PROPERTIES_KEY;
+import static org.apache.flink.table.factories.FactoryUtil.FORMAT;
+
+/** Factory for creating {@link KinesisFirehoseDynamicSink}. */
+@Internal
+public class KinesisFirehoseDynamicTableFactory extends AsyncDynamicTableSinkFactory {
+
+    public static final String IDENTIFIER = "firehose";
+
+    @Override
+    public DynamicTableSink createDynamicTableSink(Context context) {
+
+        AsyncDynamicSinkContext factoryContext = new AsyncDynamicSinkContext(this, context);
+
+        KinesisFirehoseDynamicSink.KinesisFirehoseDynamicSinkBuilder builder =
+                new KinesisFirehoseDynamicSink.KinesisFirehoseDynamicSinkBuilder();
+
+        KinesisFirehoseConnectorOptionUtils optionsUtils =
+                new KinesisFirehoseConnectorOptionUtils(
+                        factoryContext.getResolvedOptions(), factoryContext.getTableOptions());
+        // validate the data types of the table options
+        factoryContext
+                .getFactoryHelper()
+                .validateExcept(optionsUtils.getNonValidatedPrefixes().toArray(new String[0]));
+        Properties properties = optionsUtils.getSinkProperties();
+
+        builder.setDeliveryStream((String) properties.get(DELIVERY_STREAM.key()))
+                .setFirehoseClientProperties(
+                        (Properties) properties.get(FIREHOSE_CLIENT_PROPERTIES_KEY))
+                .setEncodingFormat(factoryContext.getEncodingFormat())
+                .setConsumedDataType(factoryContext.getPhysicalDataType());
+        Optional.ofNullable((Boolean) properties.get(SINK_FAIL_ON_ERROR.key()))
+                .ifPresent(builder::setFailOnError);
+        return super.addAsyncOptionsToBuilder(properties, builder).build();
+    }
+
+    @Override
+    public String factoryIdentifier() {
+        return IDENTIFIER;
+    }
+
+    @Override
+    public Set<ConfigOption<?>> requiredOptions() {
+        final Set<ConfigOption<?>> options = new HashSet<>();
+        options.add(DELIVERY_STREAM);
+        options.add(FORMAT);
+        return options;
+    }
+
+    @Override
+    public Set<ConfigOption<?>> optionalOptions() {
+        final Set<ConfigOption<?>> options = super.optionalOptions();
+        options.add(SINK_FAIL_ON_ERROR);
+        return options;
+    }
+}
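
Putting the factory and options together, a hedged Table API sketch (editor's illustration, not part of this patch): the 'connector', 'delivery-stream', 'format' and 'sink.fail-on-error' keys come from the factory and options classes above, while the 'aws.region' key and all values are assumptions about a typical setup.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class FirehoseTableSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // Registers a sink table backed by a Firehose delivery stream.
        tEnv.executeSql(
                "CREATE TABLE firehose_sink (\n"
                        + "  `user_id` BIGINT,\n"
                        + "  `message` STRING\n"
                        + ") WITH (\n"
                        + "  'connector' = 'firehose',\n"
                        + "  'delivery-stream' = 'example-delivery-stream',\n"
                        + "  'format' = 'json',\n"
                        + "  'aws.region' = 'eu-west-1',\n"
                        + "  'sink.fail-on-error' = 'false'\n"
                        + ")");
    }
}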
diff --git a/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/util/KinesisFirehoseConnectorOptionUtils.java b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/util/KinesisFirehoseConnectorOptionUtils.java
new file mode 100644
index 0000000..bd1ccfb
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/java/org/apache/flink/connector/firehose/table/util/KinesisFirehoseConnectorOptionUtils.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.table.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.aws.table.util.AsyncClientOptionsUtils;
+import org.apache.flink.connector.base.table.sink.options.AsyncSinkConfigurationValidator;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.connector.firehose.table.KinesisFirehoseConnectorOptions.DELIVERY_STREAM;
+import static org.apache.flink.connector.firehose.table.KinesisFirehoseConnectorOptions.SINK_FAIL_ON_ERROR;
+
+/** Class for extracting Firehose configurations from table options. */
+@Internal
+public class KinesisFirehoseConnectorOptionUtils {
+
+    public static final String FIREHOSE_CLIENT_PROPERTIES_KEY = "sink.client.properties";
+
+    private final AsyncSinkConfigurationValidator asyncSinkConfigurationValidator;
+    private final AsyncClientOptionsUtils asyncClientOptionsUtils;
+    private final Map<String, String> resolvedOptions;
+    private final ReadableConfig tableOptions;
+
+    public KinesisFirehoseConnectorOptionUtils(
+            Map<String, String> resolvedOptions, ReadableConfig tableOptions) {
+        this.asyncSinkConfigurationValidator = new AsyncSinkConfigurationValidator(tableOptions);
+        this.asyncClientOptionsUtils = new AsyncClientOptionsUtils(resolvedOptions);
+        this.resolvedOptions = resolvedOptions;
+        this.tableOptions = tableOptions;
+    }
+
+    public List<String> getNonValidatedPrefixes() {
+        return this.asyncClientOptionsUtils.getNonValidatedPrefixes();
+    }
+
+    public Properties getSinkProperties() {
+        Properties properties = asyncSinkConfigurationValidator.getValidatedConfigurations();
+        properties.put(DELIVERY_STREAM.key(), tableOptions.get(DELIVERY_STREAM));
+        Properties kinesisClientProps = asyncClientOptionsUtils.getValidatedConfigurations();
+        properties.put(FIREHOSE_CLIENT_PROPERTIES_KEY, kinesisClientProps);
+        if (tableOptions.getOptional(SINK_FAIL_ON_ERROR).isPresent()) {
+            properties.put(
+                    SINK_FAIL_ON_ERROR.key(), tableOptions.getOptional(SINK_FAIL_ON_ERROR).get());
+        }
+        return properties;
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/flink-connector-aws-kinesis-firehose/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
new file mode 100644
index 0000000..2147c30
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.connector.firehose.table.KinesisFirehoseDynamicTableFactory
diff --git a/flink-connector-aws-kinesis-firehose/src/main/resources/log4j2.properties b/flink-connector-aws-kinesis-firehose/src/main/resources/log4j2.properties
new file mode 100644
index 0000000..c64a340
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/main/resources/log4j2.properties
@@ -0,0 +1,25 @@
+################################################################################
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+rootLogger.level = OFF
+rootLogger.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
new file mode 100644
index 0000000..d7fbc74
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/architecture/TestCodeArchitectureTest.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.architecture;
+
+import org.apache.flink.architecture.common.ImportOptions;
+
+import com.tngtech.archunit.core.importer.ImportOption;
+import com.tngtech.archunit.junit.AnalyzeClasses;
+import com.tngtech.archunit.junit.ArchTest;
+import com.tngtech.archunit.junit.ArchTests;
+
+/** Architecture tests for test code. */
+@AnalyzeClasses(
+        packages = "org.apache.flink.connector.firehose",
+        importOptions = {
+            ImportOption.OnlyIncludeTests.class,
+            ImportOptions.ExcludeScalaImportOption.class,
+            ImportOptions.ExcludeShadedImportOption.class
+        })
+public class TestCodeArchitectureTest {
+
+    @ArchTest
+    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilderTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilderTest.java
new file mode 100644
index 0000000..e375193
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkBuilderTest.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SerializationSchema;
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Properties;
+
+/** Covers construction, defaults and sanity checking of {@link KinesisFirehoseSinkBuilder}. */
+class KinesisFirehoseSinkBuilderTest {
+
+    private static final SerializationSchema<String> SERIALIZATION_SCHEMA =
+            new SimpleStringSchema();
+
+    @Test
+    void elementConverterOfSinkMustBeSetWhenBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisFirehoseSink.builder()
+                                        .setDeliveryStreamName("deliveryStream")
+                                        .build())
+                .withMessageContaining(
+                        "No SerializationSchema was supplied to the KinesisFirehoseSink builder.");
+    }
+
+    @Test
+    void streamNameOfSinkMustBeSetWhenBuilt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisFirehoseSink.<String>builder()
+                                        .setSerializationSchema(SERIALIZATION_SCHEMA)
+                                        .build())
+                .withMessageContaining(
+                        "The delivery stream name must not be null when initializing the KDF Sink.");
+    }
+
+    @Test
+    void streamNameOfSinkMustBeSetToNonEmptyWhenBuilt() {
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(
+                        () ->
+                                KinesisFirehoseSink.<String>builder()
+                                        .setDeliveryStreamName("")
+                                        .setSerializationSchema(SERIALIZATION_SCHEMA)
+                                        .build())
+                .withMessageContaining(
+                        "The delivery stream name must be set when initializing the KDF Sink.");
+    }
+
+    @Test
+    void defaultProtocolVersionInsertedToConfiguration() {
+        Properties expectedProps = new Properties();
+        expectedProps.setProperty(AWSConfigConstants.HTTP_PROTOCOL_VERSION, "HTTP1_1");
+        Properties defaultProperties =
+                KinesisFirehoseSink.<String>builder().getClientPropertiesWithDefaultHttpProtocol();
+
+        Assertions.assertThat(defaultProperties).isEqualTo(expectedProps);
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverterTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverterTest.java
new file mode 100644
index 0000000..ccb4582
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkElementConverterTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+/** Covers construction and sanity checking of {@link KinesisFirehoseSinkElementConverter}. */
+class KinesisFirehoseSinkElementConverterTest {
+
+    @Test
+    void elementConverterWillComplainASerializationSchemaIsNotSetIfBuildIsCalledWithoutIt() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(() -> KinesisFirehoseSinkElementConverter.<String>builder().build())
+                .withMessageContaining(
+                        "No SerializationSchema was supplied to the KinesisFirehoseSink builder.");
+    }
+
+    @Test
+    void elementConverterUsesProvidedSchemaToSerializeRecord() {
+        ElementConverter<String, Record> elementConverter =
+                KinesisFirehoseSinkElementConverter.<String>builder()
+                        .setSerializationSchema(new SimpleStringSchema())
+                        .build();
+
+        String testString = "{many hands make light work;";
+
+        Record serializedRecord = elementConverter.apply(testString, null);
+        byte[] serializedString = (new SimpleStringSchema()).serialize(testString);
+        assertThat(serializedRecord.data()).isEqualTo(SdkBytes.fromByteArray(serializedString));
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkITCase.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkITCase.java
new file mode 100644
index 0000000..2e4105a
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkITCase.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.aws.testutils.AWSServicesTestUtils;
+import org.apache.flink.connector.aws.testutils.LocalstackContainer;
+import org.apache.flink.connector.firehose.sink.testutils.KinesisFirehoseTestUtils;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.util.DockerImageVersions;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.core.SdkSystemSetting;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.firehose.FirehoseClient;
+import software.amazon.awssdk.services.iam.IamClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.S3Object;
+
+import java.util.List;
+
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createBucket;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createConfig;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createIAMRole;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createIamClient;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createS3Client;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.listBucketObjects;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.readObjectsFromS3Bucket;
+import static org.apache.flink.connector.firehose.sink.testutils.KinesisFirehoseTestUtils.createDeliveryStream;
+import static org.apache.flink.connector.firehose.sink.testutils.KinesisFirehoseTestUtils.createFirehoseClient;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/** Integration test suite for the {@code KinesisFirehoseSink} using a localstack container. */
+@Testcontainers
+class KinesisFirehoseSinkITCase {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KinesisFirehoseSinkITCase.class);
+    private static final String ROLE_NAME = "super-role";
+    private static final String ROLE_ARN = "arn:aws:iam::000000000000:role/" + ROLE_NAME;
+    private static final String BUCKET_NAME = "s3-firehose";
+    private static final String STREAM_NAME = "s3-stream";
+    private static final int NUMBER_OF_ELEMENTS = 92;
+    private StreamExecutionEnvironment env;
+
+    private SdkHttpClient httpClient;
+    private S3Client s3Client;
+    private FirehoseClient firehoseClient;
+    private IamClient iamClient;
+
+    @Container
+    private static LocalstackContainer mockFirehoseContainer =
+            new LocalstackContainer(DockerImageName.parse(DockerImageVersions.LOCALSTACK));
+
+    @BeforeEach
+    void setup() {
+        System.setProperty(SdkSystemSetting.CBOR_ENABLED.property(), "false");
+        httpClient = AWSServicesTestUtils.createHttpClient();
+        s3Client = createS3Client(mockFirehoseContainer.getEndpoint(), httpClient);
+        firehoseClient = createFirehoseClient(mockFirehoseContainer.getEndpoint(), httpClient);
+        iamClient = createIamClient(mockFirehoseContainer.getEndpoint(), httpClient);
+        env = StreamExecutionEnvironment.getExecutionEnvironment();
+    }
+
+    @AfterEach
+    void teardown() {
+        System.clearProperty(SdkSystemSetting.CBOR_ENABLED.property());
+    }
+
+    @Test
+    void firehoseSinkWritesCorrectDataToMockAWSServices() throws Exception {
+        LOG.info("1 - Creating the bucket for Firehose to deliver into...");
+        createBucket(s3Client, BUCKET_NAME);
+        LOG.info("2 - Creating the IAM Role for Firehose to write into the s3 bucket...");
+        createIAMRole(iamClient, ROLE_NAME);
+        LOG.info("3 - Creating the Firehose delivery stream...");
+        createDeliveryStream(STREAM_NAME, BUCKET_NAME, ROLE_ARN, firehoseClient);
+
+        KinesisFirehoseSink<String> kdsSink =
+                KinesisFirehoseSink.<String>builder()
+                        .setSerializationSchema(new SimpleStringSchema())
+                        .setDeliveryStreamName(STREAM_NAME)
+                        .setMaxBatchSize(1)
+                        .setFirehoseClientProperties(
+                                createConfig(mockFirehoseContainer.getEndpoint()))
+                        .build();
+
+        KinesisFirehoseTestUtils.getSampleDataGenerator(env, NUMBER_OF_ELEMENTS).sinkTo(kdsSink);
+        env.execute("Integration Test");
+
+        List<S3Object> objects =
+                listBucketObjects(
+                        createS3Client(mockFirehoseContainer.getEndpoint(), httpClient),
+                        BUCKET_NAME);
+        assertThat(objects.size()).isEqualTo(NUMBER_OF_ELEMENTS);
+        assertThat(
+                        readObjectsFromS3Bucket(
+                                s3Client,
+                                objects,
+                                BUCKET_NAME,
+                                response -> new String(response.asByteArrayUnsafe())))
+                .containsAll(KinesisFirehoseTestUtils.getSampleData(NUMBER_OF_ELEMENTS));
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkTest.java
new file mode 100644
index 0000000..40e9ace
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkTest.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.aws.config.AWSConfigConstants;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.connector.firehose.sink.testutils.KinesisFirehoseTestUtils;
+import org.apache.flink.runtime.client.JobExecutionException;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.util.Properties;
+
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.AWS_REGION;
+import static org.apache.flink.connector.aws.config.AWSConfigConstants.TRUST_ALL_CERTIFICATES;
+import static org.apache.flink.connector.aws.testutils.AWSServicesTestUtils.createConfig;
+
+/** Covers construction, defaults and sanity checking of {@link KinesisFirehoseSink}. */
+class KinesisFirehoseSinkTest {
+
+    private static final ElementConverter<String, Record> elementConverter =
+            KinesisFirehoseSinkElementConverter.<String>builder()
+                    .setSerializationSchema(new SimpleStringSchema())
+                    .build();
+
+    @Test
+    void deliveryStreamNameMustNotBeNull() {
+        Assertions.assertThatExceptionOfType(NullPointerException.class)
+                .isThrownBy(
+                        () ->
+                                new KinesisFirehoseSink<>(
+                                        elementConverter,
+                                        500,
+                                        16,
+                                        10000,
+                                        4 * 1024 * 1024L,
+                                        5000L,
+                                        1000 * 1024L,
+                                        false,
+                                        null,
+                                        new Properties()))
+                .withMessageContaining(
+                        "The delivery stream name must not be null when initializing the KDF Sink.");
+    }
+
+    @Test
+    void deliveryStreamNameMustNotBeEmpty() {
+        Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
+                .isThrownBy(
+                        () ->
+                                new KinesisFirehoseSink<>(
+                                        elementConverter,
+                                        500,
+                                        16,
+                                        10000,
+                                        4 * 1024 * 1024L,
+                                        5000L,
+                                        1000 * 1024L,
+                                        false,
+                                        "",
+                                        new Properties()))
+                .withMessageContaining(
+                        "The delivery stream name must be set when initializing the KDF Sink.");
+    }
+
+    @Test
+    void firehoseSinkFailsWhenAccessKeyIdIsNotProvided() {
+        Properties properties = createConfig("https://non-existent-location");
+        properties.setProperty(
+                AWS_CREDENTIALS_PROVIDER, AWSConfigConstants.CredentialProvider.BASIC.toString());
+        properties.remove(AWSConfigConstants.accessKeyId(AWS_CREDENTIALS_PROVIDER));
+        firehoseSinkFailsWithAppropriateMessageWhenInitialConditionsAreMisconfigured(
+                properties, "Please set values for AWS Access Key ID");
+    }
+
+    @Test
+    void firehoseSinkFailsWhenRegionIsNotProvided() {
+        Properties properties = createConfig("https://non-existent-location");
+        properties.remove(AWS_REGION);
+        firehoseSinkFailsWithAppropriateMessageWhenInitialConditionsAreMisconfigured(
+                properties, "region must not be null.");
+    }
+
+    @Test
+    void firehoseSinkFailsWhenUnableToConnectToRemoteService() {
+        Properties properties = createConfig("https://non-existent-location");
+        properties.remove(TRUST_ALL_CERTIFICATES);
+        firehoseSinkFailsWithAppropriateMessageWhenInitialConditionsAreMisconfigured(
+                properties,
+                "Received an UnknownHostException when attempting to interact with a service.");
+    }
+
+    private void firehoseSinkFailsWithAppropriateMessageWhenInitialConditionsAreMisconfigured(
+            Properties properties, String errorMessage) {
+        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+        KinesisFirehoseSink<String> kdsSink =
+                KinesisFirehoseSink.<String>builder()
+                        .setSerializationSchema(new SimpleStringSchema())
+                        .setDeliveryStreamName("non-existent-stream")
+                        .setMaxBatchSize(1)
+                        .setFirehoseClientProperties(properties)
+                        .build();
+
+        KinesisFirehoseTestUtils.getSampleDataGenerator(env, 10).sinkTo(kdsSink);
+
+        Assertions.assertThatExceptionOfType(JobExecutionException.class)
+                .isThrownBy(() -> env.execute("Integration Test"))
+                .havingCause()
+                .havingCause()
+                .withMessageContaining(errorMessage);
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriterTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriterTest.java
new file mode 100644
index 0000000..29160f7
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseSinkWriterTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.connector.aws.testutils.AWSServicesTestUtils;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+import org.apache.flink.connector.base.sink.writer.TestSinkInitContext;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.core.exception.SdkClientException;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Properties;
+import java.util.concurrent.CompletionException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
+
+/** Covers construction, defaults and sanity checking of {@link KinesisFirehoseSinkWriter}. */
+public class KinesisFirehoseSinkWriterTest {
+
+    private KinesisFirehoseSinkWriter<String> sinkWriter;
+
+    private static final ElementConverter<String, Record> ELEMENT_CONVERTER_PLACEHOLDER =
+            KinesisFirehoseSinkElementConverter.<String>builder()
+                    .setSerializationSchema(new SimpleStringSchema())
+                    .build();
+
+    @BeforeEach
+    void setup() {
+        TestSinkInitContext sinkInitContext = new TestSinkInitContext();
+        Properties sinkProperties = AWSServicesTestUtils.createConfig("https://fake_aws_endpoint");
+        sinkWriter =
+                new KinesisFirehoseSinkWriter<>(
+                        ELEMENT_CONVERTER_PLACEHOLDER,
+                        sinkInitContext,
+                        50,
+                        16,
+                        10000,
+                        4 * 1024 * 1024,
+                        5000,
+                        1000 * 1024,
+                        true,
+                        "streamName",
+                        sinkProperties);
+    }
+
+    @Test
+    void getSizeInBytesReturnsSizeOfBlobBeforeBase64Encoding() {
+        String testString = "{many hands make light work;";
+        Record record = Record.builder().data(SdkBytes.fromUtf8String(testString)).build();
+        assertThat(sinkWriter.getSizeInBytes(record))
+                .isEqualTo(testString.getBytes(StandardCharsets.US_ASCII).length);
+    }
+
+    @Test
+    void getNumRecordsOutErrorsCounterRecordsCorrectNumberOfFailures()
+            throws IOException, InterruptedException {
+        TestSinkInitContext ctx = new TestSinkInitContext();
+        KinesisFirehoseSink<String> kinesisFirehoseSink =
+                new KinesisFirehoseSink<>(
+                        ELEMENT_CONVERTER_PLACEHOLDER,
+                        12,
+                        16,
+                        10000,
+                        4 * 1024 * 1024L,
+                        5000L,
+                        1000 * 1024L,
+                        true,
+                        "test-stream",
+                        AWSServicesTestUtils.createConfig("https://localhost"));
+        SinkWriter<String> writer = kinesisFirehoseSink.createWriter(ctx);
+
+        for (int i = 0; i < 12; i++) {
+            writer.write("data_bytes", null);
+        }
+        assertThatExceptionOfType(CompletionException.class)
+                .isThrownBy(() -> writer.flush(true))
+                .withCauseInstanceOf(SdkClientException.class)
+                .withMessageContaining(
+                        "Unable to execute HTTP request: Connection refused: localhost/127.0.0.1:443");
+        assertThat(ctx.metricGroup().getNumRecordsOutErrorsCounter().getCount()).isEqualTo(12);
+        assertThat(ctx.metricGroup().getNumRecordsSendErrorsCounter().getCount()).isEqualTo(12);
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializerTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializerTest.java
new file mode 100644
index 0000000..5264e97
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/KinesisFirehoseStateSerializerTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.connector.base.sink.writer.BufferedRequestState;
+import org.apache.flink.connector.base.sink.writer.ElementConverter;
+
+import org.junit.jupiter.api.Test;
+import software.amazon.awssdk.services.firehose.model.Record;
+
+import java.io.IOException;
+
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkWriterTestUtils.assertThatBufferStatesAreEqual;
+import static org.apache.flink.connector.base.sink.writer.AsyncSinkWriterTestUtils.getTestState;
+
+/** Test class for {@link KinesisFirehoseStateSerializer}. */
+class KinesisFirehoseStateSerializerTest {
+
+    private static final ElementConverter<String, Record> ELEMENT_CONVERTER =
+            KinesisFirehoseSinkElementConverter.<String>builder()
+                    .setSerializationSchema(new SimpleStringSchema())
+                    .build();
+
+    @Test
+    void testSerializeAndDeserialize() throws IOException {
+        BufferedRequestState<Record> expectedState =
+                getTestState(ELEMENT_CONVERTER, this::getRequestSize);
+
+        KinesisFirehoseStateSerializer serializer = new KinesisFirehoseStateSerializer();
+        BufferedRequestState<Record> actualState =
+                serializer.deserialize(1, serializer.serialize(expectedState));
+
+        assertThatBufferStatesAreEqual(actualState, expectedState);
+    }
+
+    private int getRequestSize(Record requestEntry) {
+        return requestEntry.data().asByteArrayUnsafe().length;
+    }
+}
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/testutils/KinesisFirehoseTestUtils.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/testutils/KinesisFirehoseTestUtils.java
new file mode 100644
index 0000000..f4dee62
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/sink/testutils/KinesisFirehoseTestUtils.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.sink.testutils;
+
+import org.apache.flink.connector.aws.testutils.AWSServicesTestUtils;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.util.jackson.JacksonMapperFactory;
+
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.services.firehose.FirehoseClient;
+import software.amazon.awssdk.services.firehose.model.CreateDeliveryStreamRequest;
+import software.amazon.awssdk.services.firehose.model.DeliveryStreamType;
+import software.amazon.awssdk.services.firehose.model.ExtendedS3DestinationConfiguration;
+import software.amazon.awssdk.utils.ImmutableMap;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Static helpers for creating Firehose clients and delivery streams against a Localstack
+ * container, and for generating sample test data.
+ */
+public class KinesisFirehoseTestUtils {
+
+    private static final ObjectMapper MAPPER = JacksonMapperFactory.createObjectMapper();
+
+    public static FirehoseClient createFirehoseClient(String endpoint, SdkHttpClient httpClient) {
+        return AWSServicesTestUtils.createAwsSyncClient(
+                endpoint, httpClient, FirehoseClient.builder());
+    }
+
+    public static void createDeliveryStream(
+            String deliveryStreamName,
+            String bucketName,
+            String roleARN,
+            FirehoseClient firehoseClient) {
+        ExtendedS3DestinationConfiguration s3Config =
+                ExtendedS3DestinationConfiguration.builder()
+                        .bucketARN(bucketName)
+                        .roleARN(roleARN)
+                        .build();
+        CreateDeliveryStreamRequest request =
+                CreateDeliveryStreamRequest.builder()
+                        .deliveryStreamName(deliveryStreamName)
+                        .extendedS3DestinationConfiguration(s3Config)
+                        .deliveryStreamType(DeliveryStreamType.DIRECT_PUT)
+                        .build();
+
+        firehoseClient.createDeliveryStream(request);
+    }
+
+    public static DataStream<String> getSampleDataGenerator(
+            StreamExecutionEnvironment env, int endValue) {
+        return env.fromSequence(1, endValue)
+                .map(Object::toString)
+                .returns(String.class)
+                .map(data -> MAPPER.writeValueAsString(ImmutableMap.of("data", data)));
+    }
+
+    public static List<String> getSampleData(int endValue) throws JsonProcessingException {
+        List<String> expectedElements = new ArrayList<>();
+        for (int i = 1; i <= endValue; i++) {
+            expectedElements.add(
+                    MAPPER.writeValueAsString(ImmutableMap.of("data", String.valueOf(i))));
+        }
+        return expectedElements;
+    }
+}
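
For orientation, a minimal sketch of how the helpers above are typically combined in a test; the
Localstack endpoint, bucket ARN and role ARN are placeholders (they do not appear in this commit),
and ApacheHttpClient is just one possible SdkHttpClient implementation:

    // Inside a test method that may throw Exception.
    SdkHttpClient httpClient = ApacheHttpClient.builder().build();
    FirehoseClient firehoseClient =
            KinesisFirehoseTestUtils.createFirehoseClient("http://localhost:4566", httpClient);
    KinesisFirehoseTestUtils.createDeliveryStream(
            "test-delivery-stream",
            "arn:aws:s3:::test-bucket",                 // placeholder bucket ARN
            "arn:aws:iam::000000000000:role/test-role", // placeholder role ARN
            firehoseClient);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    KinesisFirehoseTestUtils.getSampleDataGenerator(env, 10)
            .sinkTo(
                    KinesisFirehoseSink.<String>builder()
                            .setSerializationSchema(new SimpleStringSchema())
                            .setDeliveryStreamName("test-delivery-stream")
                            .setFirehoseClientProperties(
                                    AWSServicesTestUtils.createConfig("http://localhost:4566"))
                            .build());
    env.execute("Firehose Localstack sketch");
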
diff --git a/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactoryTest.java b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactoryTest.java
new file mode 100644
index 0000000..532b3b6
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/java/org/apache/flink/connector/firehose/table/KinesisFirehoseDynamicTableFactoryTest.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.firehose.table;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.connector.firehose.sink.KinesisFirehoseSink;
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.catalog.Column;
+import org.apache.flink.table.catalog.ResolvedSchema;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.connector.sink.SinkV2Provider;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.factories.TableOptionsBuilder;
+import org.apache.flink.table.factories.TestFormatFactory;
+import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
+
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink;
+
+/**
+ * Test for {@link KinesisFirehoseDynamicSink} created by {@link
+ * KinesisFirehoseDynamicTableFactory}.
+ */
+class KinesisFirehoseDynamicTableFactoryTest {
+    private static final String DELIVERY_STREAM_NAME = "myDeliveryStream";
+
+    @Test
+    void testGoodTableSink() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions = defaultTableOptions().build();
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisFirehoseDynamicSink actualSink =
+                (KinesisFirehoseDynamicSink) createTableSink(sinkSchema, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisFirehoseDynamicSink expectedSink =
+                new KinesisFirehoseDynamicSink.KinesisFirehoseDynamicSinkBuilder()
+                        .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
+                        .setDeliveryStream(DELIVERY_STREAM_NAME)
+                        .setFirehoseClientProperties(defaultSinkProperties())
+                        .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                        .build();
+
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisFirehoseSink.class);
+    }
+
+    @Test
+    void testGoodTableSinkWithSinkOptions() {
+        ResolvedSchema sinkSchema = defaultSinkSchema();
+        Map<String, String> sinkOptions = defaultTableOptionsWithSinkOptions().build();
+
+        // Construct actual DynamicTableSink using FactoryUtil
+        KinesisFirehoseDynamicSink actualSink =
+                (KinesisFirehoseDynamicSink) createTableSink(sinkSchema, sinkOptions);
+
+        // Construct expected DynamicTableSink using factory under test
+        KinesisFirehoseDynamicSink expectedSink =
+                getDefaultSinkBuilder()
+                        .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
+                        .setDeliveryStream(DELIVERY_STREAM_NAME)
+                        .setFirehoseClientProperties(defaultSinkProperties())
+                        .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
+                        .build();
+
+        Assertions.assertThat(actualSink).isEqualTo(expectedSink);
+
+        // verify the produced sink
+        DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
+                actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
+        Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
+        Assertions.assertThat(sinkFunction).isInstanceOf(KinesisFirehoseSink.class);
+    }
+
+    private ResolvedSchema defaultSinkSchema() {
+        return ResolvedSchema.of(
+                Column.physical("name", DataTypes.STRING()),
+                Column.physical("curr_id", DataTypes.BIGINT()),
+                Column.physical("time", DataTypes.TIMESTAMP(3)));
+    }
+
+    private TableOptionsBuilder defaultTableOptionsWithSinkOptions() {
+        return defaultTableOptions()
+                .withTableOption("sink.fail-on-error", "true")
+                .withTableOption("sink.batch.max-size", "100")
+                .withTableOption("sink.requests.max-inflight", "100")
+                .withTableOption("sink.requests.max-buffered", "100")
+                .withTableOption("sink.flush-buffer.size", "1000")
+                .withTableOption("sink.flush-buffer.timeout", "1000");
+    }
+
+    private TableOptionsBuilder defaultTableOptions() {
+        String connector = KinesisFirehoseDynamicTableFactory.IDENTIFIER;
+        String format = TestFormatFactory.IDENTIFIER;
+        return new TableOptionsBuilder(connector, format)
+                // default table options
+                .withTableOption(
+                        KinesisFirehoseConnectorOptions.DELIVERY_STREAM, DELIVERY_STREAM_NAME)
+                .withTableOption("aws.region", "us-west-2")
+                .withTableOption("aws.credentials.provider", "BASIC")
+                .withTableOption("aws.credentials.basic.accesskeyid", "ververicka")
+                .withTableOption(
+                        "aws.credentials.basic.secretkey", "SuperSecretSecretSquirrel")
+                // default format options
+                .withFormatOption(TestFormatFactory.DELIMITER, ",")
+                .withFormatOption(TestFormatFactory.FAIL_ON_MISSING, "true");
+    }
+
+    private KinesisFirehoseDynamicSink.KinesisFirehoseDynamicSinkBuilder getDefaultSinkBuilder() {
+        return new KinesisFirehoseDynamicSink.KinesisFirehoseDynamicSinkBuilder()
+                .setFailOnError(true)
+                .setMaxBatchSize(100)
+                .setMaxInFlightRequests(100)
+                .setMaxBufferSizeInBytes(1000)
+                .setMaxBufferedRequests(100)
+                .setMaxTimeInBufferMS(1000);
+    }
+
+    private Properties defaultSinkProperties() {
+        return new Properties() {
+            {
+                setProperty("aws.region", "us-west-2");
+                setProperty("aws.credentials.provider", "BASIC");
+                setProperty("aws.credentials.provider.basic.accesskeyid", "ververicka");
+                setProperty(
+                        "aws.credentials.provider.basic.secretkey", "SuperSecretSecretSquirrel");
+            }
+        };
+    }
+}
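
For reference, the options exercised above map roughly onto a SQL DDL such as the following
sketch; the 'firehose' connector identifier and the 'delivery-stream' key are assumptions based
on the factory and options classes under test, and 'json' stands in for the test format:

    TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tEnv.executeSql(
            "CREATE TABLE firehose_sink (name STRING, curr_id BIGINT, `time` TIMESTAMP(3)) WITH ("
                    + " 'connector' = 'firehose',"
                    + " 'delivery-stream' = 'myDeliveryStream',"
                    + " 'aws.region' = 'us-west-2',"
                    + " 'sink.batch.max-size' = '100',"
                    + " 'format' = 'json')");
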
diff --git a/flink-connector-aws-kinesis-firehose/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/flink-connector-aws-kinesis-firehose/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
new file mode 100644
index 0000000..2899913
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.util.TestLoggerExtension
\ No newline at end of file
diff --git a/flink-connector-aws-kinesis-firehose/src/test/resources/archunit.properties b/flink-connector-aws-kinesis-firehose/src/test/resources/archunit.properties
new file mode 100644
index 0000000..15be88c
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/resources/archunit.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# By default we allow removing existing violations, but fail when new violations are added.
+freeze.store.default.allowStoreUpdate=true
+
+# Enable this if a new (frozen) rule has been added in order to create the initial store and record the existing violations.
+#freeze.store.default.allowStoreCreation=true
+
+# Enable this to allow new violations to be recorded.
+# NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new
+#       violation, please try to avoid creating the violation. If the violation was created due to a
+#       shortcoming of the rule, file a JIRA issue so the rule can be improved.
+#freeze.refreeze=true
+
+freeze.store.default.path=archunit-violations
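
These settings only take effect for rules wrapped in ArchUnit's FreezingArchRule; an illustrative
sketch of such a rule (imports from com.tngtech.archunit omitted, and the rule itself is not one
defined in this module):

    JavaClasses imported =
            new ClassFileImporter().importPackages("org.apache.flink.connector.firehose");
    ArchRule frozen =
            FreezingArchRule.freeze(
                    classes().that().resideInAPackage("..sink..").should().bePublic());
    // Violations already recorded under freeze.store.default.path are tolerated (and may be
    // removed as they are fixed); any newly introduced violation fails the check.
    frozen.check(imported);
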
diff --git a/flink-connector-aws-kinesis-firehose/src/test/resources/log4j2-test.properties b/flink-connector-aws-kinesis-firehose/src/test/resources/log4j2-test.properties
new file mode 100644
index 0000000..c4fa187
--- /dev/null
+++ b/flink-connector-aws-kinesis-firehose/src/test/resources/log4j2-test.properties
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set root logger level to OFF to not flood build logs
+# set manually to INFO for debugging purposes
+rootLogger.level = OFF
+rootLogger.appenderRef.test.ref = TestLogger
+
+appender.testlogger.name = TestLogger
+appender.testlogger.type = CONSOLE
+appender.testlogger.target = SYSTEM_ERR
+appender.testlogger.layout.type = PatternLayout
+appender.testlogger.layout.pattern = %-4r [%t] %-5p %c %x - %m%n
diff --git a/flink-sql-connector-aws-kinesis-firehose/pom.xml b/flink-sql-connector-aws-kinesis-firehose/pom.xml
new file mode 100644
index 0000000..8a587ae
--- /dev/null
+++ b/flink-sql-connector-aws-kinesis-firehose/pom.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <parent>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-connector-aws-parent</artifactId>
+        <version>4.0-SNAPSHOT</version>
+    </parent>
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>flink-sql-connector-aws-kinesis-firehose</artifactId>
+    <name>Flink : Connectors : AWS : SQL : Amazon Kinesis Data Firehose</name>
+
+    <properties>
+        <japicmp.skip>true</japicmp.skip>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.flink</groupId>
+            <artifactId>flink-connector-aws-kinesis-firehose</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>shade-flink</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <artifactSet>
+                                <includes>
+                                    <include>org.apache.flink:flink-connector-base</include>
+                                    <include>org.apache.flink:flink-connector-aws-base</include>
+                                    <include>org.apache.flink:flink-connector-aws-kinesis-firehose</include>
+                                    <include>software.amazon.awssdk:*</include>
+                                    <include>io.netty:*</include>
+                                    <include>org.reactivestreams:*</include>
+                                    <include>org.apache.httpcomponents:*</include>
+                                    <include>com.typesafe.netty:*</include>
+                                    <include>commons-logging:commons-logging</include>
+                                </includes>
+                            </artifactSet>
+                            <relocations>
+                                <relocation>
+                                    <pattern>software.amazon</pattern>
+                                    <shadedPattern>org.apache.flink.connector.firehose.sink.shaded.software.amazon
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>io.netty</pattern>
+                                    <shadedPattern>org.apache.flink.connector.firehose.sink.shaded.io.netty
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.reactivestreams</pattern>
+                                    <shadedPattern>org.apache.flink.connector.firehose.sink.shaded.org.reactivestreams
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.http</pattern>
+                                    <shadedPattern>org.apache.flink.connector.firehose.sink.shaded.org.apache.http
+                                    </shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>com.typesafe.netty</pattern>
+                                    <shadedPattern>org.apache.flink.connector.firehose.sink.shaded.com.typesafe.netty
+                                    </shadedPattern>
+                                </relocation>
+                            </relocations>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
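
As a rough sanity check of the relocations above, classes from the bundled AWS SDK should resolve
under the shaded prefix once the fat jar is on the classpath; the class name below is derived from
the relocation pattern and is not verified against a built jar:

    public static void main(String[] args) throws ClassNotFoundException {
        // software.amazon.* is relocated under the connector-specific shaded prefix in the fat jar.
        Class.forName(
                "org.apache.flink.connector.firehose.sink.shaded."
                        + "software.amazon.awssdk.services.firehose.FirehoseClient");
    }
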
diff --git a/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
new file mode 100644
index 0000000..7cf7302
--- /dev/null
+++ b/flink-sql-connector-aws-kinesis-firehose/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,48 @@
+flink-sql-connector-aws-kinesis-firehose
+
+Copyright 2014-2022 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This project bundles the following dependencies under the Apache Software License 2.0. (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+- software.amazon.awssdk:firehose:2.17.247
+- software.amazon.awssdk:aws-json-protocol:2.17.247
+- software.amazon.awssdk:protocol-core:2.17.247
+- software.amazon.awssdk:profiles:2.17.247
+- software.amazon.awssdk:sdk-core:2.17.247
+- software.amazon.awssdk:auth:2.17.247
+- software.amazon.awssdk:http-client-spi:2.17.247
+- software.amazon.awssdk:regions:2.17.247
+- software.amazon.awssdk:annotations:2.17.247
+- software.amazon.awssdk:utils:2.17.247
+- software.amazon.awssdk:aws-core:2.17.247
+- software.amazon.awssdk:metrics-spi:2.17.247
+- software.amazon.awssdk:apache-client:2.17.247
+- software.amazon.awssdk:netty-nio-client:2.17.247
+- software.amazon.awssdk:sts:2.17.247
+- software.amazon.awssdk:aws-query-protocol:2.17.247
+- software.amazon.awssdk:json-utils:2.17.247
+- software.amazon.awssdk:third-party-jackson-core:2.17.247
+- io.netty:netty-codec-http:4.1.70.Final
+- io.netty:netty-codec-http2:4.1.70.Final
+- io.netty:netty-codec:4.1.70.Final
+- io.netty:netty-transport:4.1.70.Final
+- io.netty:netty-resolver:4.1.70.Final
+- io.netty:netty-common:4.1.70.Final
+- io.netty:netty-buffer:4.1.70.Final
+- io.netty:netty-handler:4.1.70.Final
+- io.netty:netty-transport-native-epoll:linux-x86_64:4.1.70.Final
+- io.netty:netty-transport-native-unix-common:4.1.70.Final
+- io.netty:netty-transport-classes-epoll:4.1.70.Final
+- com.typesafe.netty:netty-reactive-streams-http:2.0.5
+- com.typesafe.netty:netty-reactive-streams:2.0.5
+- org.apache.httpcomponents:httpclient:4.5.13
+- org.apache.httpcomponents:httpcore:4.4.14
+- commons-logging:commons-logging:1.1.3
+
+This project bundles the following dependencies under the Creative Commons Zero license (https://creativecommons.org/publicdomain/zero/1.0/).
+
+- org.reactivestreams:reactive-streams:1.0.3
+
diff --git a/pom.xml b/pom.xml
index 0dc0342..d701652 100644
--- a/pom.xml
+++ b/pom.xml
@@ -71,6 +71,8 @@ under the License.
         <module>flink-connector-aws-base</module>
         <module>flink-connector-dynamodb</module>
         <module>flink-sql-connector-dynamodb</module>
+        <module>flink-connector-aws-kinesis-firehose</module>
+        <module>flink-sql-connector-aws-kinesis-firehose</module>
     </modules>
 
     <dependencies>


[flink-connector-aws] 07/08: [FLINK-29907][Connectors/Kinesis] Sync changes from release-1.16 branch

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dannycranmer pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-connector-aws.git

commit d5d6a6334312c99d1b24324da2de3f77e98654f2
Author: Danny Cranmer <da...@apache.org>
AuthorDate: Fri Dec 2 13:17:29 2022 +0000

    [FLINK-29907][Connectors/Kinesis] Sync changes from release-1.16 branch
---
 .../kinesis/proxy/DynamoDBStreamsProxy.java        |  17 +
 .../kinesis/FlinkKinesisConsumerMigrationTest.java |   3 +-
 .../kinesis/proxy/DynamoDBStreamsProxyTest.java    |  82 +++++
 .../testutils/FakeKinesisClientFactory.java        | 361 +++++++++++++++++++++
 ...onsumer-migration-test-flink1.16-empty-snapshot | Bin 0 -> 2870 bytes
 ...esis-consumer-migration-test-flink1.16-snapshot | Bin 0 -> 2938 bytes
 6 files changed, 462 insertions(+), 1 deletion(-)

diff --git a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
index 65c4035..79086f8 100644
--- a/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
+++ b/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxy.java
@@ -27,6 +27,7 @@ import com.amazonaws.regions.RegionUtils;
 import com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient;
 import com.amazonaws.services.kinesis.AmazonKinesis;
 import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
 import com.amazonaws.services.kinesis.model.Shard;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,6 +111,22 @@ public class DynamoDBStreamsProxy extends KinesisProxy {
         return result;
     }
 
+    @Override
+    public String getShardIterator(
+            StreamShardHandle shard, String shardIteratorType, @Nullable Object startingMarker)
+            throws InterruptedException {
+        try {
+            return super.getShardIterator(shard, shardIteratorType, startingMarker);
+        } catch (ResourceNotFoundException re) {
+            LOG.info(
+                    "Received ResourceNotFoundException. "
+                            + "Shard {} of stream {} is no longer valid, marking it as complete.",
+                    shard.getShard().getShardId(),
+                    shard.getStreamName());
+            return null;
+        }
+    }
+
     private List<StreamShardHandle> getShardsOfStream(
             String streamName, @Nullable String lastSeenShardId) throws InterruptedException {
         List<StreamShardHandle> shardsOfStream = new ArrayList<>();
diff --git a/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
index 0099f48..5770ee2 100644
--- a/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
+++ b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java
@@ -106,7 +106,8 @@ public class FlinkKinesisConsumerMigrationTest {
                 FlinkVersion.v1_12,
                 FlinkVersion.v1_13,
                 FlinkVersion.v1_14,
-                FlinkVersion.v1_15);
+                FlinkVersion.v1_15,
+                FlinkVersion.v1_16);
     }
 
     public FlinkKinesisConsumerMigrationTest(FlinkVersion testMigrateVersion) {
diff --git a/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxyTest.java b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxyTest.java
new file mode 100644
index 0000000..261e69d
--- /dev/null
+++ b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/DynamoDBStreamsProxyTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisClientFactory;
+import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils;
+
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.model.Shard;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+/** Test for methods in the {@link DynamoDBStreamsProxy} class. */
+class DynamoDBStreamsProxyTest {
+
+    private static final String FAKE_STREAM_NAME = "fake-stream";
+
+    private static final List<String> SHARD_IDS =
+            Arrays.asList(
+                    "shardId-000000000000",
+                    "shardId-000000000001",
+                    "shardId-000000000002",
+                    "shardId-000000000003");
+
+    @Test
+    void testGetShardIterator() throws Exception {
+
+        String invalidShardId = "shardId-000000000004";
+
+        DynamoDBStreamsProxy ddbStreamsProxy = new TestableDynamoDBStreamsProxy();
+
+        for (String shardId : SHARD_IDS) {
+            String shardIterator =
+                    ddbStreamsProxy.getShardIterator(getStreamShardHandle(shardId), "LATEST", null);
+
+            assertThat(shardIterator).isEqualTo("fakeShardIterator");
+        }
+
+        String invalidShardIterator =
+                ddbStreamsProxy.getShardIterator(
+                        getStreamShardHandle(invalidShardId), "LATEST", null);
+
+        assertThat(invalidShardIterator).isNull();
+    }
+
+    private StreamShardHandle getStreamShardHandle(String shardId) {
+        return new StreamShardHandle(FAKE_STREAM_NAME, new Shard().withShardId(shardId));
+    }
+
+    private static class TestableDynamoDBStreamsProxy extends DynamoDBStreamsProxy {
+
+        private TestableDynamoDBStreamsProxy() {
+            super(TestUtils.getStandardProperties());
+        }
+
+        @Override
+        protected AmazonKinesis createKinesisClient(Properties configProps) {
+            return FakeKinesisClientFactory.resourceNotFoundWhenGettingShardIterator(
+                    FAKE_STREAM_NAME, SHARD_IDS);
+        }
+    }
+}
diff --git a/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisClientFactory.java b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisClientFactory.java
new file mode 100644
index 0000000..a9f72a0
--- /dev/null
+++ b/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisClientFactory.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.testutils;
+
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.AmazonWebServiceRequest;
+import com.amazonaws.ResponseMetadata;
+import com.amazonaws.regions.Region;
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamRequest;
+import com.amazonaws.services.kinesis.model.AddTagsToStreamResult;
+import com.amazonaws.services.kinesis.model.CreateStreamRequest;
+import com.amazonaws.services.kinesis.model.CreateStreamResult;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodRequest;
+import com.amazonaws.services.kinesis.model.DecreaseStreamRetentionPeriodResult;
+import com.amazonaws.services.kinesis.model.DeleteStreamRequest;
+import com.amazonaws.services.kinesis.model.DeleteStreamResult;
+import com.amazonaws.services.kinesis.model.DeregisterStreamConsumerRequest;
+import com.amazonaws.services.kinesis.model.DeregisterStreamConsumerResult;
+import com.amazonaws.services.kinesis.model.DescribeLimitsRequest;
+import com.amazonaws.services.kinesis.model.DescribeLimitsResult;
+import com.amazonaws.services.kinesis.model.DescribeStreamConsumerRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamConsumerResult;
+import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.DescribeStreamSummaryRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamSummaryResult;
+import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringRequest;
+import com.amazonaws.services.kinesis.model.DisableEnhancedMonitoringResult;
+import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringRequest;
+import com.amazonaws.services.kinesis.model.EnableEnhancedMonitoringResult;
+import com.amazonaws.services.kinesis.model.GetRecordsRequest;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
+import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
+import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodRequest;
+import com.amazonaws.services.kinesis.model.IncreaseStreamRetentionPeriodResult;
+import com.amazonaws.services.kinesis.model.ListShardsRequest;
+import com.amazonaws.services.kinesis.model.ListShardsResult;
+import com.amazonaws.services.kinesis.model.ListStreamConsumersRequest;
+import com.amazonaws.services.kinesis.model.ListStreamConsumersResult;
+import com.amazonaws.services.kinesis.model.ListStreamsRequest;
+import com.amazonaws.services.kinesis.model.ListStreamsResult;
+import com.amazonaws.services.kinesis.model.ListTagsForStreamRequest;
+import com.amazonaws.services.kinesis.model.ListTagsForStreamResult;
+import com.amazonaws.services.kinesis.model.MergeShardsRequest;
+import com.amazonaws.services.kinesis.model.MergeShardsResult;
+import com.amazonaws.services.kinesis.model.PutRecordRequest;
+import com.amazonaws.services.kinesis.model.PutRecordResult;
+import com.amazonaws.services.kinesis.model.PutRecordsRequest;
+import com.amazonaws.services.kinesis.model.PutRecordsResult;
+import com.amazonaws.services.kinesis.model.RegisterStreamConsumerRequest;
+import com.amazonaws.services.kinesis.model.RegisterStreamConsumerResult;
+import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamRequest;
+import com.amazonaws.services.kinesis.model.RemoveTagsFromStreamResult;
+import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
+import com.amazonaws.services.kinesis.model.SplitShardRequest;
+import com.amazonaws.services.kinesis.model.SplitShardResult;
+import com.amazonaws.services.kinesis.model.StartStreamEncryptionRequest;
+import com.amazonaws.services.kinesis.model.StartStreamEncryptionResult;
+import com.amazonaws.services.kinesis.model.StopStreamEncryptionRequest;
+import com.amazonaws.services.kinesis.model.StopStreamEncryptionResult;
+import com.amazonaws.services.kinesis.model.UpdateShardCountRequest;
+import com.amazonaws.services.kinesis.model.UpdateShardCountResult;
+import com.amazonaws.services.kinesis.model.UpdateStreamModeRequest;
+import com.amazonaws.services.kinesis.model.UpdateStreamModeResult;
+import com.amazonaws.services.kinesis.waiters.AmazonKinesisWaiters;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+/**
+ * Factory for fake {@link AmazonKinesis} clients with different behaviours, used when testing
+ * classes built on the {@link KinesisProxyInterface} interface.
+ */
+public class FakeKinesisClientFactory {
+
+    public static AmazonKinesis resourceNotFoundWhenGettingShardIterator(
+            String streamName, List<String> shardIds) {
+        return new AmazonKinesis() {
+
+            @Override
+            public GetShardIteratorResult getShardIterator(
+                    GetShardIteratorRequest getShardIteratorRequest) {
+                if (getShardIteratorRequest.getStreamName().equals(streamName)
+                        && shardIds.contains(getShardIteratorRequest.getShardId())) {
+                    return new GetShardIteratorResult().withShardIterator("fakeShardIterator");
+                }
+
+                final ResourceNotFoundException ex =
+                        new ResourceNotFoundException(
+                                "Requested resource not found: Shard does not exist");
+                ex.setErrorType(AmazonServiceException.ErrorType.Client);
+
+                throw ex;
+            }
+
+            @Override
+            public void setEndpoint(String s) {}
+
+            @Override
+            public void setRegion(Region region) {}
+
+            @Override
+            public AddTagsToStreamResult addTagsToStream(
+                    AddTagsToStreamRequest addTagsToStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public CreateStreamResult createStream(CreateStreamRequest createStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public CreateStreamResult createStream(String s, Integer integer) {
+                return null;
+            }
+
+            @Override
+            public DecreaseStreamRetentionPeriodResult decreaseStreamRetentionPeriod(
+                    DecreaseStreamRetentionPeriodRequest decreaseStreamRetentionPeriodRequest) {
+                return null;
+            }
+
+            @Override
+            public DeleteStreamResult deleteStream(DeleteStreamRequest deleteStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public DeleteStreamResult deleteStream(String s) {
+                return null;
+            }
+
+            @Override
+            public DeregisterStreamConsumerResult deregisterStreamConsumer(
+                    DeregisterStreamConsumerRequest deregisterStreamConsumerRequest) {
+                return null;
+            }
+
+            @Override
+            public DescribeLimitsResult describeLimits(
+                    DescribeLimitsRequest describeLimitsRequest) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamResult describeStream(
+                    DescribeStreamRequest describeStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamResult describeStream(String s) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamResult describeStream(String s, String s1) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamResult describeStream(String s, Integer integer, String s1) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamConsumerResult describeStreamConsumer(
+                    DescribeStreamConsumerRequest describeStreamConsumerRequest) {
+                return null;
+            }
+
+            @Override
+            public DescribeStreamSummaryResult describeStreamSummary(
+                    DescribeStreamSummaryRequest describeStreamSummaryRequest) {
+                return null;
+            }
+
+            @Override
+            public DisableEnhancedMonitoringResult disableEnhancedMonitoring(
+                    DisableEnhancedMonitoringRequest disableEnhancedMonitoringRequest) {
+                return null;
+            }
+
+            @Override
+            public EnableEnhancedMonitoringResult enableEnhancedMonitoring(
+                    EnableEnhancedMonitoringRequest enableEnhancedMonitoringRequest) {
+                return null;
+            }
+
+            @Override
+            public GetRecordsResult getRecords(GetRecordsRequest getRecordsRequest) {
+                return null;
+            }
+
+            @Override
+            public GetShardIteratorResult getShardIterator(String s, String s1, String s2) {
+                return null;
+            }
+
+            @Override
+            public GetShardIteratorResult getShardIterator(
+                    String s, String s1, String s2, String s3) {
+                return null;
+            }
+
+            @Override
+            public IncreaseStreamRetentionPeriodResult increaseStreamRetentionPeriod(
+                    IncreaseStreamRetentionPeriodRequest increaseStreamRetentionPeriodRequest) {
+                return null;
+            }
+
+            @Override
+            public ListShardsResult listShards(ListShardsRequest listShardsRequest) {
+                return null;
+            }
+
+            @Override
+            public ListStreamConsumersResult listStreamConsumers(
+                    ListStreamConsumersRequest listStreamConsumersRequest) {
+                return null;
+            }
+
+            @Override
+            public ListStreamsResult listStreams(ListStreamsRequest listStreamsRequest) {
+                return null;
+            }
+
+            @Override
+            public ListStreamsResult listStreams() {
+                return null;
+            }
+
+            @Override
+            public ListStreamsResult listStreams(String s) {
+                return null;
+            }
+
+            @Override
+            public ListStreamsResult listStreams(Integer integer, String s) {
+                return null;
+            }
+
+            @Override
+            public ListTagsForStreamResult listTagsForStream(
+                    ListTagsForStreamRequest listTagsForStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public MergeShardsResult mergeShards(MergeShardsRequest mergeShardsRequest) {
+                return null;
+            }
+
+            @Override
+            public MergeShardsResult mergeShards(String s, String s1, String s2) {
+                return null;
+            }
+
+            @Override
+            public PutRecordResult putRecord(PutRecordRequest putRecordRequest) {
+                return null;
+            }
+
+            @Override
+            public PutRecordResult putRecord(String s, ByteBuffer byteBuffer, String s1) {
+                return null;
+            }
+
+            @Override
+            public PutRecordResult putRecord(
+                    String s, ByteBuffer byteBuffer, String s1, String s2) {
+                return null;
+            }
+
+            @Override
+            public PutRecordsResult putRecords(PutRecordsRequest putRecordsRequest) {
+                return null;
+            }
+
+            @Override
+            public RegisterStreamConsumerResult registerStreamConsumer(
+                    RegisterStreamConsumerRequest registerStreamConsumerRequest) {
+                return null;
+            }
+
+            @Override
+            public RemoveTagsFromStreamResult removeTagsFromStream(
+                    RemoveTagsFromStreamRequest removeTagsFromStreamRequest) {
+                return null;
+            }
+
+            @Override
+            public SplitShardResult splitShard(SplitShardRequest splitShardRequest) {
+                return null;
+            }
+
+            @Override
+            public SplitShardResult splitShard(String s, String s1, String s2) {
+                return null;
+            }
+
+            @Override
+            public StartStreamEncryptionResult startStreamEncryption(
+                    StartStreamEncryptionRequest startStreamEncryptionRequest) {
+                return null;
+            }
+
+            @Override
+            public StopStreamEncryptionResult stopStreamEncryption(
+                    StopStreamEncryptionRequest stopStreamEncryptionRequest) {
+                return null;
+            }
+
+            @Override
+            public UpdateShardCountResult updateShardCount(
+                    UpdateShardCountRequest updateShardCountRequest) {
+                return null;
+            }
+
+            @Override
+            public UpdateStreamModeResult updateStreamMode(
+                    UpdateStreamModeRequest updateStreamModeRequest) {
+                return null;
+            }
+
+            @Override
+            public void shutdown() {}
+
+            @Override
+            public ResponseMetadata getCachedResponseMetadata(
+                    AmazonWebServiceRequest amazonWebServiceRequest) {
+                return null;
+            }
+
+            @Override
+            public AmazonKinesisWaiters waiters() {
+                return null;
+            }
+        };
+    }
+}
diff --git a/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-empty-snapshot b/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-empty-snapshot
new file mode 100644
index 0000000..0cdae17
Binary files /dev/null and b/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-empty-snapshot differ
diff --git a/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-snapshot b/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-snapshot
new file mode 100644
index 0000000..1e8d549
Binary files /dev/null and b/flink-connector-kinesis/src/test/resources/kinesis-consumer-migration-test-flink1.16-snapshot differ