You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@flink.apache.org by tz...@apache.org on 2020/09/21 08:33:00 UTC

[flink] branch master updated (9565099 -> 17f1af2)

This is an automated email from the ASF dual-hosted git repository.

tzulitai pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git.


    from 9565099  [FLINK-19280][jdbc] Fix option "sink.buffer-flush.max-rows" for JDBC can't be disabled by setting to zero
     new bbcd0c7  [FLINK-18515][Kinesis] Adding FanOutRecordPublisher for Kinesis EFO support
     new ad4e9a6  [FLINK-18661][Kinesis] Stream consumer Registration/Deregistration
     new 17f1af2  [FLINK-18661] [kinesis] Updated FullJitterBackoff Default values for describeStream and describeStreamConsumer

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 flink-connectors/flink-connector-kinesis/pom.xml   |   1 +
 .../connectors/kinesis/FlinkKinesisConsumer.java   |   3 +
 ...V2Interface.java => FlinkKinesisException.java} |  26 +-
 .../kinesis/config/ConsumerConfigConstants.java    |  45 +-
 .../internals/DynamoDBStreamsDataFetcher.java      |   3 +-
 .../kinesis/internals/KinesisDataFetcher.java      | 103 ++--
 .../kinesis/internals/ShardConsumer.java           |  13 +-
 .../publisher/RecordPublisherFactory.java          |   7 +
 .../publisher/fanout/FanOutRecordPublisher.java    | 246 +++++++++
 .../fanout/FanOutRecordPublisherConfiguration.java |  61 ++-
 .../FanOutRecordPublisherFactory.java}             |  77 +--
 .../publisher/fanout/FanOutShardSubscriber.java    | 468 ++++++++++++++++
 .../publisher/fanout/StreamConsumerRegistrar.java  | 275 ++++++++++
 .../publisher/polling/PollingRecordPublisher.java  |   7 +-
 .../polling/PollingRecordPublisherFactory.java     |   2 +-
 .../kinesis/proxy/FullJitterBackoff.java           |  61 +++
 .../connectors/kinesis/proxy/KinesisProxy.java     |  41 +-
 .../kinesis/proxy/KinesisProxyInterface.java       |   1 +
 .../connectors/kinesis/proxy/KinesisProxyV2.java   | 168 +++++-
 .../kinesis/proxy/KinesisProxyV2Factory.java       |  60 +++
 .../kinesis/proxy/KinesisProxyV2Interface.java     |  29 +
 .../streaming/connectors/kinesis/util/AWSUtil.java |  12 +-
 .../connectors/kinesis/util/AwsV2Util.java         |  85 ++-
 .../connectors/kinesis/util/KinesisConfigUtil.java |   9 +
 .../kinesis/util/StreamConsumerRegistrarUtil.java  | 164 ++++++
 .../kinesis/FlinkKinesisConsumerTest.java          |  10 +-
 .../kinesis/internals/KinesisDataFetcherTest.java  |  48 +-
 .../kinesis/internals/ShardConsumerFanOutTest.java | 242 +++++++++
 .../kinesis/internals/ShardConsumerTest.java       | 111 +---
 .../kinesis/internals/ShardConsumerTestUtils.java  | 129 +++++
 .../FanOutRecordPublisherConfigurationTest.java    |  66 ++-
 .../fanout/FanOutRecordPublisherTest.java          | 443 +++++++++++++++
 .../fanout/StreamConsumerRegistrarTest.java        | 309 +++++++++++
 .../polling/PollingRecordPublisherTest.java        |  30 +-
 .../kinesis/proxy/KinesisProxyV2Test.java          | 357 +++++++++++++
 .../FakeKinesisFanOutBehavioursFactory.java        | 592 +++++++++++++++++++++
 .../connectors/kinesis/testutils/TestUtils.java    |  41 ++
 .../testutils/TestableKinesisDataFetcher.java      |  39 +-
 ...inesisDataFetcherForShardConsumerException.java |   4 +-
 .../connectors/kinesis/util/AWSUtilTest.java       |   8 +-
 .../connectors/kinesis/util/AwsV2UtilTest.java     | 137 ++++-
 .../kinesis/util/KinesisConfigUtilTest.java        |  57 +-
 .../util/StreamConsumerRegistrarUtilTest.java      |  74 +++
 43 files changed, 4337 insertions(+), 327 deletions(-)
 copy flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/{proxy/KinesisProxyV2Interface.java => FlinkKinesisException.java} (52%)
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
 copy flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/{polling/PollingRecordPublisherFactory.java => fanout/FanOutRecordPublisherFactory.java} (52%)
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtil.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
 create mode 100644 flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java


[flink] 03/03: [FLINK-18661] [kinesis] Updated FullJitterBackoff Default values for describeStream and describeStreamConsumer

Posted by tz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tzulitai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 17f1af220c2849e7ab2e7e004ce411b761c8966c
Author: Hong Teoh <li...@amazon.com>
AuthorDate: Mon Sep 14 22:48:10 2020 +0100

    [FLINK-18661] [kinesis] Updated FullJitterBackoff Default values for describeStream and describeStreamConsumer
    
    This closes #13189.
---
 .../connectors/kinesis/config/ConsumerConfigConstants.java     | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index 00da231..5c9f62c 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -242,9 +242,9 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	public static final String DEFAULT_STREAM_TIMESTAMP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX";
 
-	public static final int DEFAULT_STREAM_DESCRIBE_RETRIES = 10;
+	public static final int DEFAULT_STREAM_DESCRIBE_RETRIES = 50;
 
-	public static final long DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE = 1000L;
+	public static final long DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE = 2000L;
 
 	public static final long DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX = 5000L;
 
@@ -258,11 +258,11 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	public static final int DEFAULT_LIST_SHARDS_RETRIES = 10;
 
-	public static final int DEFAULT_DESCRIBE_STREAM_CONSUMER_RETRIES = 10;
+	public static final int DEFAULT_DESCRIBE_STREAM_CONSUMER_RETRIES = 50;
 
-	public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE = 200L;
+	public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE = 2000L;
 
-	public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX = 1000L;
+	public static final long DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX = 5000L;
 
 	public static final double DEFAULT_DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
 


[flink] 01/03: [FLINK-18515][Kinesis] Adding FanOutRecordPublisher for Kinesis EFO support

Posted by tz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tzulitai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit bbcd0c791371c2c6b3e477a83adfbd78dbee2602
Author: Danny Cranmer <cr...@amazon.com>
AuthorDate: Fri Sep 4 10:35:35 2020 +0100

    [FLINK-18515][Kinesis] Adding FanOutRecordPublisher for Kinesis EFO support
    
    This closes #13189.
---
 flink-connectors/flink-connector-kinesis/pom.xml   |   1 +
 .../kinesis/config/ConsumerConfigConstants.java    |  15 +-
 .../internals/DynamoDBStreamsDataFetcher.java      |   3 +-
 .../kinesis/internals/KinesisDataFetcher.java      | 100 +++--
 .../kinesis/internals/ShardConsumer.java           |  13 +-
 .../publisher/RecordPublisherFactory.java          |   7 +
 .../publisher/fanout/FanOutRecordPublisher.java    | 246 +++++++++++
 .../fanout/FanOutRecordPublisherConfiguration.java |   2 +-
 .../FanOutRecordPublisherFactory.java}             |  77 ++--
 .../publisher/fanout/FanOutShardSubscriber.java    | 468 +++++++++++++++++++++
 .../publisher/polling/PollingRecordPublisher.java  |   5 -
 .../polling/PollingRecordPublisherFactory.java     |   2 +-
 .../kinesis/proxy/FullJitterBackoff.java           |  61 +++
 .../connectors/kinesis/proxy/KinesisProxy.java     |  41 +-
 .../kinesis/proxy/KinesisProxyInterface.java       |   1 +
 .../connectors/kinesis/proxy/KinesisProxyV2.java   |  19 +-
 .../kinesis/proxy/KinesisProxyV2Interface.java     |  14 +
 .../streaming/connectors/kinesis/util/AWSUtil.java |  12 +-
 .../connectors/kinesis/util/AwsV2Util.java         |  36 +-
 .../connectors/kinesis/util/KinesisConfigUtil.java |   3 +
 .../kinesis/FlinkKinesisConsumerTest.java          |  10 +-
 .../kinesis/internals/KinesisDataFetcherTest.java  |  45 +-
 .../kinesis/internals/ShardConsumerFanOutTest.java | 242 +++++++++++
 .../kinesis/internals/ShardConsumerTest.java       | 111 +----
 .../kinesis/internals/ShardConsumerTestUtils.java  | 129 ++++++
 .../FanOutRecordPublisherConfigurationTest.java    |   1 -
 .../fanout/FanOutRecordPublisherTest.java          | 443 +++++++++++++++++++
 .../polling/PollingRecordPublisherTest.java        |  30 +-
 .../kinesis/proxy/KinesisProxyV2Test.java          |  60 +++
 .../FakeKinesisFanOutBehavioursFactory.java        | 391 +++++++++++++++++
 .../connectors/kinesis/testutils/TestUtils.java    |  41 ++
 .../testutils/TestableKinesisDataFetcher.java      |  39 +-
 ...inesisDataFetcherForShardConsumerException.java |   4 +-
 .../connectors/kinesis/util/AWSUtilTest.java       |   8 +-
 .../connectors/kinesis/util/AwsV2UtilTest.java     |  38 +-
 .../kinesis/util/KinesisConfigUtilTest.java        |  35 +-
 36 files changed, 2484 insertions(+), 269 deletions(-)

diff --git a/flink-connectors/flink-connector-kinesis/pom.xml b/flink-connectors/flink-connector-kinesis/pom.xml
index 2a96598..9d22d8c 100644
--- a/flink-connectors/flink-connector-kinesis/pom.xml
+++ b/flink-connectors/flink-connector-kinesis/pom.xml
@@ -103,6 +103,7 @@ under the License.
 			<scope>test</scope>
 		</dependency>
 
+		<!-- Amazon AWS SDK v1.x dependencies -->
 		<dependency>
 			<groupId>com.amazonaws</groupId>
 			<artifactId>aws-java-sdk-kinesis</artifactId>
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index dde4821..f003b3b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -224,6 +224,8 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 	/** The maximum number of records that will be buffered before suspending consumption of a shard. */
 	public static final String WATERMARK_SYNC_QUEUE_CAPACITY = "flink.watermark.sync.queue.capacity";
 
+	public static final String EFO_HTTP_CLIENT_MAX_CONCURRENCY = "flink.stream.efo.http-client.max-concurrency";
+
 	// ------------------------------------------------------------------------
 	//  Default values for consumer configuration
 	// ------------------------------------------------------------------------
@@ -272,7 +274,7 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	public static final double DEFAULT_DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
 
-	public static final int DEFAULT_SUBSCRIBE_TO_SHARD_RETRIES = 5;
+	public static final int DEFAULT_SUBSCRIBE_TO_SHARD_RETRIES = 10;
 
 	public static final long DEFAULT_SUBSCRIBE_TO_SHARD_BACKOFF_BASE = 1000L;
 
@@ -308,10 +310,21 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	public static final long DEFAULT_WATERMARK_SYNC_MILLIS = 30_000;
 
 +	public static final int DEFAULT_EFO_HTTP_CLIENT_MAX_CONCURRENCY = 10_000;
+
 	/**
 	 * To avoid shard iterator expires in {@link ShardConsumer}s, the value for the configured
 	 * getRecords interval can not exceed 5 minutes, which is the expire time for retrieved iterators.
 	 */
 	public static final long MAX_SHARD_GETRECORDS_INTERVAL_MILLIS = 300000L;
 
+	/**
+	 * Build the key of an EFO consumer ARN according to a stream name.
+	 * @param streamName the stream name the key is built upon.
+	 * @return a key of EFO consumer ARN.
+	 */
+	public static String efoConsumerArn(final String streamName) {
+		return EFO_CONSUMER_ARN_PREFIX + "." + streamName;
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
index afa1c28..fbf0d01 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/DynamoDBStreamsDataFetcher.java
@@ -70,7 +70,8 @@ public class DynamoDBStreamsDataFetcher<T> extends KinesisDataFetcher<T> {
 			new ArrayList<>(),
 			createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
 			// use DynamoDBStreamsProxy
-			DynamoDBStreamsProxy::create);
+			DynamoDBStreamsProxy::create,
+			null);
 	}
 
 	@Override
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index 2be9b1c..133a4d3 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -27,8 +27,10 @@ import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
 import org.apache.flink.streaming.api.watermark.Watermark;
 import org.apache.flink.streaming.connectors.kinesis.KinesisShardAssigner;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherFactory;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisherFactory;
 import org.apache.flink.streaming.connectors.kinesis.metrics.KinesisConsumerMetricConstants;
 import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
@@ -41,8 +43,11 @@ import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
 import org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter;
 import org.apache.flink.streaming.connectors.kinesis.util.WatermarkTracker;
 import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue;
@@ -56,6 +61,9 @@ import com.amazonaws.services.kinesis.model.SequenceNumberRange;
 import com.amazonaws.services.kinesis.model.Shard;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+
+import javax.annotation.Nullable;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -74,6 +82,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.POLLING;
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
 /**
@@ -183,6 +193,9 @@ public class KinesisDataFetcher<T> {
 	/** The Kinesis proxy factory that will be used to create instances for discovery and shard consumers. */
 	private final FlinkKinesisProxyFactory kinesisProxyFactory;
 
+	/** The Kinesis proxy V2 factory that will be used to create instances for EFO shard consumers. */
+	private final FlinkKinesisProxyV2Factory kinesisProxyV2Factory;
+
 	/** The Kinesis proxy that the fetcher will be using to discover new shards. */
 	private final KinesisProxyInterface kinesis;
 
@@ -243,6 +256,13 @@ public class KinesisDataFetcher<T> {
 	}
 
 	/**
 +	 * Factory to create Kinesis proxy V2 instances used by a fetcher.
+	 */
+	public interface FlinkKinesisProxyV2Factory {
+		KinesisProxyV2Interface create(Properties configProps);
+	}
+
+	/**
 	 * The wrapper that holds the watermark handling related parameters
 	 * of a record produced by the shard consumer thread.
 	 *
@@ -318,14 +338,15 @@ public class KinesisDataFetcher<T> {
 	 * @param configProps the consumer configuration properties
 	 * @param deserializationSchema deserialization schema
 	 */
-	public KinesisDataFetcher(List<String> streams,
-							SourceFunction.SourceContext<T> sourceContext,
-							RuntimeContext runtimeContext,
-							Properties configProps,
-							KinesisDeserializationSchema<T> deserializationSchema,
-							KinesisShardAssigner shardAssigner,
-							AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
-							WatermarkTracker watermarkTracker) {
+	public KinesisDataFetcher(
+			final List<String> streams,
+			final SourceFunction.SourceContext<T> sourceContext,
+			final RuntimeContext runtimeContext,
+			final Properties configProps,
+			final KinesisDeserializationSchema<T> deserializationSchema,
+			final KinesisShardAssigner shardAssigner,
+			final AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
+			final WatermarkTracker watermarkTracker) {
 		this(streams,
 			sourceContext,
 			sourceContext.getCheckpointLock(),
@@ -338,23 +359,26 @@ public class KinesisDataFetcher<T> {
 			new AtomicReference<>(),
 			new ArrayList<>(),
 			createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
-			KinesisProxy::create);
+			KinesisProxy::create,
+			KinesisDataFetcher::createKinesisProxyV2);
 	}
 
 	@VisibleForTesting
-	protected KinesisDataFetcher(List<String> streams,
-								SourceFunction.SourceContext<T> sourceContext,
-								Object checkpointLock,
-								RuntimeContext runtimeContext,
-								Properties configProps,
-								KinesisDeserializationSchema<T> deserializationSchema,
-								KinesisShardAssigner shardAssigner,
-								AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
-								WatermarkTracker watermarkTracker,
-								AtomicReference<Throwable> error,
-								List<KinesisStreamShardState> subscribedShardsState,
-								HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
-								FlinkKinesisProxyFactory kinesisProxyFactory) {
+	protected KinesisDataFetcher(
+			final List<String> streams,
+			final SourceFunction.SourceContext<T> sourceContext,
+			final Object checkpointLock,
+			final RuntimeContext runtimeContext,
+			final Properties configProps,
+			final KinesisDeserializationSchema<T> deserializationSchema,
+			final KinesisShardAssigner shardAssigner,
+			final AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
+			final WatermarkTracker watermarkTracker,
+			final AtomicReference<Throwable> error,
+			final List<KinesisStreamShardState> subscribedShardsState,
+			final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
+			final FlinkKinesisProxyFactory kinesisProxyFactory,
+			@Nullable final FlinkKinesisProxyV2Factory kinesisProxyV2Factory) {
 		this.streams = checkNotNull(streams);
 		this.configProps = checkNotNull(configProps);
 		this.sourceContext = checkNotNull(sourceContext);
@@ -367,6 +391,7 @@ public class KinesisDataFetcher<T> {
 		this.periodicWatermarkAssigner = periodicWatermarkAssigner;
 		this.watermarkTracker = watermarkTracker;
 		this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
+		this.kinesisProxyV2Factory = kinesisProxyV2Factory;
 		this.kinesis = kinesisProxyFactory.create(configProps);
 		this.recordPublisherFactory = createRecordPublisherFactory();
 
@@ -379,6 +404,7 @@ public class KinesisDataFetcher<T> {
 
 		this.shardConsumersExecutor =
 			createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
+
 		this.recordEmitter = createRecordEmitter(configProps);
 	}
 
@@ -402,11 +428,11 @@ public class KinesisDataFetcher<T> {
 	 * @return shard consumer
 	 */
 	protected ShardConsumer<T> createShardConsumer(
-		Integer subscribedShardStateIndex,
-		StreamShardHandle subscribedShard,
-		SequenceNumber lastSequenceNum,
-		MetricGroup metricGroup,
-		KinesisDeserializationSchema<T> shardDeserializer) throws InterruptedException {
+			final Integer subscribedShardStateIndex,
+			final StreamShardHandle subscribedShard,
+			final SequenceNumber lastSequenceNum,
+			final MetricGroup metricGroup,
+			final KinesisDeserializationSchema<T> shardDeserializer) throws InterruptedException {
 
 		return new ShardConsumer<>(
 			this,
@@ -418,8 +444,17 @@ public class KinesisDataFetcher<T> {
 			shardDeserializer);
 	}
 
-	private RecordPublisherFactory createRecordPublisherFactory() {
-		return new PollingRecordPublisherFactory(kinesisProxyFactory);
+	protected RecordPublisherFactory createRecordPublisherFactory() {
+		RecordPublisherType recordPublisherType = RecordPublisherType.valueOf(
+			configProps.getProperty(RECORD_PUBLISHER_TYPE, POLLING.name()));
+
+		switch (recordPublisherType) {
+			case EFO:
+				return new FanOutRecordPublisherFactory(kinesisProxyV2Factory.create(configProps));
+			case POLLING:
+			default:
+				return new PollingRecordPublisherFactory(kinesisProxyFactory);
+		}
 	}
 
 	protected RecordPublisher createRecordPublisher(
@@ -432,6 +467,11 @@ public class KinesisDataFetcher<T> {
 		return recordPublisherFactory.create(startingPosition, configProps, metricGroup, subscribedShard);
 	}
 
+	private static KinesisProxyV2Interface createKinesisProxyV2(final Properties configProps) {
+		final KinesisAsyncClient client = AwsV2Util.createKinesisAsyncClient(configProps);
+		return new KinesisProxyV2(client);
+	}
+
 	/**
 	 * Starts the fetcher. After starting the fetcher, it can only
 	 * be stopped by calling {@link KinesisDataFetcher#shutdownFetcher()}.
@@ -672,6 +712,8 @@ public class KinesisDataFetcher<T> {
 			LOG.info("Shutting down the shard consumer threads of subtask {} ...", indexOfThisConsumerSubtask);
 		}
 		shardConsumersExecutor.shutdownNow();
+
+		recordPublisherFactory.close();
 	}
 
 	/** After calling {@link KinesisDataFetcher#shutdownFetcher()}, this can be called to await the fetcher shutdown. */
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
index d9c0d9d..5bf0b09 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumer.java
@@ -28,6 +28,8 @@ import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 
 import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -51,6 +53,9 @@ import static org.apache.flink.util.Preconditions.checkNotNull;
  */
 @Internal
 public class ShardConsumer<T> implements Runnable {
+
+	private static final Logger LOG = LoggerFactory.getLogger(ShardConsumer.class);
+
 	private final KinesisDeserializationSchema<T> deserializer;
 
 	private final int subscribedShardStateIndex;
@@ -102,6 +107,11 @@ public class ShardConsumer<T> implements Runnable {
 		try {
 			while (isRunning()) {
 				final RecordPublisherRunResult result = recordPublisher.run(batch -> {
+					if (!batch.getDeaggregatedRecords().isEmpty()) {
+						LOG.debug("stream: {}, shard: {}, millis behind latest: {}, batch size: {}",
+							subscribedShard.getStreamName(), subscribedShard.getShard().getShardId(),
+							batch.getMillisBehindLatest(), batch.getDeaggregatedRecordSize());
+					}
 					for (UserRecord userRecord : batch.getDeaggregatedRecords()) {
 						if (filterDeaggregatedRecord(userRecord)) {
 							deserializeRecordForCollectionAndUpdateState(userRecord);
@@ -118,7 +128,6 @@ public class ShardConsumer<T> implements Runnable {
 
 				if (result == COMPLETE) {
 					fetcherRef.updateState(subscribedShardStateIndex, SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get());
-
 					// we can close this consumer thread once we've reached the end of the subscribed shard
 					break;
 				}
@@ -188,7 +197,7 @@ public class ShardConsumer<T> implements Runnable {
 	 * This method is to support restarting from a partially consumed aggregated sequence number.
 	 *
 	 * @param record the record to filter
-	 * @return {@code true} if the record should be retained
+	 * @return true if the record should be retained
 	 */
 	private boolean filterDeaggregatedRecord(final UserRecord record) {
 		if (!lastSequenceNum.isAggregated()) {
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
index 0dcd0cb..672dc38 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordPublisherFactory.java
@@ -45,4 +45,11 @@ public interface RecordPublisherFactory {
 			MetricGroup metricGroup,
 			StreamShardHandle streamShardHandle) throws InterruptedException;
 
+	/**
+	 * Destroy any open resources used by the factory.
+	 */
+	default void close() {
+		// Do nothing by default
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
new file mode 100644
index 0000000..2174029
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisher.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutShardSubscriber.FanOutSubscriberException;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.model.EncryptionType;
+import software.amazon.awssdk.services.kinesis.model.Record;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+
+import javax.annotation.Nonnull;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
+import static software.amazon.awssdk.services.kinesis.model.StartingPosition.builder;
+
+/**
+ * A {@link RecordPublisher} that will read and forward records from Kinesis using EFO, to the subscriber.
+ * Records are consumed via Enhanced Fan Out subscriptions using SubscribeToShard API.
+ */
+@Internal
+public class FanOutRecordPublisher implements RecordPublisher {
+
+	private static final Logger LOG = LoggerFactory.getLogger(FanOutRecordPublisher.class);
+
+	private final FullJitterBackoff backoff;
+
+	private final String consumerArn;
+
+	private final KinesisProxyV2Interface kinesisProxy;
+
+	private final StreamShardHandle subscribedShard;
+
+	private final FanOutRecordPublisherConfiguration configuration;
+
+	/** The current attempt in the case of subsequent recoverable errors. */
+	private int attempt = 0;
+
+	private StartingPosition nextStartingPosition;
+
+	/**
+	 * Instantiate a new FanOutRecordPublisher.
+	 * Consumes data from KDS using EFO SubscribeToShard over AWS SDK V2.x
+	 *
+	 * @param startingPosition the position in the shard to start consuming from
+	 * @param consumerArn the consumer ARN of the stream consumer
+	 * @param subscribedShard the shard to consume from
+	 * @param kinesisProxy the proxy used to talk to Kinesis services
+	 * @param configuration the record publisher configuration
+	 */
+	public FanOutRecordPublisher(
+			final StartingPosition startingPosition,
+			final String consumerArn,
+			final StreamShardHandle subscribedShard,
+			final KinesisProxyV2Interface kinesisProxy,
+			final FanOutRecordPublisherConfiguration configuration,
+			final FullJitterBackoff backoff) {
+		this.nextStartingPosition = Preconditions.checkNotNull(startingPosition);
+		this.consumerArn = Preconditions.checkNotNull(consumerArn);
+		this.subscribedShard = Preconditions.checkNotNull(subscribedShard);
+		this.kinesisProxy = Preconditions.checkNotNull(kinesisProxy);
+		this.configuration = Preconditions.checkNotNull(configuration);
+		this.backoff = Preconditions.checkNotNull(backoff);
+	}
+
+	@Override
+	public RecordPublisherRunResult run(final RecordBatchConsumer recordConsumer) throws InterruptedException {
+		LOG.info("Running fan out record publisher on {}::{} from {} - {}",
+			subscribedShard.getStreamName(),
+			subscribedShard.getShard().getShardId(),
+			nextStartingPosition.getShardIteratorType(),
+			nextStartingPosition.getStartingMarker());
+
+		// Each batch delivered by the subscription advances the next starting position,
+		// so a resubscription after an error resumes from the last accepted sequence number.
+		Consumer<SubscribeToShardEvent> eventConsumer = event -> {
+			RecordBatch recordBatch = new RecordBatch(toSdkV1Records(event.records()), subscribedShard, event.millisBehindLatest());
+			SequenceNumber sequenceNumber = recordConsumer.accept(recordBatch);
+			nextStartingPosition = StartingPosition.continueFromSequenceNumber(sequenceNumber);
+		};
+
+		RecordPublisherRunResult result = runWithBackoff(eventConsumer);
+
+		LOG.info("Subscription expired {}::{}, with status {}",
+			subscribedShard.getStreamName(),
+			subscribedShard.getShard().getShardId(),
+			result);
+
+		return result;
+	}
+
+	/**
+	 * Runs the record publisher, will sleep for configuration computed jitter period in the case of certain exceptions.
+	 * Unrecoverable exceptions are thrown to terminate the application.
+	 *
+	 * @param eventConsumer the consumer to pass events to
+	 * @return {@code COMPLETE} if the shard is complete and this shard consumer should exit
+	 * @throws InterruptedException if the thread is interrupted while backing off
+	 */
+	private RecordPublisherRunResult runWithBackoff(
+			final Consumer<SubscribeToShardEvent> eventConsumer) throws InterruptedException {
+		FanOutShardSubscriber fanOutShardSubscriber = new FanOutShardSubscriber(
+			consumerArn,
+			subscribedShard.getShard().getShardId(),
+			kinesisProxy);
+		boolean complete;
+
+		try {
+			complete = fanOutShardSubscriber.subscribeToShardAndConsumeRecords(
+				toSdkV2StartingPosition(nextStartingPosition), eventConsumer);
+			// A successful subscription resets the recoverable-error attempt counter
+			attempt = 0;
+		} catch (FanOutSubscriberException ex) {
+			// We have received an error from the network layer
+			// This can be due to limits being exceeded, network timeouts, etc
+			// We should backoff, reacquire a subscription and try again
+			if (ex.getCause() instanceof ResourceNotFoundException) {
+				LOG.warn("Received ResourceNotFoundException. Either the shard does not exist, or the stream subscriber has been deregistered. " +
+					"Marking this shard as complete {} ({})", subscribedShard.getShard().getShardId(), consumerArn);
+
+				return COMPLETE;
+			}
+
+			if (attempt == configuration.getSubscribeToShardMaxRetries()) {
+				throw new RuntimeException("Maximum retries exceeded for SubscribeToShard. " +
+					"Failed " + configuration.getSubscribeToShardMaxRetries() + " times.");
+			}
+
+			attempt++;
+			backoff(ex);
+			return INCOMPLETE;
+		}
+
+		return complete ? COMPLETE : INCOMPLETE;
+	}
+
+	private void backoff(final Throwable ex) throws InterruptedException {
+		long backoffMillis = backoff.calculateFullJitterBackoff(
+			configuration.getSubscribeToShardBaseBackoffMillis(),
+			configuration.getSubscribeToShardMaxBackoffMillis(),
+			configuration.getSubscribeToShardExpConstant(),
+			attempt);
+
+		LOG.warn("Encountered recoverable error {}. Backing off for {} millis {} ({})",
+			ex.getCause().getClass().getSimpleName(),
+			backoffMillis,
+			subscribedShard.getShard().getShardId(),
+			consumerArn,
+			ex);
+
+		backoff.sleep(backoffMillis);
+	}
+
+	/**
+	 * Records that come from KPL may be aggregated.
+	 * Records must be deaggregated before they are processed by the application.
+	 * Deaggregation is performed by KCL.
+	 * In order to prevent having to import KCL 1.x and 2.x we convert the records to v1 format and use KCL v1.
+	 *
+	 * @param records the SDK v2 records
+	 * @return records converted to SDK v1 format
+	 */
+	private List<com.amazonaws.services.kinesis.model.Record> toSdkV1Records(final List<Record> records) {
+		final List<com.amazonaws.services.kinesis.model.Record> sdkV1Records = new ArrayList<>();
+
+		for (Record record : records) {
+			sdkV1Records.add(toSdkV1Record(record));
+		}
+
+		return sdkV1Records;
+	}
+
+	private com.amazonaws.services.kinesis.model.Record toSdkV1Record(@Nonnull final Record record) {
+		final com.amazonaws.services.kinesis.model.Record recordV1 = new com.amazonaws.services.kinesis.model.Record()
+			.withData(record.data().asByteBuffer())
+			.withSequenceNumber(record.sequenceNumber())
+			.withPartitionKey(record.partitionKey())
+			.withApproximateArrivalTimestamp(new Date(record.approximateArrivalTimestamp().toEpochMilli()));
+
+		EncryptionType encryptionType = record.encryptionType();
+		if (encryptionType != null) {
+			recordV1.withEncryptionType(encryptionType.name());
+		}
+
+		return recordV1;
+	}
+
+	/**
+	 * Converts a local {@link StartingPosition} to an AWS SDK V2 object representation.
+	 *
+	 * @param startingPosition the local {@link StartingPosition}
+	 * @return an AWS SDK V2 representation
+	 */
+	private software.amazon.awssdk.services.kinesis.model.StartingPosition toSdkV2StartingPosition(StartingPosition startingPosition) {
+		software.amazon.awssdk.services.kinesis.model.StartingPosition.Builder builder = builder()
+			.type(startingPosition.getShardIteratorType().toString());
+
+		Object marker = startingPosition.getStartingMarker();
+
+		switch (startingPosition.getShardIteratorType()) {
+			case AT_TIMESTAMP: {
+				Preconditions.checkNotNull(marker, "StartingPosition AT_TIMESTAMP date marker is null.");
+				builder.timestamp(((Date) marker).toInstant());
+				break;
+			}
+			case AT_SEQUENCE_NUMBER:
+			case AFTER_SEQUENCE_NUMBER: {
+				Preconditions.checkNotNull(marker, "StartingPosition *_SEQUENCE_NUMBER position is null.");
+				builder.sequenceNumber(marker.toString());
+				break;
+			}
+		}
+
+		return builder.build();
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
index 03705cf..89ffad3 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
@@ -124,7 +124,7 @@ public class FanOutRecordPublisherConfiguration {
 	private final long describeStreamBaseBackoffMillis;
 
 	/**
-	 * Maximum backoff millis for the describe stream operation.
+	 *  Maximum backoff millis for the describe stream operation.
 	 */
 	private final long describeStreamMaxBackoffMillis;
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
similarity index 52%
copy from flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
copy to flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
index ee5034f..f21bfdc 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherFactory.java
@@ -15,74 +15,81 @@
  * limitations under the License.
  */
 
-package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher.FlinkKinesisProxyFactory;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
-import org.apache.flink.streaming.connectors.kinesis.metrics.PollingRecordPublisherMetricsReporter;
 import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
-import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 import org.apache.flink.util.Preconditions;
 
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+
+import java.util.Optional;
 import java.util.Properties;
 
+import static java.util.Collections.singletonList;
+
 /**
- * A {@link RecordPublisher} factory used to create instances of {@link PollingRecordPublisher}.
+ * A {@link RecordPublisher} factory used to create instances of {@link FanOutRecordPublisher}.
  */
 @Internal
-public class PollingRecordPublisherFactory implements RecordPublisherFactory {
+public class FanOutRecordPublisherFactory implements RecordPublisherFactory {
 
-	private final FlinkKinesisProxyFactory kinesisProxyFactory;
+	private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
 
-	public PollingRecordPublisherFactory(FlinkKinesisProxyFactory kinesisProxyFactory) {
-		this.kinesisProxyFactory = kinesisProxyFactory;
+	/**
+	 * A singleton {@link KinesisProxyV2} is used per Flink task.
+	 * The {@link KinesisAsyncClient} uses an internal thread pool; using a single client reduces overhead.
+	 */
+	private final KinesisProxyV2Interface kinesisProxy;
+
+	/**
+	 * Instantiate a factory responsible for creating {@link FanOutRecordPublisher}.
+	 *
+	 * @param kinesisProxy the singleton proxy used by all record publishers created by this factory
+	 */
+	public FanOutRecordPublisherFactory(final KinesisProxyV2Interface kinesisProxy) {
+		this.kinesisProxy = kinesisProxy;
 	}
 
 	/**
-	 * Create a {@link PollingRecordPublisher}.
-	 * An {@link AdaptivePollingRecordPublisher} will be created should adaptive reads be enabled in the configuration.
+	 * Create a {@link FanOutRecordPublisher}.
 	 *
-	 * @param startingPosition the position in the shard to start consuming records from
+	 * @param startingPosition the starting position in the shard to start consuming from
 	 * @param consumerConfig the consumer configuration properties
 	 * @param metricGroup the metric group to report metrics to
 	 * @param streamShardHandle the shard this consumer is subscribed to
-	 * @return a {@link PollingRecordPublisher}
+	 * @return a {@link FanOutRecordPublisher}
 	 */
 	@Override
-	public PollingRecordPublisher create(
+	public FanOutRecordPublisher create(
 			final StartingPosition startingPosition,
 			final Properties consumerConfig,
 			final MetricGroup metricGroup,
-			final StreamShardHandle streamShardHandle) throws InterruptedException {
+			final StreamShardHandle streamShardHandle) {
 		Preconditions.checkNotNull(startingPosition);
 		Preconditions.checkNotNull(consumerConfig);
 		Preconditions.checkNotNull(metricGroup);
 		Preconditions.checkNotNull(streamShardHandle);
 
-		final PollingRecordPublisherConfiguration configuration = new PollingRecordPublisherConfiguration(consumerConfig);
-		final PollingRecordPublisherMetricsReporter metricsReporter = new PollingRecordPublisherMetricsReporter(metricGroup);
-		final KinesisProxyInterface kinesisProxy = kinesisProxyFactory.create(consumerConfig);
+		String stream = streamShardHandle.getStreamName();
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(consumerConfig, singletonList(stream));
+
+		Optional<String> streamConsumerArn = configuration.getStreamConsumerArn(stream);
+		Preconditions.checkState(streamConsumerArn.isPresent());
+
+		return new FanOutRecordPublisher(startingPosition, streamConsumerArn.get(), streamShardHandle, kinesisProxy, configuration, BACKOFF);
+	}
 
-		if (configuration.isAdaptiveReads()) {
-			return new AdaptivePollingRecordPublisher(
-				startingPosition,
-				streamShardHandle,
-				metricsReporter,
-				kinesisProxy,
-				configuration.getMaxNumberOfRecordsPerFetch(),
-				configuration.getFetchIntervalMillis());
-		} else {
-			return new PollingRecordPublisher(
-				startingPosition,
-				streamShardHandle,
-				metricsReporter,
-				kinesisProxy,
-				configuration.getMaxNumberOfRecordsPerFetch(),
-				configuration.getFetchIntervalMillis());
-		}
+	@Override
+	public void close() {
+		kinesisProxy.close();
 	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
new file mode 100644
index 0000000..0cc1eaf
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutShardSubscriber.java
@@ -0,0 +1,468 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.StartingPosition;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * This class is responsible for acquiring an Enhanced Fan Out subscription and consuming records from a shard.
+ * A queue is used to buffer records between the Kinesis Proxy and Flink application. This allows processing
+ * to be separated from consumption; errors thrown in the consumption layer do not propagate up to the application.
+ *
+ * <pre>{@code [
+ * | ----------- Source Connector Thread ----------- |                      | --- KinesisAsyncClient Thread(s) -- |
+ * | FanOutRecordPublisher | FanOutShardSubscription | == blocking queue == | KinesisProxyV2 | KinesisAsyncClient |
+ * ]}</pre>
+ * <p>
+ * 	 Three types of message are passed over the queue for inter-thread communication:
+ * 	 <ul>
+ * 	   	<li>{@link SubscriptionNextEvent} - passes data from the network to the consumer</li>
+ * 	  	<li>{@link SubscriptionCompleteEvent} - indicates a subscription has expired</li>
+ * 	   	<li>{@link SubscriptionErrorEvent} - passes an exception from the network to the consumer</li>
+ * 	 </ul>
+ * </p>
+ * <p>
+ *   The blocking queue has a maximum capacity of 1 record.
+ *   This allows backpressure to be applied closer to the network stack and results in record prefetch.
+ *   At maximum capacity we will have three {@link SubscribeToShardEvent} in memory (per instance of this class):
+ *   <ul>
+ *      <li>1 event being processed by the consumer</li>
+ *      <li>1 event enqueued in the blocking queue</li>
+ *      <li>1 event being added to the queue by the network (blocking)</li>
+ *   </ul>
+ * </p>
+ */
+@Internal
+public class FanOutShardSubscriber {
+
+	private static final Logger LOG = LoggerFactory.getLogger(FanOutShardSubscriber.class);
+
+	/**
+	 * The maximum capacity of the queue between the network and consumption thread.
+	 * The queue is mainly used to isolate networking from consumption such that errors do not bubble up.
+	 * This queue also acts as a buffer resulting in a record prefetch and reduced latency.
+	 */
+	private static final int QUEUE_CAPACITY = 1;
+
+	/**
+	 * Read timeout will occur after 30 seconds, a sanity timeout to prevent lockup in unexpected error states.
+	 * If the consumer does not receive a new event within the DEQUEUE_WAIT_SECONDS it will backoff and resubscribe.
+	 * Under normal conditions heartbeat events are received even when there are no records to consume, so it is not
+	 * expected for this timeout to occur under normal conditions.
+	 */
+	private static final int DEQUEUE_WAIT_SECONDS = 35;
+
+	/** The time to wait when enqueuing events to allow error events to "push in front" of data. */
+	private static final int ENQUEUE_WAIT_SECONDS = 5;
+
+	private final BlockingQueue<FanOutSubscriptionEvent> queue = new LinkedBlockingQueue<>(QUEUE_CAPACITY);
+
+	private final KinesisProxyV2Interface kinesis;
+
+	private final String consumerArn;
+
+	private final String shardId;
+
+	/**
+	 * Create a new Fan Out subscriber.
+	 *
+	 * @param consumerArn the stream consumer ARN
+	 * @param shardId the shard ID to subscribe to
+	 * @param kinesis the Kinesis Proxy used to communicate via AWS SDK v2
+	 */
+	FanOutShardSubscriber(final String consumerArn, final String shardId, final KinesisProxyV2Interface kinesis) {
+		this.kinesis = Preconditions.checkNotNull(kinesis);
+		this.consumerArn = Preconditions.checkNotNull(consumerArn);
+		this.shardId = Preconditions.checkNotNull(shardId);
+	}
+
+	/**
+	 * Obtains a subscription to the shard from the specified {@code startingPosition}.
+	 * {@link SubscribeToShardEvent} received from KDS are delivered to the given {@code eventConsumer}.
+	 * Returns false if there are records left to consume from the shard.
+	 *
+	 * @param startingPosition the position in the stream in which to start receiving records
+	 * @param eventConsumer the consumer to deliver received events to
+	 * @return true if there are no more messages (complete), false if a subsequent subscription should be obtained
+	 * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+	 * @throws InterruptedException when the thread is interrupted
+	 */
+	boolean subscribeToShardAndConsumeRecords(
+			final StartingPosition startingPosition,
+			final Consumer<SubscribeToShardEvent> eventConsumer) throws InterruptedException, FanOutSubscriberException {
+		LOG.debug("Subscribing to shard {} ({})", shardId, consumerArn);
+
+		try {
+			openSubscriptionToShard(startingPosition);
+		} catch (FanOutSubscriberException ex) {
+			// The only exception that should cause a failure is a ResourceNotFoundException
+			// Rethrow the exception to trigger the application to terminate
+			if (ex.getCause() instanceof ResourceNotFoundException) {
+				throw (ResourceNotFoundException) ex.getCause();
+			}
+
+			throw ex;
+		}
+
+		return consumeAllRecordsFromKinesisShard(eventConsumer);
+	}
+
+	/**
+	 * Calls {@link KinesisProxyV2#subscribeToShard} and waits to acquire a subscription.
+	 * In the event a non-recoverable error occurs this method will rethrow the exception.
+	 * Once the subscription is acquired the client signals to the producer that we are ready to receive records.
+	 *
+	 * @param startingPosition the position in which to start consuming from
+	 * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+	 */
+	private void openSubscriptionToShard(final StartingPosition startingPosition) throws FanOutSubscriberException, InterruptedException {
+		SubscribeToShardRequest request = SubscribeToShardRequest.builder()
+			.consumerARN(consumerArn)
+			.shardId(shardId)
+			.startingPosition(startingPosition)
+			.build();
+
+		AtomicReference<Throwable> exception = new AtomicReference<>();
+		CountDownLatch waitForSubscriptionLatch = new CountDownLatch(1);
+		FanOutShardSubscription subscription = new FanOutShardSubscription(waitForSubscriptionLatch);
+
+		SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
+			.builder()
+			.onError(e -> {
+				// Errors that occur while trying to acquire a subscription are only thrown from here
+				// Errors that occur during the subscription are surfaced here and to the FanOutShardSubscription
+				//	(errors are ignored here once the subscription is open)
+				if (waitForSubscriptionLatch.getCount() > 0) {
+					exception.set(e);
+					waitForSubscriptionLatch.countDown();
+				}
+			})
+			.subscriber(() -> subscription)
+			.build();
+
+		kinesis.subscribeToShard(request, responseHandler);
+
+		waitForSubscriptionLatch.await();
+
+		Throwable throwable = exception.get();
+		if (throwable != null) {
+			handleError(throwable);
+		}
+
+		LOG.debug("Acquired subscription - {} ({})", shardId, consumerArn);
+
+		// Request the first record to kick off consumption
+		// Following requests are made by the FanOutShardSubscription on the netty thread
+		subscription.requestRecord();
+	}
+
+	/**
+	 * Update the reference to the latest networking error in this object.
+	 * Parent caller can interrogate to decide how to handle error.
+	 *
+	 * @param throwable the exception that has occurred
+	 */
+	private void handleError(final Throwable throwable) throws FanOutSubscriberException {
+		Throwable cause;
+		if (throwable instanceof CompletionException || throwable instanceof ExecutionException) {
+			cause = throwable.getCause();
+		} else {
+			cause = throwable;
+		}
+
+		LOG.warn("Error occurred on EFO subscription: {} - ({}).  {} ({})",
+			throwable.getClass().getName(), throwable.getMessage(), shardId, consumerArn, cause);
+
+		throw new FanOutSubscriberException(cause);
+	}
+
+	/**
+	 * Once the subscription is open, records will be delivered to the {@link BlockingQueue}.
+	 * Queue capacity is hardcoded to 1 record, the queue is used solely to separate consumption and processing.
+	 * However, this buffer will result in latency reduction as records are pre-fetched as a result.
+	 * This method will poll the queue and exit under any of these conditions:
+	 * - {@code continuationSequenceNumber} is {@code null}, indicating the shard is complete
+	 * - The subscription expires, indicated by a {@link SubscriptionCompleteEvent}
+	 * - There is an error while consuming records, indicated by a {@link SubscriptionErrorEvent}
+	 *
+	 * @param eventConsumer the event consumer to deliver records to
+	 * @return true if there are no more messages (complete), false if a subsequent subscription should be obtained
+	 * @throws FanOutSubscriberException when an exception is propagated from the networking stack
+	 * @throws InterruptedException when the thread is interrupted
+	 */
+	private boolean consumeAllRecordsFromKinesisShard(
+			final Consumer<SubscribeToShardEvent> eventConsumer) throws InterruptedException, FanOutSubscriberException {
+		String continuationSequenceNumber;
+
+		do {
+			// Read timeout will occur after 30 seconds, add a sanity timeout here to prevent lockup
+			FanOutSubscriptionEvent subscriptionEvent = queue.poll(DEQUEUE_WAIT_SECONDS, SECONDS);
+
+			if (subscriptionEvent == null) {
+				LOG.debug("Timed out polling events from network, reacquiring subscription - {} ({})", shardId, consumerArn);
+				return false;
+			} else if (subscriptionEvent.isSubscribeToShardEvent()) {
+				SubscribeToShardEvent event = subscriptionEvent.getSubscribeToShardEvent();
+				continuationSequenceNumber = event.continuationSequenceNumber();
+				if (!event.records().isEmpty()) {
+					eventConsumer.accept(event);
+				}
+			} else if (subscriptionEvent.isSubscriptionComplete()) {
+				// The subscription is complete, but the shard might not be, so we return incomplete
+				return false;
+			} else {
+				handleError(subscriptionEvent.getThrowable());
+				return false;
+			}
+		} while (continuationSequenceNumber != null);
+
+		return true;
+	}
+
+	/**
+	 * The {@link FanOutShardSubscription} subscribes to the events coming from KDS and adds them to the {@link BlockingQueue}.
+	 * Backpressure is applied based on the maximum capacity of the queue.
+	 * The {@link Subscriber} methods of this class are invoked by a thread from the {@link KinesisAsyncClient}.
+	 */
+	private class FanOutShardSubscription implements Subscriber<SubscribeToShardEventStream> {
+
+		private Subscription subscription;
+
+		private volatile boolean cancelled = false;
+
+		private final CountDownLatch waitForSubscriptionLatch;
+
+		private final Object lockObject = new Object();
+
+		private FanOutShardSubscription(final CountDownLatch waitForSubscriptionLatch) {
+			this.waitForSubscriptionLatch = waitForSubscriptionLatch;
+		}
+
+		/**
+		 * Flag to the producer that we are ready to receive more events.
+		 */
+		void requestRecord() {
+			if (!cancelled) {
+				LOG.debug("Requesting more records from EFO subscription - {} ({})", shardId, consumerArn);
+				subscription.request(1);
+			}
+		}
+
+		@Override
+		public void onSubscribe(Subscription subscription) {
+			this.subscription = subscription;
+			waitForSubscriptionLatch.countDown();
+		}
+
+		@Override
+		public void onNext(SubscribeToShardEventStream subscribeToShardEventStream) {
+			subscribeToShardEventStream.accept(new SubscribeToShardResponseHandler.Visitor() {
+				@Override
+				public void visit(SubscribeToShardEvent event) {
+					synchronized (lockObject) {
+						if (enqueueEventWithRetry(new SubscriptionNextEvent(event))) {
+							requestRecord();
+						}
+					}
+				}
+			});
+		}
+
+		@Override
+		public void onError(Throwable throwable) {
+			LOG.debug("Error occurred on EFO subscription: {} - ({}).  {} ({})",
+				throwable.getClass().getName(), throwable.getMessage(), shardId, consumerArn);
+
+			// Cancel the subscription to signal the onNext to stop queuing and requesting data
+			cancelSubscription();
+
+			synchronized (lockObject) {
+				// Empty the queue and add a poison pill to terminate this subscriber
+				// The synchronized block ensures that new data is not written in the meantime
+				queue.clear();
+				enqueueEvent(new SubscriptionErrorEvent(throwable));
+			}
+		}
+
+		@Override
+		public void onComplete() {
+			LOG.debug("EFO subscription complete - {} ({})", shardId, consumerArn);
+			enqueueEvent(new SubscriptionCompleteEvent());
+		}
+
+		private void cancelSubscription() {
+			if (!cancelled) {
+				cancelled = true;
+				subscription.cancel();
+			}
+		}
+
+		/**
+		 * Continuously attempt to enqueue an event until successful or the subscription is cancelled (due to error).
+		 * When backpressure applied by the consumer exceeds 30s for a single batch, a ReadTimeoutException will be
+		 * thrown by the network stack. This will result in the subscription being cancelled and this event being discarded.
+		 * The subscription would subsequently be reacquired and the discarded data would be fetched again.
+		 *
+		 * @param event the event to enqueue
+		 * @return true if the event was successfully enqueued.
+		 */
+		private boolean enqueueEventWithRetry(final FanOutSubscriptionEvent event) {
+			boolean result = false;
+			do {
+				if (cancelled) {
+					break;
+				}
+
+				synchronized (lockObject) {
+					result = enqueueEvent(event);
+				}
+			} while (!result);
+
+			return result;
+		}
+
+		/**
+		 * Offers the event to the queue.
+		 *
+		 * @param event the event to enqueue
+		 * @return true if the event was successfully enqueued.
+		 */
+		private boolean enqueueEvent(final FanOutSubscriptionEvent event) {
+			try {
+				if (!queue.offer(event, ENQUEUE_WAIT_SECONDS, SECONDS)) {
+					LOG.debug("Timed out enqueuing event {} - {} ({})", event.getClass().getSimpleName(), shardId, consumerArn);
+					return false;
+				}
+			} catch (InterruptedException e) {
+				Thread.currentThread().interrupt();
+				throw new RuntimeException(e);
+			}
+
+			return true;
+		}
+	}
+
+	/**
+	 * An exception wrapper to indicate an error has been thrown from the networking stack.
+	 */
+	static class FanOutSubscriberException extends Exception {
+
+		private static final long serialVersionUID = 2275015497000437736L;
+
+		public FanOutSubscriberException(Throwable cause) {
+			super(cause);
+		}
+	}
+
+	/**
+	 * An interface used to pass messages between {@link FanOutShardSubscription} and {@link FanOutShardSubscriber}
+	 * via the {@link BlockingQueue}.
+	 */
+	private interface FanOutSubscriptionEvent {
+
+		default boolean isSubscribeToShardEvent() {
+			return false;
+		}
+
+		default boolean isSubscriptionComplete() {
+			return false;
+		}
+
+		default SubscribeToShardEvent getSubscribeToShardEvent() {
+			throw new UnsupportedOperationException("This event does not support getSubscribeToShardEvent()");
+		}
+
+		default Throwable getThrowable() {
+			throw new UnsupportedOperationException("This event does not support getThrowable()");
+		}
+	}
+
+	/**
+	 * Indicates that an EFO subscription has completed/expired.
+	 */
+	private static class SubscriptionCompleteEvent implements FanOutSubscriptionEvent {
+
+		@Override
+		public boolean isSubscriptionComplete() {
+			return true;
+		}
+	}
+
+	/**
+	 * Poison pill, indicates that an error occurred while consuming from KDS.
+	 */
+	private static class SubscriptionErrorEvent implements FanOutSubscriptionEvent {
+		private final Throwable throwable;
+
+		private SubscriptionErrorEvent(Throwable throwable) {
+			this.throwable = throwable;
+		}
+
+		@Override
+		public Throwable getThrowable() {
+			return throwable;
+		}
+	}
+
+	/**
+	 * A wrapper to pass the next {@link SubscribeToShardEvent} between threads.
+	 */
+	private static class SubscriptionNextEvent implements FanOutSubscriptionEvent {
+		private final SubscribeToShardEvent subscribeToShardEvent;
+
+		private SubscriptionNextEvent(SubscribeToShardEvent subscribeToShardEvent) {
+			this.subscribeToShardEvent = subscribeToShardEvent;
+		}
+
+		@Override
+		public boolean isSubscribeToShardEvent() {
+			return true;
+		}
+
+		@Override
+		public SubscribeToShardEvent getSubscribeToShardEvent() {
+			return subscribeToShardEvent;
+		}
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
index 4edc1f0..36d3c69 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
@@ -34,7 +34,6 @@ import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
 
-import static com.amazonaws.services.kinesis.model.ShardIteratorType.LATEST;
 import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
 import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
 
@@ -156,10 +155,6 @@ public class PollingRecordPublisher implements RecordPublisher {
 	 */
 	@Nullable
 	private String getShardIterator() throws InterruptedException {
-		if (nextStartingPosition.getShardIteratorType() == LATEST && subscribedShard.isClosed()) {
-			return null;
-		}
-
 		return kinesisProxy.getShardIterator(
 			subscribedShard,
 			nextStartingPosition.getShardIteratorType().toString(),
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
index ee5034f..00680e8 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactory.java
@@ -38,7 +38,7 @@ public class PollingRecordPublisherFactory implements RecordPublisherFactory {
 
 	private final FlinkKinesisProxyFactory kinesisProxyFactory;
 
-	public PollingRecordPublisherFactory(FlinkKinesisProxyFactory kinesisProxyFactory) {
+	public PollingRecordPublisherFactory(final FlinkKinesisProxyFactory kinesisProxyFactory) {
 		this.kinesisProxyFactory = kinesisProxyFactory;
 	}
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
new file mode 100644
index 0000000..5d2ffa7
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/FullJitterBackoff.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+
+import java.util.Random;
+
+/**
+ * Used to calculate full jitter backoff sleep durations.
+ * @see <a href="https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/">
+ *        Exponential Backoff and Jitter
+ * 		</a>
+ */
+@Internal
+public class FullJitterBackoff {
+
+	/** Random seed used to calculate backoff jitter for Kinesis operations. */
+	private final Random seed = new Random();
+
+	/**
+	 * Calculates the sleep time for full jitter based on the given parameters.
+	 *
+	 * @param baseMillis the base backoff time in milliseconds
+	 * @param maxMillis the maximum backoff time in milliseconds
+	 * @param power the power constant for exponential backoff
+	 * @param attempt the attempt number
+	 * @return the time to wait before trying again
+	 */
+	public long calculateFullJitterBackoff(long baseMillis, long maxMillis, double power, int attempt) {
+		long exponentialBackoff = (long) Math.min(maxMillis, baseMillis * Math.pow(power, attempt));
+		return (long) (seed.nextDouble() * exponentialBackoff);
+	}
+
+	/**
+	 * Puts the current thread to sleep for the specified number of millis.
+	 * Simply delegates to {@link Thread#sleep}.
+	 *
+	 * @param millisToSleep the number of milliseconds to sleep for
+	 * @throws InterruptedException if the current thread is interrupted while sleeping
+	 */
+	public void sleep(long millisToSleep) throws InterruptedException {
+		Thread.sleep(millisToSleep);
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
index 5f2d6d5..b8d3086 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxy.java
@@ -55,7 +55,6 @@ import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Random;
 
 import static org.apache.flink.util.Preconditions.checkNotNull;
 
@@ -74,12 +73,12 @@ public class KinesisProxy implements KinesisProxyInterface {
 
 	private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
 
+	/** Calculates full jitter backoff delays. */
+	private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
+
 	/** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
 	private final AmazonKinesis kinesisClient;
 
-	/** Random seed used to calculate backoff jitter for Kinesis operations. */
-	private static final Random seed = new Random();
-
 	// ------------------------------------------------------------------------
 	//  listShards() related performance settings
 	// ------------------------------------------------------------------------
@@ -206,7 +205,6 @@ public class KinesisProxy implements KinesisProxyInterface {
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_GETITERATOR_RETRIES,
 				Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_RETRIES)));
-
 	}
 
 	/**
@@ -230,9 +228,6 @@ public class KinesisProxy implements KinesisProxyInterface {
 		return new KinesisProxy(configProps);
 	}
 
-	/**
-	 * {@inheritDoc}
-	 */
 	@Override
 	public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) throws InterruptedException {
 		final GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
@@ -247,11 +242,11 @@ public class KinesisProxy implements KinesisProxyInterface {
 				getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
 			} catch (SdkClientException ex) {
 				if (isRecoverableSdkClientException(ex)) {
-					long backoffMillis = fullJitterBackoff(
+					long backoffMillis = BACKOFF.calculateFullJitterBackoff(
 						getRecordsBaseBackoffMillis, getRecordsMaxBackoffMillis, getRecordsExpConstant, retryCount++);
 					LOG.warn("Got recoverable SdkClientException. Backing off for "
 						+ backoffMillis + " millis (" + ex.getClass().getName() + ": " + ex.getMessage() + ")");
-					Thread.sleep(backoffMillis);
+					BACKOFF.sleep(backoffMillis);
 				} else {
 					throw ex;
 				}
@@ -266,9 +261,6 @@ public class KinesisProxy implements KinesisProxyInterface {
 		return getRecordsResult;
 	}
 
-	/**
-	 * {@inheritDoc}
-	 */
 	@Override
 	public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException {
 		GetShardListResult result = new GetShardListResult();
@@ -281,9 +273,6 @@ public class KinesisProxy implements KinesisProxyInterface {
 		return result;
 	}
 
-	/**
-	 * {@inheritDoc}
-	 */
 	@Override
 	public String getShardIterator(StreamShardHandle shard, String shardIteratorType, @Nullable Object startingMarker) throws InterruptedException {
 		GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest()
@@ -322,11 +311,11 @@ public class KinesisProxy implements KinesisProxyInterface {
 					getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
 			} catch (AmazonServiceException ex) {
 				if (isRecoverableException(ex)) {
-					long backoffMillis = fullJitterBackoff(
+					long backoffMillis = BACKOFF.calculateFullJitterBackoff(
 						getShardIteratorBaseBackoffMillis, getShardIteratorMaxBackoffMillis, getShardIteratorExpConstant, retryCount++);
 					LOG.warn("Got recoverable AmazonServiceException. Backing off for "
 						+ backoffMillis + " millis (" + ex.getClass().getName() + ": " + ex.getMessage() + ")");
-					Thread.sleep(backoffMillis);
+					BACKOFF.sleep(backoffMillis);
 				} else {
 					throw ex;
 				}
@@ -438,11 +427,11 @@ public class KinesisProxy implements KinesisProxyInterface {
 
 				listShardsResults = kinesisClient.listShards(listShardsRequest);
 			} catch (LimitExceededException le) {
-				long backoffMillis = fullJitterBackoff(
+				long backoffMillis = BACKOFF.calculateFullJitterBackoff(
 						listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
 					LOG.warn("Got LimitExceededException when listing shards from stream " + streamName
 									+ ". Backing off for " + backoffMillis + " millis.");
-				Thread.sleep(backoffMillis);
+				BACKOFF.sleep(backoffMillis);
 			} catch (ResourceInUseException reInUse) {
 				if (LOG.isWarnEnabled()) {
 					// List Shards will throw an exception if stream in not in active state. Return and re-use previous state available.
@@ -459,11 +448,11 @@ public class KinesisProxy implements KinesisProxyInterface {
 				break;
 			} catch (SdkClientException ex) {
 				if (retryCount < listShardsMaxRetries && isRecoverableSdkClientException(ex)) {
-					long backoffMillis = fullJitterBackoff(
+					long backoffMillis = BACKOFF.calculateFullJitterBackoff(
 						listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
 					LOG.warn("Got SdkClientException when listing shards from stream {}. Backing off for {} millis.",
 						streamName, backoffMillis);
-					Thread.sleep(backoffMillis);
+					BACKOFF.sleep(backoffMillis);
 				} else {
 					// propagate if retries exceeded or not recoverable
 					// (otherwise would return null result and keep trying forever)
@@ -515,14 +504,14 @@ public class KinesisProxy implements KinesisProxyInterface {
 			try {
 				describeStreamResult = kinesisClient.describeStream(describeStreamRequest);
 			} catch (LimitExceededException le) {
-				long backoffMillis = fullJitterBackoff(
+				long backoffMillis = BACKOFF.calculateFullJitterBackoff(
 						describeStreamBaseBackoffMillis,
 						describeStreamMaxBackoffMillis,
 						describeStreamExpConstant,
 						attemptCount++);
 				LOG.warn(String.format("Got LimitExceededException when describing stream %s. "
 						+ "Backing off for %d millis.", streamName, backoffMillis));
-				Thread.sleep(backoffMillis);
+				BACKOFF.sleep(backoffMillis);
 			} catch (ResourceNotFoundException re) {
 				throw new RuntimeException("Error while getting stream details", re);
 			}
@@ -541,8 +530,4 @@ public class KinesisProxy implements KinesisProxyInterface {
 		return describeStreamResult;
 	}
 
-	protected static long fullJitterBackoff(long base, long max, double power, int attempt) {
-		long exponentialBackoff = (long) Math.min(max, base * Math.pow(power, attempt));
-		return (long) (seed.nextDouble() * exponentialBackoff); // random jitter between 0 and the exponential backoff
-	}
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
index 30464f3..db9c7ca 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyInterface.java
@@ -71,4 +71,5 @@ public interface KinesisProxyInterface {
 	 *                              if the backoff is interrupted.
 	 */
 	GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException;
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
index d1310e5..26908ce 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
@@ -21,6 +21,10 @@ import org.apache.flink.annotation.Internal;
 import org.apache.flink.util.Preconditions;
 
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.util.concurrent.CompletableFuture;
 
 /**
  * Kinesis proxy implementation using AWS SDK v2.x - a utility class that is used as a proxy to make
@@ -30,10 +34,11 @@ import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
 @Internal
 public class KinesisProxyV2 implements KinesisProxyV2Interface {
 
+	/** An Asynchronous client used to communicate with AWS services. */
 	private final KinesisAsyncClient kinesisAsyncClient;
 
 	/**
-	 * Create a new KinesisProxyV2 based on the supplied configuration properties.
+	 * Create a new KinesisProxyV2 using the provided Async Client.
 	 *
 	 * @param kinesisAsyncClient the kinesis async client used to communicate with Kinesis
 	 */
@@ -41,4 +46,16 @@ public class KinesisProxyV2 implements KinesisProxyV2Interface {
 		this.kinesisAsyncClient = Preconditions.checkNotNull(kinesisAsyncClient);
 	}
 
+	@Override
+	public CompletableFuture<Void> subscribeToShard(
+			final SubscribeToShardRequest request,
+			final SubscribeToShardResponseHandler responseHandler) {
+		return kinesisAsyncClient.subscribeToShard(request, responseHandler);
+	}
+
+	@Override
+	public void close() {
+		kinesisAsyncClient.close();
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
index aff6a85..e748eb2 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
@@ -19,10 +19,24 @@ package org.apache.flink.streaming.connectors.kinesis.proxy;
 
 import org.apache.flink.annotation.Internal;
 
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.util.concurrent.CompletableFuture;
+
 /**
  * Interface for a Kinesis proxy using AWS SDK v2.x operating on multiple Kinesis streams within the same AWS service region.
  */
 @Internal
 public interface KinesisProxyV2Interface {
 
+	CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler);
+
+	/**
+	 * Destroy any open resources used by this proxy, such as the underlying client.
+	 */
+	default void close() {
+		// Do nothing by default
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
index 7301e7a..43e47b7 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtil.java
@@ -56,6 +56,7 @@ import java.util.Map;
 import java.util.Properties;
 
 import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM;
 
 /**
  * Some utilities specific to Amazon Web Service.
@@ -277,7 +278,16 @@ public class AWSUtil {
 	 * @return the starting position
 	 */
 	public static StartingPosition getStartingPosition(final SequenceNumber sequenceNumber, final Properties configProps) {
-		if (SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get().equals(sequenceNumber)) {
+		if (sequenceNumber.equals(SENTINEL_LATEST_SEQUENCE_NUM.get())) {
+			// LATEST starting positions are translated to AT_TIMESTAMP starting positions. This is to prevent data loss
+			// in the situation where the first read times out and is re-attempted. Consider the following scenario:
+			// 1. Consume from LATEST
+			// 2. No records are consumed and Record Publisher throws retryable error
+			// 3. Restart consumption from LATEST
+			// Any records sent between steps 1 and 3 are lost. Using the timestamp of step 1 allows the consumer to
+			// restart from shard position of step 1, and hence no records are lost.
+			return StartingPosition.fromTimestamp(new Date());
+		} else if (SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get().equals(sequenceNumber)) {
 			Date timestamp = KinesisConfigUtil.parseStreamTimestampStartingPosition(configProps);
 			return StartingPosition.fromTimestamp(timestamp);
 		} else {
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
index c4073c3..2326314 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
@@ -34,7 +34,9 @@ import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider
 import software.amazon.awssdk.auth.credentials.WebIdentityTokenFileCredentialsProvider;
 import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
 import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption;
+import software.amazon.awssdk.http.Protocol;
 import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.Http2Configuration;
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.profiles.ProfileFile;
 import software.amazon.awssdk.regions.Region;
@@ -50,12 +52,19 @@ import java.time.Duration;
 import java.util.Optional;
 import java.util.Properties;
 
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY;
+
 /**
  * Utility methods specific to Amazon Web Service SDK v2.x.
  */
 @Internal
 public class AwsV2Util {
 
+	private static final int INITIAL_WINDOW_SIZE_BYTES = 512 * 1024; // 512 KB
+	private static final Duration HEALTH_CHECK_PING_PERIOD = Duration.ofSeconds(60);
+	private static final Duration CONNECTION_ACQUISITION_TIMEOUT = Duration.ofSeconds(60);
+
 	/**
 	 * Creates an Amazon Kinesis Async Client from the provided properties.
 	 * Configuration is copied from AWS SDK v1 configuration class as per:
@@ -65,8 +74,8 @@ public class AwsV2Util {
 	 * @return a new Amazon Kinesis Client
 	 */
 	public static KinesisAsyncClient createKinesisAsyncClient(final Properties configProps) {
-		final ClientConfiguration config = new ClientConfigurationFactory().getConfig();
-		return createKinesisAsyncClient(configProps, config);
+		ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
+		return createKinesisAsyncClient(configProps, clientConfiguration);
 	}
 
 	/**
@@ -79,7 +88,7 @@ public class AwsV2Util {
 	 * @return a new Amazon Kinesis Client
 	 */
 	public static KinesisAsyncClient createKinesisAsyncClient(final Properties configProps, final ClientConfiguration config) {
-		final SdkAsyncHttpClient httpClient = createHttpClient(config, NettyNioAsyncHttpClient.builder());
+		final SdkAsyncHttpClient httpClient = createHttpClient(config, NettyNioAsyncHttpClient.builder(), configProps);
 		final ClientOverrideConfiguration overrideConfiguration = createClientOverrideConfiguration(config, ClientOverrideConfiguration.builder());
 		final KinesisAsyncClientBuilder clientBuilder = KinesisAsyncClient.builder();
 
@@ -89,13 +98,27 @@ public class AwsV2Util {
 	@VisibleForTesting
 	static SdkAsyncHttpClient createHttpClient(
 			final ClientConfiguration config,
-			final NettyNioAsyncHttpClient.Builder httpClientBuilder) {
+			final NettyNioAsyncHttpClient.Builder httpClientBuilder,
+			final Properties consumerConfig) {
+
+		int maxConcurrency = Optional
+			.ofNullable(consumerConfig.getProperty(EFO_HTTP_CLIENT_MAX_CONCURRENCY))
+			.map(Integer::parseInt)
+			.orElse(DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY);
+
 		httpClientBuilder
-			.maxConcurrency(config.getMaxConnections())
+			.maxConcurrency(maxConcurrency)
 			.connectionTimeout(Duration.ofMillis(config.getConnectionTimeout()))
 			.writeTimeout(Duration.ofMillis(config.getSocketTimeout()))
 			.connectionMaxIdleTime(Duration.ofMillis(config.getConnectionMaxIdleMillis()))
-			.useIdleConnectionReaper(config.useReaper());
+			.useIdleConnectionReaper(config.useReaper())
+			.protocol(Protocol.HTTP2)
+			.connectionAcquisitionTimeout(CONNECTION_ACQUISITION_TIMEOUT)
+			.http2Configuration(Http2Configuration
+				.builder()
+				.healthCheckPingPeriod(HEALTH_CHECK_PING_PERIOD)
+				.initialWindowSize(INITIAL_WINDOW_SIZE_BYTES)
+				.build());
 
 		if (config.getConnectionTTL() > -1) {
 			httpClientBuilder.connectionTimeToLive(Duration.ofMillis(config.getConnectionTTL()));
@@ -248,4 +271,5 @@ public class AwsV2Util {
 	public static Region getRegion(final Properties configProps) {
 		return Region.of(configProps.getProperty(AWSConfigConstants.AWS_REGION));
 	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
index fc512e7..9626478 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
@@ -247,6 +247,9 @@ public class KinesisConfigUtil {
 					ConsumerConfigConstants.MAX_SHARD_GETRECORDS_INTERVAL_MILLIS + " milliseconds."
 			);
 		}
+
+		validateOptionalPositiveIntProperty(config, ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY,
+			"Invalid value given for EFO HTTP client max concurrency. Must be positive.");
 	}
 
 	/**
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
index 061120b..8f77d45 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java
@@ -748,8 +748,9 @@ public class FlinkKinesisConsumerTest extends TestLogger {
 							new AtomicReference<>(),
 							new ArrayList<>(),
 							subscribedStreamsToLastDiscoveredShardIds,
-							(props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamToQueueMap)
-							) {};
+							(props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamToQueueMap),
+							null) {
+						};
 					return fetcher;
 				}
 			};
@@ -880,9 +881,8 @@ public class FlinkKinesisConsumerTest extends TestLogger {
 							new AtomicReference<>(),
 							new ArrayList<>(),
 							subscribedStreamsToLastDiscoveredShardIds,
-							(props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(
-								streamToQueueMap)
-						) {
+							(props) -> FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamToQueueMap),
+							null) {
 							@Override
 							protected void emitWatermark() {
 								// necessary in this test to ensure that watermark state is updated
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
index 478564b..f6de864 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
@@ -26,11 +26,13 @@ import org.apache.flink.streaming.api.watermark.Watermark;
 import org.apache.flink.streaming.api.windowing.time.Time;
 import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer;
 import org.apache.flink.streaming.connectors.kinesis.KinesisShardAssigner;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
 import org.apache.flink.streaming.connectors.kinesis.testutils.AlwaysThrowsDeserializationSchema;
@@ -67,12 +69,16 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
+import static java.util.Collections.singletonList;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 /**
@@ -114,7 +120,7 @@ public class KinesisDataFetcherTest extends TestLogger {
 		final TestSourceContext<String> sourceContext = new TestSourceContext<>();
 
 		final TestableKinesisDataFetcher<String> fetcher = new TestableKinesisDataFetcher<>(
-			Collections.singletonList(stream),
+			singletonList(stream),
 			sourceContext,
 			TestUtils.getStandardProperties(),
 			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
@@ -761,7 +767,7 @@ public class KinesisDataFetcherTest extends TestLogger {
 
 		final KinesisDataFetcher<String> fetcher =
 			new TestableKinesisDataFetcher<String>(
-				Collections.singletonList(fakeStream1),
+				singletonList(fakeStream1),
 				sourceContext,
 				new java.util.Properties(),
 				new KinesisDeserializationSchemaWrapper<>(new org.apache.flink.streaming.util.serialization.SimpleStringSchema()),
@@ -825,14 +831,14 @@ public class KinesisDataFetcherTest extends TestLogger {
 		Map<String, List<BlockingQueue<String>>> streamsToShardQueues = new HashMap<>();
 		LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(10);
 		queue.put("item1");
-		streamsToShardQueues.put(stream, Collections.singletonList(queue));
+		streamsToShardQueues.put(stream, singletonList(queue));
 
 		AlwaysThrowsDeserializationSchema deserializationSchema = new AlwaysThrowsDeserializationSchema();
 		KinesisProxyInterface fakeKinesis =
 			FakeKinesisBehavioursFactory.blockingQueueGetRecords(streamsToShardQueues);
 
 		TestableKinesisDataFetcherForShardConsumerException<String> fetcher = new TestableKinesisDataFetcherForShardConsumerException<>(
-			Collections.singletonList(stream),
+			singletonList(stream),
 			new TestSourceContext<>(),
 			TestUtils.getStandardProperties(),
 			new KinesisDeserializationSchemaWrapper<>(deserializationSchema),
@@ -841,7 +847,8 @@ public class KinesisDataFetcherTest extends TestLogger {
 			new AtomicReference<>(),
 			new LinkedList<>(),
 			new HashMap<>(),
-			fakeKinesis);
+			fakeKinesis,
+			(sequence, properties, metricGroup, streamShardHandle) -> mock(RecordPublisher.class));
 
 		DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>(
 			TestUtils.getStandardProperties(), fetcher, 1, 0);
@@ -881,4 +888,32 @@ public class KinesisDataFetcherTest extends TestLogger {
 		assertTrue("Expected Fetcher to have been interrupted. This test didn't accomplish its goal.",
 			fetcher.wasInterrupted);
 	}
+
+	@Test
+	public void testRecordPublisherFactoryIsTornDown() {
+		Properties config = TestUtils.getStandardProperties();
+		config.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+
+		KinesisProxyV2Interface kinesisV2 = mock(KinesisProxyV2Interface.class);
+
+		TestableKinesisDataFetcher<String> fetcher =
+			new TestableKinesisDataFetcher<String>(
+				singletonList("fakeStream1"),
+				new TestSourceContext<>(),
+				config,
+				new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
+				10,
+				2,
+				new AtomicReference<>(),
+				new LinkedList<>(),
+				new HashMap<>(),
+				mock(KinesisProxyInterface.class),
+				kinesisV2) {
+			};
+
+		fetcher.shutdownFetcher();
+
+		verify(kinesisV2).close();
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java
new file mode 100644
index 0000000..c9ae709
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals;
+
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.AbstractSingleShardFanOutKinesisV2;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.SingleShardFanOutKinesisV2;
+
+import org.junit.Test;
+import software.amazon.awssdk.services.kinesis.model.StartingPosition;
+
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.util.Properties;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT;
+import static org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerTestUtils.fakeSequenceNumber;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.efoProperties;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AFTER_SEQUENCE_NUMBER;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_SEQUENCE_NUMBER;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
+
+/**
+ * Tests for the {@link ShardConsumer} using Fan Out consumption with mocked Kinesis behaviours.
+ */
+public class ShardConsumerFanOutTest {
+
+	@Test
+	public void testEmptyShard() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.emptyShard();
+
+		assertNumberOfMessagesReceivedFromKinesis(0, kinesis, fakeSequenceNumber());
+
+		assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations());
+	}
+
+	@Test
+	public void testStartFromLatestIsTranslatedToTimestamp() throws Exception {
+		Instant now = Instant.now();
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.boundedShard().build();
+		SequenceNumber sequenceNumber = SENTINEL_LATEST_SEQUENCE_NUM.get();
+
+		// Fake behaviour defaults to 10 messages
+		assertNumberOfMessagesReceivedFromKinesis(10, kinesis, sequenceNumber, efoProperties());
+
+		StartingPosition actual = kinesis.getStartingPositionForSubscription(0);
+		assertEquals(AT_TIMESTAMP, actual.type());
+		assertTrue(now.equals(actual.timestamp()) || now.isBefore(actual.timestamp()));
+	}
+
+	@Test
+	public void testStartFromLatestReceivesNoRecordsContinuesToUseTimestamp() throws Exception {
+		AbstractSingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.emptyBatchFollowedBySingleRecord();
+
+		SequenceNumber sequenceNumber = SENTINEL_LATEST_SEQUENCE_NUM.get();
+
+		// This fake behaviour delivers an empty batch followed by a single record
+		assertNumberOfMessagesReceivedFromKinesis(1, kinesis, sequenceNumber, efoProperties());
+
+		// This fake Kinesis will give 2 subscriptions
+		assertEquals(2, kinesis.getNumberOfSubscribeToShardInvocations());
+
+		assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(0).type());
+		assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(1).type());
+	}
+
+	@Test
+	public void testBoundedShardConsumesFromTimestamp() throws Exception {
+		String format = "yyyy-MM-dd'T'HH:mm";
+		String timestamp = "2020-07-02T09:14";
+		Instant expectedTimestamp = new SimpleDateFormat(format).parse(timestamp).toInstant();
+
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.boundedShard().build();
+
+		Properties consumerConfig = efoProperties();
+		consumerConfig.setProperty(STREAM_INITIAL_TIMESTAMP, timestamp);
+		consumerConfig.setProperty(STREAM_TIMESTAMP_DATE_FORMAT, format);
+		SequenceNumber sequenceNumber = SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get();
+
+		// Fake behaviour defaults to 10 messages
+		assertNumberOfMessagesReceivedFromKinesis(10, kinesis, sequenceNumber, consumerConfig);
+
+		StartingPosition actual = kinesis.getStartingPositionForSubscription(0);
+		assertEquals(AT_TIMESTAMP, actual.type());
+		assertEquals(expectedTimestamp, actual.timestamp());
+	}
+
+	@Test
+	public void testMillisBehindReported() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withMillisBehindLatest(123L)
+			.build();
+
+		// Fake behaviour defaults to 10 messages
+		ShardConsumerMetricsReporter metrics = assertNumberOfMessagesReceivedFromKinesis(10, kinesis, fakeSequenceNumber());
+
+		assertEquals(123L, metrics.getMillisBehindLatest());
+	}
+
+	@Test
+	public void testBoundedShardConsumesCorrectNumberOfMessages() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(10)
+			.withRecordsPerBatch(5)
+			.build();
+
+		// 10 batches of 5 records = 50
+		assertNumberOfMessagesReceivedFromKinesis(50, kinesis, fakeSequenceNumber());
+
+		assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations());
+	}
+
+	@Test
+	public void testBoundedShardResubscribesToShard() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(100)
+			.withRecordsPerBatch(10)
+			.withBatchesPerSubscription(5)
+			.build();
+
+		// 100 batches of 10 records = 1000
+		assertNumberOfMessagesReceivedFromKinesis(1000, kinesis, fakeSequenceNumber());
+
+		// 100 batches / 5 batches per subscription = 20 subscriptions
+		assertEquals(20, kinesis.getNumberOfSubscribeToShardInvocations());
+
+		// Starting from non-aggregated sequence number means we should start AFTER the sequence number
+		assertEquals(AFTER_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testBoundedShardWithAggregatedRecords() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(100)
+			.withRecordsPerBatch(10)
+			.withAggregationFactor(100)
+			.build();
+
+		// 100 batches of 10 records * 100 aggregation factor = 100000
+		assertNumberOfMessagesReceivedFromKinesis(100000, kinesis, fakeSequenceNumber());
+	}
+
+	@Test
+	public void testBoundedShardResumingConsumptionFromAggregatedSubsequenceNumber() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(10)
+			.withRecordsPerBatch(1)
+			.withAggregationFactor(10)
+			.build();
+
+		SequenceNumber subsequenceNumber = new SequenceNumber("1", 5);
+
+		// 10 batches of 1 record * 10 aggregation factor - 6 previously consumed subsequence records (0,1,2,3,4,5) = 94
+		assertNumberOfMessagesReceivedFromKinesis(94, kinesis, subsequenceNumber);
+
+		// Starting from aggregated sequence number means we should start AT the sequence number
+		assertEquals(AT_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testSubscribeToShardUsesCorrectStartingSequenceNumbers() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(10)
+			.withRecordsPerBatch(1)
+			.withBatchesPerSubscription(2)
+			.build();
+
+		// 10 batches of 1 record = 10
+		assertNumberOfMessagesReceivedFromKinesis(10, kinesis, new SequenceNumber("0"));
+
+		// 10 batches / 2 batches per subscription = 5 subscriptions
+		assertEquals(5, kinesis.getNumberOfSubscribeToShardInvocations());
+
+		// Starting positions should correlate to the last consumed sequence number
+		assertStartingPositionAfterSequenceNumber(kinesis.getStartingPositionForSubscription(0), "0");
+		assertStartingPositionAfterSequenceNumber(kinesis.getStartingPositionForSubscription(1), "2");
+		assertStartingPositionAfterSequenceNumber(kinesis.getStartingPositionForSubscription(2), "4");
+		assertStartingPositionAfterSequenceNumber(kinesis.getStartingPositionForSubscription(3), "6");
+		assertStartingPositionAfterSequenceNumber(kinesis.getStartingPositionForSubscription(4), "8");
+	}
+
+	private void assertStartingPositionAfterSequenceNumber(
+			final StartingPosition startingPosition,
+			final String sequenceNumber) {
+		assertEquals(AFTER_SEQUENCE_NUMBER, startingPosition.type());
+		assertEquals(sequenceNumber, startingPosition.sequenceNumber());
+	}
+
+	private ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
+				final int expectedNumberOfMessages,
+				final KinesisProxyV2Interface kinesis,
+				final SequenceNumber startingSequenceNumber) throws Exception {
+		return assertNumberOfMessagesReceivedFromKinesis(
+			expectedNumberOfMessages,
+			kinesis,
+			startingSequenceNumber,
+			efoProperties());
+	}
+
+	private ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
+			final int expectedNumberOfMessages,
+			final KinesisProxyV2Interface kinesis,
+			final SequenceNumber startingSequenceNumber,
+			final Properties consumerConfig) throws Exception {
+		return ShardConsumerTestUtils.assertNumberOfMessagesReceivedFromKinesis(
+			expectedNumberOfMessages,
+			new FanOutRecordPublisherFactory(kinesis),
+			startingSequenceNumber,
+			consumerConfig);
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
index b39b99e..40a599c 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java
@@ -17,52 +17,32 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals;
 
-import org.apache.flink.api.common.serialization.SimpleStringSchema;
-import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
 import org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling.PollingRecordPublisherFactory;
 import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
-import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
-import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
-import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
-import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
 import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisBehavioursFactory;
-import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
-import org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext;
-import org.apache.flink.streaming.connectors.kinesis.testutils.TestableKinesisDataFetcher;
-import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
-
-import com.amazonaws.services.kinesis.model.HashKeyRange;
-import com.amazonaws.services.kinesis.model.Shard;
-import org.apache.commons.lang3.StringUtils;
+
 import org.junit.Test;
-import org.mockito.Mockito;
 
-import java.math.BigInteger;
 import java.text.SimpleDateFormat;
-import java.util.Collections;
 import java.util.Date;
-import java.util.LinkedList;
 import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT;
+import static org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerTestUtils.fakeSequenceNumber;
 import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM;
 import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM;
-import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
 /**
- * Tests for the {@link ShardConsumer}.
+ * Tests for the {@link ShardConsumer} using Polling consumption with mocked Kinesis behaviours.
  */
 public class ShardConsumerTest {
 
@@ -161,83 +141,24 @@ public class ShardConsumerTest {
 		verify(kinesis).getShardIterator(any(), eq("AT_SEQUENCE_NUMBER"), eq("0"));
 	}
 
-	private SequenceNumber fakeSequenceNumber() {
-		return new SequenceNumber("fakeStartingState");
-	}
-
 	private ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
-		final int expectedNumberOfMessages,
-		final KinesisProxyInterface kinesis,
-		final SequenceNumber startingSequenceNumber) throws Exception {
+			final int expectedNumberOfMessages,
+			final KinesisProxyInterface kinesis,
+			final SequenceNumber startingSequenceNumber) throws Exception {
 		return assertNumberOfMessagesReceivedFromKinesis(expectedNumberOfMessages, kinesis, startingSequenceNumber, new Properties());
 	}
 
 	private ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
-		final int expectedNumberOfMessages,
-		final KinesisProxyInterface kinesis,
-		final SequenceNumber startingSequenceNumber,
-		final Properties consumerProperties) throws Exception {
-		ShardConsumerMetricsReporter shardMetricsReporter = new ShardConsumerMetricsReporter(mock(MetricGroup.class));
-
-		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
-
-		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
-		subscribedShardsStateUnderTest.add(
-			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
-				fakeToBeConsumedShard, startingSequenceNumber));
-
-		TestSourceContext<String> sourceContext = new TestSourceContext<>();
-
-		KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
-			new SimpleStringSchema());
-		TestableKinesisDataFetcher<String> fetcher =
-			new TestableKinesisDataFetcher<>(
-				Collections.singletonList("fakeStream"),
-				sourceContext,
-				consumerProperties,
-				deserializationSchema,
-				10,
-				2,
-				new AtomicReference<>(),
-				subscribedShardsStateUnderTest,
-				KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
-				Mockito.mock(KinesisProxyInterface.class));
-
-		final StreamShardHandle shardHandle = subscribedShardsStateUnderTest.get(0).getStreamShardHandle();
-		SequenceNumber lastProcessedSequenceNum = subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum();
-		StartingPosition startingPosition = AWSUtil.getStartingPosition(lastProcessedSequenceNum, consumerProperties);
-
-		final RecordPublisher recordPublisher = new PollingRecordPublisherFactory(config -> kinesis)
-			.create(startingPosition, fetcher.getConsumerConfiguration(), mock(MetricGroup.class), shardHandle);
-
-		int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
-		new ShardConsumer<>(
-			fetcher,
-			recordPublisher,
-			shardIndex,
-			shardHandle,
-			lastProcessedSequenceNum,
-			shardMetricsReporter,
-			deserializationSchema)
-			.run();
-
-		assertEquals(expectedNumberOfMessages, sourceContext.getCollectedOutputs().size());
-		assertEquals(
-			SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
-			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
-
-		return shardMetricsReporter;
-	}
-
-	private static StreamShardHandle getMockStreamShard(String streamName, int shardId) {
-		return new StreamShardHandle(
-			streamName,
-			new Shard()
-				.withShardId(KinesisShardIdGenerator.generateFromShardOrder(shardId))
-				.withHashKeyRange(
-					new HashKeyRange()
-						.withStartingHashKey("0")
-						.withEndingHashKey(new BigInteger(StringUtils.repeat("FF", 16), 16).toString())));
+			final int expectedNumberOfMessages,
+			final KinesisProxyInterface kinesis,
+			final SequenceNumber startingSequenceNumber,
+			final Properties consumerProperties) throws Exception {
+
+		return ShardConsumerTestUtils.assertNumberOfMessagesReceivedFromKinesis(
+			expectedNumberOfMessages,
+			new PollingRecordPublisherFactory(config -> kinesis),
+			startingSequenceNumber,
+			consumerProperties);
 	}
 
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java
new file mode 100644
index 0000000..3be976a
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals;
+
+import org.apache.flink.api.common.serialization.SimpleStringSchema;
+import org.apache.flink.metrics.MetricGroup;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
+import org.apache.flink.streaming.connectors.kinesis.metrics.ShardConsumerMetricsReporter;
+import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
+import org.apache.flink.streaming.connectors.kinesis.testutils.KinesisShardIdGenerator;
+import org.apache.flink.streaming.connectors.kinesis.testutils.TestSourceContext;
+import org.apache.flink.streaming.connectors.kinesis.testutils.TestableKinesisDataFetcher;
+import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.Shard;
+import org.apache.commons.lang3.StringUtils;
+import org.mockito.Mockito;
+
+import java.math.BigInteger;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Utilities for testing the {@link ShardConsumer}.
+ */
+public class ShardConsumerTestUtils {
+
+	public static <T> ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis(
+				final int expectedNumberOfMessages,
+				final RecordPublisherFactory recordPublisherFactory,
+				final SequenceNumber startingSequenceNumber,
+				final Properties consumerProperties) throws InterruptedException {
+		ShardConsumerMetricsReporter shardMetricsReporter = new ShardConsumerMetricsReporter(mock(MetricGroup.class));
+
+		StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);
+
+		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
+		subscribedShardsStateUnderTest.add(
+			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
+				fakeToBeConsumedShard, startingSequenceNumber));
+
+		TestSourceContext<String> sourceContext = new TestSourceContext<>();
+
+		KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
+			new SimpleStringSchema());
+		TestableKinesisDataFetcher<String> fetcher =
+			new TestableKinesisDataFetcher<>(
+				Collections.singletonList("fakeStream"),
+				sourceContext,
+				consumerProperties,
+				deserializationSchema,
+				10,
+				2,
+				new AtomicReference<>(),
+				subscribedShardsStateUnderTest,
+				KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
+				Mockito.mock(KinesisProxyInterface.class),
+				Mockito.mock(KinesisProxyV2Interface.class));
+
+		final StreamShardHandle shardHandle = subscribedShardsStateUnderTest.get(0).getStreamShardHandle();
+		final SequenceNumber lastProcessedSequenceNum = subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum();
+		final StartingPosition startingPosition = AWSUtil.getStartingPosition(lastProcessedSequenceNum, consumerProperties);
+
+		final RecordPublisher recordPublisher = recordPublisherFactory
+			.create(startingPosition, fetcher.getConsumerConfiguration(), mock(MetricGroup.class), shardHandle);
+
+		int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
+		new ShardConsumer<>(
+			fetcher,
+			recordPublisher,
+			shardIndex,
+			shardHandle,
+			lastProcessedSequenceNum,
+			shardMetricsReporter,
+			deserializationSchema)
+			.run();
+
+		assertEquals(expectedNumberOfMessages, sourceContext.getCollectedOutputs().size());
+		assertEquals(
+			SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
+			subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
+
+		return shardMetricsReporter;
+	}
+
+	public static StreamShardHandle getMockStreamShard(String streamName, int shardId) {
+		return new StreamShardHandle(
+			streamName,
+			new Shard()
+				.withShardId(KinesisShardIdGenerator.generateFromShardOrder(shardId))
+				.withHashKeyRange(
+					new HashKeyRange()
+						.withStartingHashKey("0")
+						.withEndingHashKey(new BigInteger(StringUtils.repeat("FF", 16), 16).toString())));
+	}
+
+	public static SequenceNumber fakeSequenceNumber() {
+		return new SequenceNumber("fakeStartingState");
+	}
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
index ebeaa33..2b4ee99 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
@@ -1,4 +1,3 @@
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java
new file mode 100644
index 0000000..97aef9f
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
+import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.SingleShardFanOutKinesisV2;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.SubscriptionErrorKinesisV2;
+import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.TestConsumer;
+
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import org.hamcrest.Matchers;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
+import software.amazon.awssdk.services.kinesis.model.Record;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+
+import java.nio.ByteBuffer;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.emptyList;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_RETRIES;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM;
+import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.SubscriptionErrorKinesisV2.NUMBER_OF_SUBSCRIPTIONS;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.emptyShard;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.singletonShard;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.createDummyStreamShardHandle;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.mockito.ArgumentMatchers.anyDouble;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AFTER_SEQUENCE_NUMBER;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_SEQUENCE_NUMBER;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.LATEST;
+import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.TRIM_HORIZON;
+
+/**
+ * Tests for {@link FanOutRecordPublisher}.
+ */
+public class FanOutRecordPublisherTest {
+
+	@Rule
+	public ExpectedException thrown = ExpectedException.none();
+
+	private static final long EXPECTED_SUBSCRIBE_TO_SHARD_MAX = 1;
+	private static final long EXPECTED_SUBSCRIBE_TO_SHARD_BASE = 2;
+	private static final double EXPECTED_SUBSCRIBE_TO_SHARD_POW = 0.5;
+	private static final int EXPECTED_SUBSCRIBE_TO_SHARD_RETRIES = 3;
+
+	private static final String DUMMY_SEQUENCE = "1";
+
+	private static final SequenceNumber SEQUENCE_NUMBER = new SequenceNumber(DUMMY_SEQUENCE);
+
+	private static final SequenceNumber AGGREGATED_SEQUENCE_NUMBER = new SequenceNumber(DUMMY_SEQUENCE, 1L);
+
+	@Test
+	public void testToSdkV2StartingPositionAfterSequenceNumber() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = emptyShard();
+
+		RecordPublisher publisher = createRecordPublisher(kinesis, StartingPosition.continueFromSequenceNumber(SEQUENCE_NUMBER));
+		publisher.run(new TestConsumer());
+
+		assertEquals(DUMMY_SEQUENCE, kinesis.getStartingPositionForSubscription(0).sequenceNumber());
+		assertEquals(AFTER_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testToSdkV2StartingPositionAtSequenceNumber() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = emptyShard();
+
+		RecordPublisher publisher = createRecordPublisher(kinesis, StartingPosition.restartFromSequenceNumber(AGGREGATED_SEQUENCE_NUMBER));
+		publisher.run(new TestConsumer());
+
+		assertEquals(DUMMY_SEQUENCE, kinesis.getStartingPositionForSubscription(0).sequenceNumber());
+		assertEquals(AT_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testToSdkV2StartingPositionLatest() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = emptyShard();
+
+		RecordPublisher publisher = createRecordPublisher(kinesis, latest());
+		publisher.run(new TestConsumer());
+
+		assertNull(kinesis.getStartingPositionForSubscription(0).sequenceNumber());
+		assertEquals(LATEST, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testToSdkV2StartingPositionTrimHorizon() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = emptyShard();
+
+		RecordPublisher publisher = createRecordPublisher(kinesis, StartingPosition.continueFromSequenceNumber(SENTINEL_EARLIEST_SEQUENCE_NUM.get()));
+		publisher.run(new TestConsumer());
+
+		assertNull(kinesis.getStartingPositionForSubscription(0).sequenceNumber());
+		assertEquals(TRIM_HORIZON, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testToSdkV2StartingPositionAtTimeStamp() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = emptyShard();
+		Date now = new Date();
+
+		RecordPublisher publisher = createRecordPublisher(kinesis, StartingPosition.fromTimestamp(now));
+		publisher.run(new TestConsumer());
+
+		assertEquals(now.toInstant(), kinesis.getStartingPositionForSubscription(0).timestamp());
+		assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(0).type());
+	}
+
+	@Test
+	public void testToSdkV1Records() throws Exception {
+		Date now = new Date();
+		byte[] data = new byte[] { 0, 1, 2, 3 };
+
+		Record record = Record
+			.builder()
+			.approximateArrivalTimestamp(now.toInstant())
+			.partitionKey("pk")
+			.sequenceNumber("sn")
+			.data(SdkBytes.fromByteArray(data))
+			.build();
+
+		KinesisProxyV2Interface kinesis = singletonShard(createSubscribeToShardEvent(record));
+		RecordPublisher publisher = createRecordPublisher(kinesis, latest());
+
+		TestConsumer consumer = new TestConsumer();
+		publisher.run(consumer);
+
+		UserRecord actual = consumer.getRecordBatches().get(0).getDeaggregatedRecords().get(0);
+		assertFalse(actual.isAggregated());
+		assertEquals(now, actual.getApproximateArrivalTimestamp());
+		assertEquals("sn", actual.getSequenceNumber());
+		assertEquals("pk", actual.getPartitionKey());
+		assertThat(toByteArray(actual.getData()), Matchers.equalTo(data));
+	}
+
+	@Test
+	public void testExceptionThrownInConsumerPropagatesToRecordPublisher() throws Exception {
+		thrown.expect(RuntimeException.class);
+		thrown.expectMessage("An error thrown from the consumer");
+
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.boundedShard().build();
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+
+		recordPublisher.run(batch -> {
+			throw new RuntimeException("An error thrown from the consumer");
+		});
+	}
+
+	@Test
+	public void testResourceNotFoundWhenObtainingSubscriptionTerminatesApplication() throws Exception {
+		thrown.expect(ResourceNotFoundException.class);
+
+		KinesisProxyV2Interface kinesis = FakeKinesisFanOutBehavioursFactory.resourceNotFoundWhenObtainingSubscription();
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+
+		recordPublisher.run(new TestConsumer());
+	}
+
+	@Test
+	public void testShardConsumerCompletesIfResourceNotFoundExceptionThrownFromSubscription() throws Exception {
+		ResourceNotFoundException exception = ResourceNotFoundException.builder().build();
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(exception);
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+		TestConsumer consumer = new TestConsumer();
+
+		assertEquals(COMPLETE, recordPublisher.run(consumer));
+
+		// Will exit on the first subscription
+		assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations());
+	}
+
+	@Test
+	public void testShardConsumerRetriesIfLimitExceededExceptionThrownFromSubscription() throws Exception {
+		LimitExceededException exception = LimitExceededException.builder().build();
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(exception);
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+		TestConsumer consumer = new TestConsumer();
+
+		int count = 0;
+		while (recordPublisher.run(consumer) == INCOMPLETE) {
+			if (++count > NUMBER_OF_SUBSCRIPTIONS + 1) {
+				break;
+			}
+		}
+
+		// An exception is thrown on the 5th subscription and then the subscription completes on the next
+		assertEquals(NUMBER_OF_SUBSCRIPTIONS + 1, kinesis.getNumberOfSubscribeToShardInvocations());
+	}
+
+	@Test
+	public void testSubscribeToShardBacksOffForRetryableError() throws Exception {
+		LimitExceededException retryableError = LimitExceededException.builder().build();
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(retryableError);
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		when(backoff.calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), anyInt())).thenReturn(100L);
+
+		new FanOutRecordPublisher(latest(), "arn", createDummyStreamShardHandle(), kinesis, configuration, backoff)
+			.run(new TestConsumer());
+
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_SUBSCRIBE_TO_SHARD_BASE,
+			EXPECTED_SUBSCRIBE_TO_SHARD_MAX,
+			EXPECTED_SUBSCRIBE_TO_SHARD_POW,
+			1
+		);
+
+		verify(backoff).sleep(100L);
+	}
+
+	@Test
+	public void testSubscribeToShardFailsWhenMaxRetriesExceeded() throws Exception {
+		thrown.expect(RuntimeException.class);
+		thrown.expectMessage("Maximum reties exceeded for SubscribeToShard. Failed 3 times.");
+
+		Properties efoProperties = createEfoProperties();
+		efoProperties.setProperty(SUBSCRIBE_TO_SHARD_RETRIES, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_RETRIES));
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(efoProperties, emptyList());
+
+		LimitExceededException retryableError = LimitExceededException.builder().build();
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(retryableError);
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		FanOutRecordPublisher recordPublisher = new FanOutRecordPublisher(latest(), "arn", createDummyStreamShardHandle(), kinesis, configuration, backoff);
+
+		int count = 0;
+		while (recordPublisher.run(new TestConsumer()) == INCOMPLETE) {
+			if (++count > 3) {
+				break;
+			}
+		}
+	}
+
+	@Test
+	public void testSubscribeToShardBacksOffAttemptIncreases() throws Exception {
+		LimitExceededException retryableError = LimitExceededException.builder().build();
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.errorDuringSubscription(retryableError);
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		FanOutRecordPublisher recordPublisher = new FanOutRecordPublisher(latest(), "arn", createDummyStreamShardHandle(), kinesis, configuration, backoff);
+
+		recordPublisher.run(new TestConsumer());
+		recordPublisher.run(new TestConsumer());
+		recordPublisher.run(new TestConsumer());
+
+		verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(1));
+		verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(2));
+		verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(3));
+
+		verify(backoff, never()).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(0));
+		verify(backoff, never()).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(4));
+	}
+
+	@Test
+	public void testBackOffAttemptResetsWithSuccessfulSubscription() throws Exception {
+		SubscriptionErrorKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory.alternatingSuccessErrorDuringSubscription();
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		FanOutRecordPublisher recordPublisher = new FanOutRecordPublisher(latest(), "arn", createDummyStreamShardHandle(), kinesis, configuration, backoff);
+
+		recordPublisher.run(new TestConsumer());
+		recordPublisher.run(new TestConsumer());
+		recordPublisher.run(new TestConsumer());
+
+		// Expecting:
+		// - first attempt to fail, and backoff attempt #1
+		// - second attempt to succeed, and reset attempt index
+		// - third attempt to fail, and backoff attempt #1
+
+		verify(backoff, times(2)).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(1));
+
+		verify(backoff, never()).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(0));
+		verify(backoff, never()).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(2));
+	}
+
+	@Test
+	public void testRecordDurability() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(10)
+			.withBatchesPerSubscription(3)
+			.withRecordsPerBatch(12)
+			.build();
+
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+		TestConsumer consumer = new TestConsumer();
+
+		int count = 0;
+		while (recordPublisher.run(consumer) == INCOMPLETE) {
+			if (++count > 4) {
+				break;
+			}
+		}
+
+		List<UserRecord> userRecords = flattenToUserRecords(consumer.getRecordBatches());
+
+		// Should have received 10 * 12 = 120 records
+		assertEquals(120, userRecords.size());
+
+		int expectedSequenceNumber = 1;
+		for (UserRecord record : userRecords) {
+			assertEquals(String.valueOf(expectedSequenceNumber++), record.getSequenceNumber());
+		}
+	}
+
+	@Test
+	public void testAggregatedRecordDurability() throws Exception {
+		SingleShardFanOutKinesisV2 kinesis = FakeKinesisFanOutBehavioursFactory
+			.boundedShard()
+			.withBatchCount(10)
+			.withAggregationFactor(5)
+			.withRecordsPerBatch(12)
+			.build();
+
+		RecordPublisher recordPublisher = createRecordPublisher(kinesis);
+		TestConsumer consumer = new TestConsumer();
+
+		int count = 0;
+		while (recordPublisher.run(consumer) == INCOMPLETE) {
+			if (++count > 5) {
+				break;
+			}
+		}
+
+		List<UserRecord> userRecords = flattenToUserRecords(consumer.getRecordBatches());
+
+		// Should have received 10 * 12 * 5 = 600 records
+		assertEquals(600, userRecords.size());
+
+		int sequence = 1;
+		long subsequence = 0;
+		for (UserRecord userRecord : userRecords) {
+			assertEquals(String.valueOf(sequence), userRecord.getSequenceNumber());
+			assertEquals(subsequence++, userRecord.getSubSequenceNumber());
+
+			if (subsequence == 5) {
+				sequence++;
+				subsequence = 0;
+			}
+		}
+	}
+
+	private List<UserRecord> flattenToUserRecords(final List<RecordBatch> recordBatch) {
+		return recordBatch
+			.stream()
+			.flatMap(b -> b.getDeaggregatedRecords().stream())
+			.collect(Collectors.toList());
+	}
+
+	private byte[] toByteArray(final ByteBuffer byteBuffer) {
+		byte[] dataBytes = new byte[byteBuffer.remaining()];
+		byteBuffer.get(dataBytes);
+		return dataBytes;
+	}
+
+	private RecordPublisher createRecordPublisher(final KinesisProxyV2Interface kinesis) {
+		return createRecordPublisher(kinesis, latest());
+	}
+
+	private RecordPublisher createRecordPublisher(final KinesisProxyV2Interface kinesis, final StartingPosition startingPosition) {
+		return new FanOutRecordPublisher(startingPosition, "arn", createDummyStreamShardHandle(), kinesis, createConfiguration(), new FullJitterBackoff());
+	}
+
+	private FanOutRecordPublisherConfiguration createConfiguration() {
+		return new FanOutRecordPublisherConfiguration(createEfoProperties(), emptyList());
+	}
+
+	private Properties createEfoProperties() {
+		Properties config = new Properties();
+		config.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		config.setProperty(EFO_CONSUMER_NAME, "dummy-efo-consumer");
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_BASE, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_BASE));
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_MAX, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_MAX));
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_POW));
+		return config;
+	}
+
+	private SubscribeToShardEvent createSubscribeToShardEvent(final Record...records) {
+		return SubscribeToShardEvent
+			.builder()
+			.records(records)
+			.build();
+	}
+
+	private StartingPosition latest() {
+		return StartingPosition.continueFromSequenceNumber(SENTINEL_LATEST_SEQUENCE_NUM.get());
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java
index 2bbe3da..6b6107e 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java
@@ -18,23 +18,17 @@
 package org.apache.flink.streaming.connectors.kinesis.internals.publisher.polling;
 
 import org.apache.flink.metrics.MetricGroup;
-import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
-import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordBatchConsumer;
 import org.apache.flink.streaming.connectors.kinesis.metrics.PollingRecordPublisherMetricsReporter;
-import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.StartingPosition;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisBehavioursFactory;
 import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils;
+import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.TestConsumer;
 
-import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import java.util.ArrayList;
-import java.util.List;
-
 import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.COMPLETE;
 import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE;
 import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM;
@@ -62,9 +56,9 @@ public class PollingRecordPublisherTest {
 		TestConsumer consumer = new TestConsumer();
 		recordPublisher.run(consumer);
 
-		assertEquals(1, consumer.recordBatches.size());
-		assertEquals(5, consumer.recordBatches.get(0).getDeaggregatedRecordSize());
-		assertEquals(100L, consumer.recordBatches.get(0).getMillisBehindLatest(), 0);
+		assertEquals(1, consumer.getRecordBatches().size());
+		assertEquals(5, consumer.getRecordBatches().get(0).getDeaggregatedRecordSize());
+		assertEquals(100L, consumer.getRecordBatches().get(0).getMillisBehindLatest(), 0);
 	}
 
 	@Test
@@ -146,20 +140,4 @@ public class PollingRecordPublisherTest {
 			500L);
 	}
 
-	private static class TestConsumer implements RecordBatchConsumer {
-		private final List<RecordBatch> recordBatches = new ArrayList<>();
-		private String latestSequenceNumber;
-
-		@Override
-		public SequenceNumber accept(final RecordBatch batch) {
-			recordBatches.add(batch);
-
-			if (batch.getDeaggregatedRecordSize() > 0) {
-				List<UserRecord> records = batch.getDeaggregatedRecords();
-				latestSequenceNumber = records.get(records.size() - 1).getSequenceNumber();
-			}
-
-			return new SequenceNumber(latestSequenceNumber);
-		}
-	}
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
new file mode 100644
index 0000000..f7641c3
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.junit.Test;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Tests for {@link KinesisProxyV2}.
+ */
+public class KinesisProxyV2Test {
+
+	@Test
+	public void testSubscribeToShard() {
+		KinesisAsyncClient kinesis = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis);
+
+		SubscribeToShardRequest request = SubscribeToShardRequest.builder().build();
+		SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
+			.builder()
+			.subscriber(event -> {})
+			.build();
+
+		proxy.subscribeToShard(request, responseHandler);
+
+		verify(kinesis).subscribeToShard(eq(request), eq(responseHandler));
+	}
+
+	@Test
+	public void testCloseInvokesClientClose() {
+		KinesisAsyncClient kinesis = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis);
+
+		proxy.close();
+
+		verify(kinesis).close();
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
new file mode 100644
index 0000000..83e299a
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.testutils;
+
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+
+import com.amazonaws.kinesis.agg.RecordAggregator;
+import org.apache.commons.lang3.NotImplementedException;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
+import software.amazon.awssdk.services.kinesis.model.Record;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.kinesis.model.StartingPosition;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponse;
+import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Factory for different kinds of fake Kinesis behaviours implemented against {@link KinesisProxyV2Interface}.
+ */
+public class FakeKinesisFanOutBehavioursFactory {
+
+	public static SingleShardFanOutKinesisV2.Builder boundedShard() {
+		return new SingleShardFanOutKinesisV2.Builder();
+	}
+
+	public static KinesisProxyV2Interface singletonShard(final SubscribeToShardEvent event) {
+		return new SingletonEventFanOutKinesisV2(event);
+	}
+
+	public static SingleShardFanOutKinesisV2 emptyShard() {
+		return new SingleShardFanOutKinesisV2.Builder().withBatchCount(0).build();
+	}
+
+	public static KinesisProxyV2Interface resourceNotFoundWhenObtainingSubscription() {
+		return new ExceptionalKinesisV2(ResourceNotFoundException.builder().build());
+	}
+
+	public static SubscriptionErrorKinesisV2 errorDuringSubscription(final Throwable throwable) {
+		return new SubscriptionErrorKinesisV2(throwable);
+	}
+
+	public static SubscriptionErrorKinesisV2 alternatingSuccessErrorDuringSubscription() {
+		return new AlternatingSubscriptionErrorKinesisV2(LimitExceededException.builder().build());
+	}
+
+	public static AbstractSingleShardFanOutKinesisV2 emptyBatchFollowedBySingleRecord() {
+		return new AbstractSingleShardFanOutKinesisV2(2) {
+			private int subscription = 0;
+
+			@Override
+			void sendEvents(Subscriber<? super SubscribeToShardEventStream> subscriber) {
+				SubscribeToShardEvent.Builder builder = SubscribeToShardEvent
+					.builder()
+					.continuationSequenceNumber(subscription == 0 ? "1" : null);
+
+				if (subscription == 1) {
+					builder.records(createRecord(new AtomicInteger(1)));
+				}
+
+				subscriber.onNext(builder.build());
+				subscription++;
+			}
+		};
+	}
+
+	/**
+	 * An unbounded fake Kinesis that offers subscriptions with 5 records, alternately throwing the given exception.
+	 * The first subscription is exceptional, second successful, and so on.
+	 */
+	private static class AlternatingSubscriptionErrorKinesisV2 extends SubscriptionErrorKinesisV2 {
+
+		int index = 0;
+
+		private AlternatingSubscriptionErrorKinesisV2(final Throwable throwable) {
+			super(throwable);
+		}
+
+		@Override
+		void sendEvents(Subscriber<? super SubscribeToShardEventStream> subscriber) {
+			if (index % 2 == 0) {
+				super.sendEvents(subscriber);
+			} else {
+				super.sendEventBatch(subscriber);
+				subscriber.onComplete();
+			}
+
+			index++;
+		}
+	}
+
+	/**
+	 * A fake Kinesis that throws the given exception after sending 5 records.
+	 * A total of 5 subscriptions can be acquired.
+	 */
+	public static class SubscriptionErrorKinesisV2 extends AbstractSingleShardFanOutKinesisV2 {
+
+		public static final int NUMBER_OF_SUBSCRIPTIONS = 5;
+
+		public static final int NUMBER_OF_EVENTS_PER_SUBSCRIPTION = 5;
+
+		private final Throwable throwable;
+
+		AtomicInteger sequenceNumber = new AtomicInteger();
+
+		private SubscriptionErrorKinesisV2(final Throwable throwable) {
+			super(NUMBER_OF_SUBSCRIPTIONS);
+			this.throwable = throwable;
+		}
+
+		@Override
+		void sendEvents(Subscriber<? super SubscribeToShardEventStream> subscriber) {
+			sendEventBatch(subscriber);
+			subscriber.onError(throwable);
+		}
+
+		void sendEventBatch(Subscriber<? super SubscribeToShardEventStream> subscriber) {
+			for (int i = 0; i < NUMBER_OF_EVENTS_PER_SUBSCRIPTION; i++) {
+				subscriber.onNext(SubscribeToShardEvent
+					.builder()
+					.records(createRecord(sequenceNumber))
+					.continuationSequenceNumber(String.valueOf(i))
+					.build());
+			}
+		}
+	}
+
+	private static class ExceptionalKinesisV2 extends KinesisProxyV2InterfaceAdapter {
+
+		private final RuntimeException exception;
+
+		private ExceptionalKinesisV2(RuntimeException exception) {
+			this.exception = exception;
+		}
+
+		@Override
+		public CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler) {
+			responseHandler.exceptionOccurred(exception);
+			return CompletableFuture.completedFuture(null);
+		}
+	}
+
+	private static class SingletonEventFanOutKinesisV2 extends AbstractSingleShardFanOutKinesisV2 {
+
+		private final SubscribeToShardEvent event;
+
+		private SingletonEventFanOutKinesisV2(SubscribeToShardEvent event) {
+			super(1);
+			this.event = event;
+		}
+
+		@Override
+		void sendEvents(Subscriber<? super SubscribeToShardEventStream> subscriber) {
+			subscriber.onNext(event);
+		}
+	}
+
+	/**
+	 * A fake implementation of KinesisProxyV2 SubscribeToShard that provides dummy records for EFO subscriptions.
+	 * Aggregated and non-aggregated records are supported with various batch and subscription sizes.
+	 */
+	public static class SingleShardFanOutKinesisV2 extends AbstractSingleShardFanOutKinesisV2 {
+
+		private final int batchesPerSubscription;
+
+		private final int recordsPerBatch;
+
+		private final long millisBehindLatest;
+
+		private final int totalRecords;
+
+		private final int aggregationFactor;
+
+		private final AtomicInteger sequenceNumber = new AtomicInteger();
+
+		private SingleShardFanOutKinesisV2(final Builder builder) {
+			super(builder.getSubscriptionCount());
+			this.batchesPerSubscription = builder.batchesPerSubscription;
+			this.recordsPerBatch = builder.recordsPerBatch;
+			this.millisBehindLatest = builder.millisBehindLatest;
+			this.aggregationFactor = builder.aggregationFactor;
+			this.totalRecords = builder.getTotalRecords();
+		}
+
+		@Override
+		void sendEvents(final Subscriber<? super SubscribeToShardEventStream> subscriber) {
+			SubscribeToShardEvent.Builder eventBuilder = SubscribeToShardEvent
+				.builder()
+				.millisBehindLatest(millisBehindLatest);
+
+			for (int batchIndex = 0; batchIndex < batchesPerSubscription && sequenceNumber.get() < totalRecords; batchIndex++) {
+				List<Record> records = new ArrayList<>();
+
+				for (int i = 0; i < recordsPerBatch; i++) {
+					final Record record;
+
+					if (aggregationFactor == 1) {
+						record = createRecord(sequenceNumber);
+					} else {
+						record = createAggregatedRecord(aggregationFactor, sequenceNumber);
+					}
+
+					records.add(record);
+				}
+
+				eventBuilder.records(records);
+
+				String continuation = sequenceNumber.get() < totalRecords ? String.valueOf(sequenceNumber.get() + 1) : null;
+				eventBuilder.continuationSequenceNumber(continuation);
+
+				subscriber.onNext(eventBuilder.build());
+			}
+		}
+
+		/**
+		 * A convenience builder for {@link SingleShardFanOutKinesisV2}.
+		 */
+		public static class Builder {
+			private int batchesPerSubscription = 100000;
+			private int recordsPerBatch = 10;
+			private long millisBehindLatest = 0;
+			private int batchCount = 1;
+			private int aggregationFactor = 1;
+
+			public int getSubscriptionCount() {
+				return (int) Math.ceil((double) getTotalRecords() / batchesPerSubscription / recordsPerBatch);
+			}
+
+			public int getTotalRecords() {
+				return batchCount * recordsPerBatch;
+			}
+
+			public Builder withBatchesPerSubscription(final int batchesPerSubscription) {
+				this.batchesPerSubscription = batchesPerSubscription;
+				return this;
+			}
+
+			public Builder withRecordsPerBatch(final int recordsPerBatch) {
+				this.recordsPerBatch = recordsPerBatch;
+				return this;
+			}
+
+			public Builder withBatchCount(final int batchCount) {
+				this.batchCount = batchCount;
+				return this;
+			}
+
+			public Builder withMillisBehindLatest(final long millisBehindLatest) {
+				this.millisBehindLatest = millisBehindLatest;
+				return this;
+			}
+
+			public Builder withAggregationFactor(final int aggregationFactor) {
+				this.aggregationFactor = aggregationFactor;
+				return this;
+			}
+
+			public SingleShardFanOutKinesisV2 build() {
+				return new SingleShardFanOutKinesisV2(this);
+			}
+		}
+	}
+
+	/**
+	 * A single shard dummy EFO implementation that provides basic responses and subscription management.
+	 * Does not provide any records.
+	 */
+	public abstract static class AbstractSingleShardFanOutKinesisV2 extends KinesisProxyV2InterfaceAdapter {
+
+		private final List<SubscribeToShardRequest> requests = new ArrayList<>();
+
+		private int remainingSubscriptions;
+
+		private AbstractSingleShardFanOutKinesisV2(final int remainingSubscriptions) {
+			this.remainingSubscriptions = remainingSubscriptions;
+		}
+
+		public int getNumberOfSubscribeToShardInvocations() {
+			return requests.size();
+		}
+
+		public StartingPosition getStartingPositionForSubscription(final int subscriptionIndex) {
+			assertTrue(subscriptionIndex >= 0);
+			assertTrue(subscriptionIndex < getNumberOfSubscribeToShardInvocations());
+
+			return requests.get(subscriptionIndex).startingPosition();
+		}
+
+		@Override
+		public CompletableFuture<Void> subscribeToShard(
+			final SubscribeToShardRequest request,
+			final SubscribeToShardResponseHandler responseHandler) {
+
+			requests.add(request);
+
+			return CompletableFuture.supplyAsync(() -> {
+				responseHandler.responseReceived(SubscribeToShardResponse.builder().build());
+
+				responseHandler.onEventStream(subscriber -> {
+					subscriber.onSubscribe(mock(Subscription.class));
+
+					if (remainingSubscriptions > 0) {
+						sendEvents(subscriber);
+						remainingSubscriptions--;
+					} else {
+						SubscribeToShardEvent.Builder eventBuilder = SubscribeToShardEvent
+							.builder()
+							.millisBehindLatest(0L)
+							.continuationSequenceNumber(null);
+
+						subscriber.onNext(eventBuilder.build());
+					}
+
+					subscriber.onComplete();
+				});
+
+				return null;
+			});
+		}
+
+		abstract void sendEvents(final Subscriber<? super SubscribeToShardEventStream> subscriber);
+
+	}
+
+	private static class KinesisProxyV2InterfaceAdapter implements KinesisProxyV2Interface {
+
+		@Override
+		public CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler) {
+			throw new NotImplementedException("This method is not implemented.");
+		}
+	}
+
+	private static Record createRecord(final AtomicInteger sequenceNumber) {
+		return createRecord(randomAlphabetic(32).getBytes(UTF_8), sequenceNumber);
+	}
+
+	private static Record createRecord(final byte[] data, final AtomicInteger sequenceNumber) {
+		return Record
+			.builder()
+			.approximateArrivalTimestamp(Instant.now())
+			.data(SdkBytes.fromByteArray(data))
+			.sequenceNumber(String.valueOf(sequenceNumber.incrementAndGet()))
+			.partitionKey("pk")
+			.build();
+	}
+
+	private static Record createAggregatedRecord(final int aggregationFactor, final AtomicInteger sequenceNumber) {
+		RecordAggregator recordAggregator = new RecordAggregator();
+
+		for (int i = 0; i < aggregationFactor; i++) {
+			try {
+				recordAggregator.addUserRecord("pk", randomAlphabetic(32).getBytes(UTF_8));
+			} catch (Exception e) {
+				throw new RuntimeException(e);
+			}
+		}
+
+		return createRecord(recordAggregator.clearAndGet().toRecordBytes(), sequenceNumber);
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestUtils.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestUtils.java
index 7c5c786..abb186b 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestUtils.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestUtils.java
@@ -19,10 +19,14 @@ package org.apache.flink.streaming.connectors.kinesis.testutils;
 
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordBatch;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher;
+import org.apache.flink.streaming.connectors.kinesis.model.SequenceNumber;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 
 import com.amazonaws.kinesis.agg.AggRecord;
 import com.amazonaws.kinesis.agg.RecordAggregator;
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
 import com.amazonaws.services.kinesis.model.HashKeyRange;
 import com.amazonaws.services.kinesis.model.Record;
 import com.amazonaws.services.kinesis.model.SequenceNumberRange;
@@ -39,6 +43,12 @@ import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.NONE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_ARN_PREFIX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_REGISTRATION_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+
 /**
  * General test utils.
  */
@@ -116,4 +126,35 @@ public class TestUtils {
 		return new StreamShardHandle(streamName, shard);
 	}
 
+	public static Properties efoProperties() {
+		Properties consumerConfig = new Properties();
+		consumerConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		consumerConfig.setProperty(EFO_REGISTRATION_TYPE, NONE.name());
+		consumerConfig.setProperty(EFO_CONSUMER_ARN_PREFIX + "." + "fakeStream", "stream-consumer-arn");
+		return consumerConfig;
+	}
+
+	/**
+	 * A test record consumer used to capture messages from kinesis.
+	 */
+	public static class TestConsumer implements RecordPublisher.RecordBatchConsumer {
+		private final List<RecordBatch> recordBatches = new ArrayList<>();
+		private String latestSequenceNumber;
+
+		@Override
+		public SequenceNumber accept(final RecordBatch batch) {
+			recordBatches.add(batch);
+
+			if (batch.getDeaggregatedRecordSize() > 0) {
+				List<UserRecord> records = batch.getDeaggregatedRecords();
+				latestSequenceNumber = records.get(records.size() - 1).getSequenceNumber();
+			}
+
+			return new SequenceNumber(latestSequenceNumber);
+		}
+
+		public List<RecordBatch> getRecordBatches() {
+			return recordBatches;
+		}
+	}
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
index 3bb11bd..cd32361 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcher.java
@@ -25,6 +25,7 @@ import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetche
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.model.StreamShardHandle;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 
 import org.mockito.Mockito;
@@ -48,13 +49,39 @@ import static org.mockito.Mockito.when;
  */
 public class TestableKinesisDataFetcher<T> extends KinesisDataFetcher<T> {
 
-	private OneShotLatch runWaiter;
-	private OneShotLatch initialDiscoveryWaiter;
-	private OneShotLatch shutdownWaiter;
+	private final OneShotLatch runWaiter;
+	private final OneShotLatch initialDiscoveryWaiter;
+	private final OneShotLatch shutdownWaiter;
 
 	private volatile boolean running;
 
 	public TestableKinesisDataFetcher(
+		List<String> fakeStreams,
+		SourceFunction.SourceContext<T> sourceContext,
+		Properties fakeConfiguration,
+		KinesisDeserializationSchema<T> deserializationSchema,
+		int fakeTotalCountOfSubtasks,
+		int fakeIndexOfThisSubtask,
+		AtomicReference<Throwable> thrownErrorUnderTest,
+		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
+		HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
+		KinesisProxyInterface fakeKinesis) {
+
+		this(
+			fakeStreams,
+			sourceContext,
+			fakeConfiguration,
+			deserializationSchema,
+			fakeTotalCountOfSubtasks,
+			fakeIndexOfThisSubtask,
+			thrownErrorUnderTest,
+			subscribedShardsStateUnderTest,
+			subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
+			fakeKinesis,
+			null);
+	}
+
+	public TestableKinesisDataFetcher(
 			List<String> fakeStreams,
 			SourceFunction.SourceContext<T> sourceContext,
 			Properties fakeConfiguration,
@@ -64,7 +91,8 @@ public class TestableKinesisDataFetcher<T> extends KinesisDataFetcher<T> {
 			AtomicReference<Throwable> thrownErrorUnderTest,
 			LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
 			HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
-			KinesisProxyInterface fakeKinesis) {
+			KinesisProxyInterface fakeKinesis,
+			KinesisProxyV2Interface fakeKinesisV2) {
 		super(
 			fakeStreams,
 			sourceContext,
@@ -78,7 +106,8 @@ public class TestableKinesisDataFetcher<T> extends KinesisDataFetcher<T> {
 			thrownErrorUnderTest,
 			subscribedShardsStateUnderTest,
 			subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
-			(properties) -> fakeKinesis);
+			properties -> fakeKinesis,
+			properties -> fakeKinesisV2);
 
 		this.runWaiter = new OneShotLatch();
 		this.initialDiscoveryWaiter = new OneShotLatch();
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcherForShardConsumerException.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcherForShardConsumerException.java
index 74bb0c5..a45a463 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcherForShardConsumerException.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/TestableKinesisDataFetcherForShardConsumerException.java
@@ -20,6 +20,7 @@ package org.apache.flink.streaming.connectors.kinesis.testutils;
 import org.apache.flink.core.testutils.OneShotLatch;
 import org.apache.flink.streaming.api.functions.source.SourceFunction;
 import org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcher;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisherFactory;
 import org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
@@ -55,7 +56,8 @@ public class TestableKinesisDataFetcherForShardConsumerException<T> extends Test
 			final AtomicReference<Throwable> thrownErrorUnderTest,
 			final LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
 			final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
-			final KinesisProxyInterface fakeKinesis) {
+			final KinesisProxyInterface fakeKinesis,
+			final RecordPublisherFactory recordPublisherFactory) {
 		super(fakeStreams, sourceContext, fakeConfiguration, deserializationSchema, fakeTotalCountOfSubtasks,
 			fakeIndexOfThisSubtask, thrownErrorUnderTest, subscribedShardsStateUnderTest,
 			subscribedStreamsToLastDiscoveredShardIdsStateUnderTest, fakeKinesis);
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java
index e66ff51..cdb871b 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java
@@ -40,7 +40,6 @@ import java.util.Date;
 import java.util.Properties;
 
 import static com.amazonaws.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP;
-import static com.amazonaws.services.kinesis.model.ShardIteratorType.LATEST;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider.ASSUME_ROLE;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider.AUTO;
@@ -52,7 +51,7 @@ import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequen
 import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -61,6 +60,7 @@ import static org.junit.Assert.assertTrue;
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AWSUtil.class)
 public class AWSUtilTest {
+
 	@Rule
 	private final ExpectedException exception = ExpectedException.none();
 
@@ -214,8 +214,8 @@ public class AWSUtilTest {
 	public void testGetStartingPositionForLatest() {
 		StartingPosition position = AWSUtil.getStartingPosition(SENTINEL_LATEST_SEQUENCE_NUM.get(), new Properties());
 
-		assertEquals(LATEST, position.getShardIteratorType());
-		assertNull(position.getStartingMarker());
+		assertEquals(AT_TIMESTAMP, position.getShardIteratorType());
+		assertNotNull(position.getStartingMarker());
 	}
 
 	@Test
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
index 0862ec2..4d96ab8 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
@@ -33,6 +33,7 @@ import software.amazon.awssdk.auth.credentials.WebIdentityTokenFileCredentialsPr
 import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
 import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption;
 import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.Http2Configuration;
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;
@@ -48,6 +49,8 @@ import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigCons
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.roleArn;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.roleSessionName;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.webIdentityTokenFile;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -59,6 +62,7 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import static software.amazon.awssdk.http.Protocol.HTTP2;
 
 /**
  * Tests for {@link AwsV2Util}.
@@ -227,39 +231,40 @@ public class AwsV2UtilTest {
 		ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
 		verify(builder).build();
-		verify(builder).maxConcurrency(50);
+		verify(builder).maxConcurrency(DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY);
 		verify(builder).connectionTimeout(Duration.ofSeconds(10));
 		verify(builder).writeTimeout(Duration.ofSeconds(50));
 		verify(builder).connectionMaxIdleTime(Duration.ofMinutes(1));
 		verify(builder).useIdleConnectionReaper(true);
+		verify(builder).protocol(HTTP2);
 		verify(builder, never()).connectionTimeToLive(any());
 	}
 
 	@Test
-	public void testCreateNettyHttpClientMaxConcurrency() {
+	public void testCreateNettyHttpClientConnectionTimeout() {
 		ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
-		clientConfiguration.setMaxConnections(100);
+		clientConfiguration.setConnectionTimeout(1000);
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
-		verify(builder).maxConcurrency(100);
+		verify(builder).connectionTimeout(Duration.ofSeconds(1));
 	}
 
 	@Test
-	public void testCreateNettyHttpClientConnectionTimeout() {
-		ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
-		clientConfiguration.setConnectionTimeout(1000);
+	public void testCreateNettyHttpClientMaxConcurrency() {
+		Properties clientConfiguration = new Properties();
+		clientConfiguration.setProperty(EFO_HTTP_CLIENT_MAX_CONCURRENCY, "123");
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(new ClientConfigurationFactory().getConfig(), builder, clientConfiguration);
 
-		verify(builder).connectionTimeout(Duration.ofSeconds(1));
+		verify(builder).maxConcurrency(123);
 	}
 
 	@Test
@@ -269,7 +274,7 @@ public class AwsV2UtilTest {
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
 		verify(builder).writeTimeout(Duration.ofSeconds(3));
 	}
@@ -281,7 +286,7 @@ public class AwsV2UtilTest {
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
 		verify(builder).connectionMaxIdleTime(Duration.ofSeconds(2));
 	}
@@ -293,7 +298,7 @@ public class AwsV2UtilTest {
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
 		verify(builder).useIdleConnectionReaper(false);
 	}
@@ -305,7 +310,7 @@ public class AwsV2UtilTest {
 
 		NettyNioAsyncHttpClient.Builder builder = mockHttpClientBuilder();
 
-		AwsV2Util.createHttpClient(clientConfiguration, builder);
+		AwsV2Util.createHttpClient(clientConfiguration, builder, new Properties());
 
 		verify(builder).connectionTimeToLive(Duration.ofSeconds(5));
 	}
@@ -383,6 +388,9 @@ public class AwsV2UtilTest {
 		when(builder.writeTimeout(any())).thenReturn(builder);
 		when(builder.connectionMaxIdleTime(any())).thenReturn(builder);
 		when(builder.useIdleConnectionReaper(anyBoolean())).thenReturn(builder);
+		when(builder.connectionAcquisitionTimeout(any())).thenReturn(builder);
+		when(builder.protocol(any())).thenReturn(builder);
+		when(builder.http2Configuration(any(Http2Configuration.class))).thenReturn(builder);
 
 		return builder;
 	}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
index bb0f6a1..ee32b6b 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
@@ -38,6 +38,7 @@ import java.util.Properties;
 import java.util.stream.Collectors;
 
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_STREAM_TIMESTAMP_DATE_FORMAT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT;
 import static org.junit.Assert.assertEquals;
@@ -50,7 +51,7 @@ import static org.junit.Assert.fail;
 public class KinesisConfigUtilTest {
 
 	@Rule
-	private ExpectedException exception = ExpectedException.none();
+	public ExpectedException exception = ExpectedException.none();
 
 	// ----------------------------------------------------------------------
 	// getValidatedProducerConfiguration() tests
@@ -214,6 +215,7 @@ public class KinesisConfigUtilTest {
 	// ----------------------------------------------------------------------
 	// validateEfoConfiguration() tests
 	// ----------------------------------------------------------------------
+
 	@Test
 	public void testNoEfoRegistrationTypeInConfig() {
 		Properties testConfig = TestUtils.getStandardProperties();
@@ -282,6 +284,37 @@ public class KinesisConfigUtilTest {
 		List<String> streams = Arrays.asList("stream1", "stream2");
 		KinesisConfigUtil.validateEfoConfiguration(testConfig, streams);
 	}
+
+	@Test
+	public void testValidateEfoMaxConcurrency() {
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(EFO_HTTP_CLIENT_MAX_CONCURRENCY, "55");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
+	@Test
+	public void testValidateEfoMaxConcurrencyNonNumeric() {
+		exception.expect(IllegalArgumentException.class);
+		exception.expectMessage("Invalid value given for EFO HTTP client max concurrency. Must be positive.");
+
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(EFO_HTTP_CLIENT_MAX_CONCURRENCY, "abc");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
+	@Test
+	public void testValidateEfoMaxConcurrencyNegative() {
+		exception.expect(IllegalArgumentException.class);
+		exception.expectMessage("Invalid value given for EFO HTTP client max concurrency. Must be positive.");
+
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(EFO_HTTP_CLIENT_MAX_CONCURRENCY, "-1");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
 	// ----------------------------------------------------------------------
 	// validateConsumerConfiguration() tests
 	// ----------------------------------------------------------------------


[flink] 02/03: [FLINK-18661][Kinesis] Stream consumer Registration/Deregistration

Posted by tz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tzulitai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit ad4e9a60f12b5e28c93091e5d89f1cbce86c0c35
Author: Danny Cranmer <cr...@amazon.com>
AuthorDate: Fri Sep 4 10:35:35 2020 +0100

    [FLINK-18661][Kinesis] Stream consumer Registration/Deregistration
---
 .../connectors/kinesis/FlinkKinesisConsumer.java   |   3 +
 ...V2Interface.java => FlinkKinesisException.java} |  30 +-
 .../kinesis/config/ConsumerConfigConstants.java    |  20 +-
 .../kinesis/internals/KinesisDataFetcher.java      |  25 +-
 .../fanout/FanOutRecordPublisherConfiguration.java |  59 +++-
 .../publisher/fanout/StreamConsumerRegistrar.java  | 275 ++++++++++++++++++
 .../publisher/polling/PollingRecordPublisher.java  |   2 +-
 .../connectors/kinesis/proxy/KinesisProxyV2.java   | 155 ++++++++++-
 .../kinesis/proxy/KinesisProxyV2Factory.java       |  60 ++++
 .../kinesis/proxy/KinesisProxyV2Interface.java     |  15 +
 .../connectors/kinesis/util/AwsV2Util.java         |  57 ++--
 .../connectors/kinesis/util/KinesisConfigUtil.java |   6 +
 .../kinesis/util/StreamConsumerRegistrarUtil.java  | 164 +++++++++++
 .../kinesis/internals/KinesisDataFetcherTest.java  |   3 +
 .../FanOutRecordPublisherConfigurationTest.java    |  65 ++++-
 .../fanout/StreamConsumerRegistrarTest.java        | 309 +++++++++++++++++++++
 .../kinesis/proxy/KinesisProxyV2Test.java          | 303 +++++++++++++++++++-
 .../FakeKinesisFanOutBehavioursFactory.java        | 207 +++++++++++++-
 .../connectors/kinesis/util/AwsV2UtilTest.java     |  99 +++++++
 .../kinesis/util/KinesisConfigUtilTest.java        |  22 ++
 .../util/StreamConsumerRegistrarUtilTest.java      |  74 +++++
 21 files changed, 1867 insertions(+), 86 deletions(-)

diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
index 1352656..d0dc7a1 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumer.java
@@ -46,6 +46,7 @@ import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper;
 import org.apache.flink.streaming.connectors.kinesis.util.KinesisConfigUtil;
+import org.apache.flink.streaming.connectors.kinesis.util.StreamConsumerRegistrarUtil;
 import org.apache.flink.streaming.connectors.kinesis.util.WatermarkTracker;
 import org.apache.flink.util.InstantiationUtil;
 
@@ -220,6 +221,8 @@ public class FlinkKinesisConsumer<T> extends RichParallelSourceFunction<T> imple
 				"Please check that it does not contain references to non-serializable instances.");
 		this.deserializer = deserializer;
 
+		StreamConsumerRegistrarUtil.eagerlyRegisterStreamConsumers(configProps, streams);
+
 		if (LOG.isInfoEnabled()) {
 			StringBuilder sb = new StringBuilder();
 			for (String stream : streams) {
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
similarity index 53%
copy from flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
copy to flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
index e748eb2..2d2033b 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisException.java
@@ -15,28 +15,34 @@
  * limitations under the License.
  */
 
-package org.apache.flink.streaming.connectors.kinesis.proxy;
+package org.apache.flink.streaming.connectors.kinesis;
 
 import org.apache.flink.annotation.Internal;
 
-import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
-import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
-
-import java.util.concurrent.CompletableFuture;
-
 /**
- * Interface for a Kinesis proxy using AWS SDK v2.x operating on multiple Kinesis streams within the same AWS service region.
+ * A {@link RuntimeException} wrapper indicating the exception was thrown from this connector.
+ * This class is abstract; semantic subclasses should be created to indicate the type of exception.
  */
 @Internal
-public interface KinesisProxyV2Interface {
+public abstract class FlinkKinesisException extends RuntimeException {
+
+	public FlinkKinesisException(final String message) {
+		super(message);
+	}
 
-	CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler);
+	public FlinkKinesisException(final String message, final Throwable cause) {
+		super(message, cause);
+	}
 
 	/**
-	 * Destroy any open resources used by the factory.
+	 * A semantic {@link RuntimeException} thrown to indicate timeout errors in the Kinesis connector.
 	 */
-	default void close() {
-		// Do nothing by default
+	@Internal
+	public static class FlinkKinesisTimeoutException extends FlinkKinesisException {
+
+		public FlinkKinesisTimeoutException(String message) {
+			super(message);
+		}
 	}
 
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
index f003b3b..00da231 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/config/ConsumerConfigConstants.java
@@ -24,6 +24,8 @@ import org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumbe
 
 import com.amazonaws.services.kinesis.model.ShardIteratorType;
 
+import java.time.Duration;
+
 /**
  * Optional consumer specific configuration keys and default values for {@link FlinkKinesisConsumer}.
  */
@@ -143,6 +145,9 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 	/** The maximum number of registerStream attempts if we get a recoverable exception. */
 	public static final String REGISTER_STREAM_RETRIES = "flink.stream.registerstreamconsumer.maxretries";
 
+	/** The maximum time in seconds to wait for a stream consumer to become active before giving up. */
+	public static final String REGISTER_STREAM_TIMEOUT_SECONDS = "flink.stream.registerstreamconsumer.timeout";
+
 	/** The base backoff time between each registerStream attempt. */
 	public static final String REGISTER_STREAM_BACKOFF_BASE = "flink.stream.registerstreamconsumer.backoff.base";
 
@@ -155,6 +160,9 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 	/** The maximum number of deregisterStream attempts if we get a recoverable exception. */
 	public static final String DEREGISTER_STREAM_RETRIES = "flink.stream.deregisterstreamconsumer.maxretries";
 
+	/** The maximum time in seconds to wait for a stream consumer to deregister before giving up. */
+	public static final String DEREGISTER_STREAM_TIMEOUT_SECONDS = "flink.stream.deregisterstreamconsumer.timeout";
+
 	/** The base backoff time between each deregisterStream attempt. */
 	public static final String DEREGISTER_STREAM_BACKOFF_BASE = "flink.stream.deregisterstreamconsumer.backoff.base";
 
@@ -260,17 +268,21 @@ public class ConsumerConfigConstants extends AWSConfigConstants {
 
 	public static final int DEFAULT_REGISTER_STREAM_RETRIES = 10;
 
-	public static final long DEFAULT_REGISTER_STREAM_BACKOFF_BASE = 200L;
+	public static final Duration DEFAULT_REGISTER_STREAM_TIMEOUT = Duration.ofSeconds(60);
 
-	public static final long DEFAULT_REGISTER_STREAM_BACKOFF_MAX = 1000L;
+	public static final long DEFAULT_REGISTER_STREAM_BACKOFF_BASE = 500L;
+
+	public static final long DEFAULT_REGISTER_STREAM_BACKOFF_MAX = 2000L;
 
 	public static final double DEFAULT_REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
 
 	public static final int DEFAULT_DEREGISTER_STREAM_RETRIES = 10;
 
-	public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_BASE = 200L;
+	public static final Duration DEFAULT_DEREGISTER_STREAM_TIMEOUT = Duration.ofSeconds(60);
+
+	public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_BASE = 500L;
 
-	public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_MAX = 1000L;
+	public static final long DEFAULT_DEREGISTER_STREAM_BACKOFF_MAX = 2000L;
 
 	public static final double DEFAULT_DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT = 1.5;
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
index 133a4d3..d2a6ec4 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcher.java
@@ -43,12 +43,12 @@ import org.apache.flink.streaming.connectors.kinesis.model.StreamShardMetadata;
 import org.apache.flink.streaming.connectors.kinesis.proxy.GetShardListResult;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxy;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyInterface;
-import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Factory;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema;
 import org.apache.flink.streaming.connectors.kinesis.util.AWSUtil;
-import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
 import org.apache.flink.streaming.connectors.kinesis.util.RecordEmitter;
+import org.apache.flink.streaming.connectors.kinesis.util.StreamConsumerRegistrarUtil;
 import org.apache.flink.streaming.connectors.kinesis.util.WatermarkTracker;
 import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue;
 import org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback;
@@ -61,7 +61,6 @@ import com.amazonaws.services.kinesis.model.SequenceNumberRange;
 import com.amazonaws.services.kinesis.model.Shard;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
 
 import javax.annotation.Nullable;
 
@@ -360,7 +359,7 @@ public class KinesisDataFetcher<T> {
 			new ArrayList<>(),
 			createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
 			KinesisProxy::create,
-			KinesisDataFetcher::createKinesisProxyV2);
+			KinesisProxyV2Factory::createKinesisProxyV2);
 	}
 
 	@VisibleForTesting
@@ -406,6 +405,8 @@ public class KinesisDataFetcher<T> {
 			createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
 
 		this.recordEmitter = createRecordEmitter(configProps);
+
+		StreamConsumerRegistrarUtil.lazilyRegisterStreamConsumers(configProps, streams);
 	}
 
 	private RecordEmitter createRecordEmitter(Properties configProps) {
@@ -467,11 +468,6 @@ public class KinesisDataFetcher<T> {
 		return recordPublisherFactory.create(startingPosition, configProps, metricGroup, subscribedShard);
 	}
 
-	private static KinesisProxyV2Interface createKinesisProxyV2(final Properties configProps) {
-		final KinesisAsyncClient client = AwsV2Util.createKinesisAsyncClient(configProps);
-		return new KinesisProxyV2(client);
-	}
-
 	/**
 	 * Starts the fetcher. After starting the fetcher, it can only
 	 * be stopped by calling {@link KinesisDataFetcher#shutdownFetcher()}.
@@ -597,7 +593,7 @@ public class KinesisDataFetcher<T> {
 		// we will escape from this loop only when shutdownFetcher() or stopWithError() is called
 		// TODO: have this thread emit the records for tracking backpressure
 
-		final long discoveryIntervalMillis = Long.valueOf(
+		final long discoveryIntervalMillis = Long.parseLong(
 			configProps.getProperty(
 				ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS,
 				Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_DISCOVERY_INTERVAL_MILLIS)));
@@ -699,6 +695,12 @@ public class KinesisDataFetcher<T> {
 	public void shutdownFetcher() {
 		running = false;
 
+		StreamConsumerRegistrarUtil.deregisterStreamConsumers(configProps, streams);
+
+		recordPublisherFactory.close();
+
+		shardConsumersExecutor.shutdownNow();
+
 		if (mainThread != null) {
 			mainThread.interrupt(); // the main thread may be sleeping for the discovery interval
 		}
@@ -711,9 +713,6 @@ public class KinesisDataFetcher<T> {
 		if (LOG.isInfoEnabled()) {
 			LOG.info("Shutting down the shard consumer threads of subtask {} ...", indexOfThisConsumerSubtask);
 		}
-		shardConsumersExecutor.shutdownNow();
-
-		recordPublisherFactory.close();
 	}
 
 	/** After calling {@link KinesisDataFetcher#shutdownFetcher()}, this can be called to await the fetcher shutdown. */
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
index 89ffad3..bf4a660 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfiguration.java
@@ -25,12 +25,16 @@ import org.apache.flink.util.Preconditions;
 
 import javax.annotation.Nullable;
 
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn;
+
+
 /**
  * This is a configuration class for enhanced fan-out components.
  */
@@ -48,10 +52,9 @@ public class FanOutRecordPublisherConfiguration {
 	private String consumerName;
 
 	/**
-	 * The manual set efo consumer arns for each stream. Should not be Null if the efoRegistrationType is NONE
+	 * A map of stream to stream consumer ARN for EFO subscriptions.
 	 */
-	@Nullable
-	private Map<String, String> streamConsumerArns;
+	private final Map<String, String> streamConsumerArns = new HashMap<>();
 
 	/**
 	 * Base backoff millis for the deregister stream operation.
@@ -94,6 +97,11 @@ public class FanOutRecordPublisherConfiguration {
 	private final int registerStreamMaxRetries;
 
 	/**
+	 * Maximum time to wait for a stream consumer to become active before giving up.
+	 */
+	private final Duration registerStreamConsumerTimeout;
+
+	/**
 	 * Base backoff millis for the deregister stream operation.
 	 */
 	private final long deregisterStreamBaseBackoffMillis;
@@ -114,6 +122,11 @@ public class FanOutRecordPublisherConfiguration {
 	private final int deregisterStreamMaxRetries;
 
 	/**
+	 * Maximum time to wait for a stream consumer to deregister before giving up.
+	 */
+	private final Duration deregisterStreamConsumerTimeout;
+
+	/**
 	 * Max retries for the describe stream operation.
 	 */
 	private final int describeStreamMaxRetries;
@@ -159,7 +172,7 @@ public class FanOutRecordPublisherConfiguration {
 	 * @param configProps the configuration properties from config file.
 	 * @param streams     the streams which is sent to match the EFO consumer arn if the EFO registration mode is set to `NONE`.
 	 */
-	public FanOutRecordPublisherConfiguration(Properties configProps, List<String> streams) {
+	public FanOutRecordPublisherConfiguration(final Properties configProps, final List<String> streams) {
 		Preconditions.checkArgument(configProps.getProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE).equals(RecordPublisherType.EFO.toString()), "Only efo record publisher can register a FanOutProperties.");
 		KinesisConfigUtil.validateEfoConfiguration(configProps, streams);
 
@@ -167,11 +180,11 @@ public class FanOutRecordPublisherConfiguration {
 		//if efo registration type is EAGER|LAZY, then user should explicitly provide a consumer name for each stream.
 		if (efoRegistrationType == EFORegistrationType.EAGER || efoRegistrationType == EFORegistrationType.LAZY) {
 			consumerName = configProps.getProperty(ConsumerConfigConstants.EFO_CONSUMER_NAME);
-		} else {
-			//else users should explicitly provide consumer arns.
-			streamConsumerArns = new HashMap<>();
-			for (String stream : streams) {
-				String key = ConsumerConfigConstants.EFO_CONSUMER_ARN_PREFIX + "." + stream;
+		}
+
+		for (String stream : streams) {
+			String key = efoConsumerArn(stream);
+			if (configProps.containsKey(key)) {
 				streamConsumerArns.put(stream, configProps.getProperty(key));
 			}
 		}
@@ -206,6 +219,12 @@ public class FanOutRecordPublisherConfiguration {
 			configProps.getProperty(
 				ConsumerConfigConstants.REGISTER_STREAM_RETRIES))
 			.map(Integer::parseInt).orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_RETRIES);
+		this.registerStreamConsumerTimeout = Optional.ofNullable(
+			configProps.getProperty(
+				ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS))
+			.map(Integer::parseInt)
+			.map(Duration::ofSeconds)
+			.orElse(ConsumerConfigConstants.DEFAULT_REGISTER_STREAM_TIMEOUT);
 
 		this.deregisterStreamBaseBackoffMillis = Optional.ofNullable(
 			configProps.getProperty(
@@ -223,6 +242,12 @@ public class FanOutRecordPublisherConfiguration {
 			configProps.getProperty(
 				ConsumerConfigConstants.DEREGISTER_STREAM_RETRIES))
 			.map(Integer::parseInt).orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_RETRIES);
+		this.deregisterStreamConsumerTimeout = Optional.ofNullable(
+			configProps.getProperty(
+				ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS))
+			.map(Integer::parseInt)
+			.map(Duration::ofSeconds)
+			.orElse(ConsumerConfigConstants.DEFAULT_DEREGISTER_STREAM_TIMEOUT);
 
 		this.describeStreamMaxRetries = Optional.ofNullable(
 			configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_RETRIES))
@@ -314,6 +339,13 @@ public class FanOutRecordPublisherConfiguration {
 		return registerStreamMaxRetries;
 	}
 
+	/**
+	 * Get the maximum duration to wait for a stream consumer to become active before giving up.
+	 */
+	public Duration getRegisterStreamConsumerTimeout() {
+		return registerStreamConsumerTimeout;
+	}
+
 	// ------------------------------------------------------------------------
 	//  deregisterStream() related performance settings
 	// ------------------------------------------------------------------------
@@ -346,6 +378,13 @@ public class FanOutRecordPublisherConfiguration {
 		return deregisterStreamMaxRetries;
 	}
 
+	/**
+	 * Get the maximum duration to wait for a stream consumer to deregister before giving up.
+	 */
+	public Duration getDeregisterStreamConsumerTimeout() {
+		return deregisterStreamConsumerTimeout;
+	}
+
 	// ------------------------------------------------------------------------
 	//  describeStream() related performance settings
 	// ------------------------------------------------------------------------
@@ -428,6 +467,6 @@ public class FanOutRecordPublisherConfiguration {
 	 * Get the according consumer arn to the stream, will be null if efo registration type is 'LAZY' or 'EAGER'.
 	 */
 	public Optional<String> getStreamConsumerArn(String stream) {
-		return Optional.ofNullable(streamConsumerArns).map(arns -> arns.get(stream));
+		return Optional.ofNullable(streamConsumerArns.get(stream));
 	}
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
new file mode 100644
index 0000000..3d22655
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrar.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisException.FlinkKinesisTimeoutException;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.util.Preconditions;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
+import software.amazon.awssdk.services.kinesis.model.ResourceInUseException;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+
+import javax.annotation.Nullable;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.LAZY;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.ACTIVE;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.DELETING;
+
+/**
+ * Responsible for registering and deregistering EFO stream consumers.
+ * Will block until consumers are ready.
+ */
+@Internal
+public class StreamConsumerRegistrar {
+
+	private static final Logger LOG = LoggerFactory.getLogger(StreamConsumerRegistrar.class);
+
+	private final KinesisProxyV2Interface kinesisProxyV2Interface;
+
+	private final FanOutRecordPublisherConfiguration configuration;
+
+	private final FullJitterBackoff backoff;
+
+	public StreamConsumerRegistrar(
+			final KinesisProxyV2Interface kinesisProxyV2Interface,
+			final FanOutRecordPublisherConfiguration configuration,
+			final FullJitterBackoff backoff) {
+		this.kinesisProxyV2Interface = Preconditions.checkNotNull(kinesisProxyV2Interface);
+		this.configuration = Preconditions.checkNotNull(configuration);
+		this.backoff = Preconditions.checkNotNull(backoff);
+	}
+
+	/**
+	 * Register a stream consumer with the given name against the given stream.
+	 * Blocks until the consumer becomes active.
+	 * If the stream consumer already exists, the ARN is returned.
+	 *
+	 * @param stream the stream to register the stream consumer against
+	 * @param streamConsumerName the name of the new stream consumer
+	 * @return the stream consumer ARN
+	 * @throws ExecutionException if an error occurs while communicating with Kinesis
+	 * @throws InterruptedException if the thread is interrupted while backing off between attempts
+	 */
+	public String registerStreamConsumer(final String stream, final String streamConsumerName) throws ExecutionException, InterruptedException {
+		LOG.debug("Registering stream consumer - {}::{}", stream, streamConsumerName);
+
+		int attempt = 1;
+
+		if (configuration.getEfoRegistrationType() == LAZY) {
+			registrationBackoff(configuration, backoff, attempt++);
+		}
+
+		DescribeStreamResponse describeStreamResponse = kinesisProxyV2Interface.describeStream(stream);
+		String streamArn = describeStreamResponse.streamDescription().streamARN();
+
+		LOG.debug("Found stream ARN - {}", streamArn);
+
+		Optional<DescribeStreamConsumerResponse> describeStreamConsumerResponse =
+			describeStreamConsumer(streamArn, streamConsumerName);
+
+		if (!describeStreamConsumerResponse.isPresent()) {
+			invokeIgnoringResourceInUse(() -> kinesisProxyV2Interface.registerStreamConsumer(streamArn, streamConsumerName));
+		}
+
+		String streamConsumerArn = waitForConsumerToBecomeActive(
+			describeStreamConsumerResponse.orElse(null), streamArn, streamConsumerName, attempt);
+
+		LOG.debug("Using stream consumer - {}", streamConsumerArn);
+
+		return streamConsumerArn;
+	}
+
+	/**
+	 * Deregister the stream consumer associated with the given stream.
+	 * The consumer ARN is obtained from configuration. Blocks until the consumer is deleted.
+	 *
+	 * @param stream the stream whose consumer should be deregistered
+	 * @throws ExecutionException if an error occurs while communicating with Kinesis
+	 * @throws InterruptedException if the thread is interrupted while backing off between attempts
+	 */
+	public void deregisterStreamConsumer(final String stream) throws InterruptedException, ExecutionException {
+		LOG.debug("Deregistering stream consumer - {}", stream);
+
+		int attempt = 1;
+		String streamConsumerArn = getStreamConsumerArn(stream);
+
+		deregistrationBackoff(configuration, backoff, attempt++);
+
+		Optional<DescribeStreamConsumerResponse> response = describeStreamConsumer(streamConsumerArn);
+		if (response.isPresent() && response.get().consumerDescription().consumerStatus() != DELETING) {
+			invokeIgnoringResourceInUse(() -> kinesisProxyV2Interface.deregisterStreamConsumer(streamConsumerArn));
+		}
+
+		waitForConsumerToDeregister(response.orElse(null), streamConsumerArn, attempt);
+
+		LOG.debug("Deregistered stream consumer - {}", streamConsumerArn);
+	}
+
+	/**
+	 * Destroy any open resources used by the registrar (closes the underlying Kinesis proxy).
+	 */
+	public void close() {
+		kinesisProxyV2Interface.close();
+	}
+
+	@VisibleForTesting
+	void registrationBackoff(
+			final FanOutRecordPublisherConfiguration configuration,
+			final FullJitterBackoff backoff,
+			int attempt) throws InterruptedException {
+		long backoffMillis = backoff.calculateFullJitterBackoff(
+			configuration.getRegisterStreamBaseBackoffMillis(),
+			configuration.getRegisterStreamMaxBackoffMillis(),
+			configuration.getRegisterStreamExpConstant(),
+			attempt);
+
+		backoff.sleep(backoffMillis);
+	}
+
+	@VisibleForTesting
+	void deregistrationBackoff(
+			final FanOutRecordPublisherConfiguration configuration,
+			final FullJitterBackoff backoff,
+			int attempt) throws InterruptedException {
+		long backoffMillis = backoff.calculateFullJitterBackoff(
+			configuration.getDeregisterStreamBaseBackoffMillis(),
+			configuration.getDeregisterStreamMaxBackoffMillis(),
+			configuration.getDeregisterStreamExpConstant(),
+			attempt);
+
+		backoff.sleep(backoffMillis);
+	}
+
+	private String waitForConsumerToBecomeActive(
+			@Nullable final DescribeStreamConsumerResponse describeStreamConsumerResponse,
+			final String streamArn,
+			final String streamConsumerName,
+			final int initialAttempt) throws InterruptedException, ExecutionException {
+		int attempt = initialAttempt;
+
+		Instant start = Instant.now();
+		Duration timeout = configuration.getRegisterStreamConsumerTimeout();
+
+		DescribeStreamConsumerResponse response = describeStreamConsumerResponse;
+		while (response == null || response.consumerDescription().consumerStatus() != ACTIVE) {
+			LOG.debug("Waiting for stream consumer to become active, attempt {} - {} on {}", attempt, streamConsumerName, streamArn);
+			registrationBackoff(configuration, backoff, attempt++);
+			response = kinesisProxyV2Interface.describeStreamConsumer(streamArn, streamConsumerName);
+
+			if (Duration.between(start, Instant.now()).compareTo(timeout) > 0) {
+				throw new FlinkKinesisTimeoutException("Timeout waiting for stream consumer to become active: " + streamConsumerName + " on " + streamArn);
+			}
+		}
+
+		return response.consumerDescription().consumerARN();
+	}
+
+	private void waitForConsumerToDeregister(
+			@Nullable final DescribeStreamConsumerResponse describeStreamConsumerResponse,
+			final String streamConsumerArn,
+			final int initialAttempt) throws InterruptedException, ExecutionException {
+		int attempt = initialAttempt;
+
+		Instant start = Instant.now();
+		Duration timeout = configuration.getDeregisterStreamConsumerTimeout();
+
+		Optional<DescribeStreamConsumerResponse> response = Optional.ofNullable(describeStreamConsumerResponse);
+		while (response.isPresent() && response.get().consumerDescription().consumerStatus() != DELETING) {
+			LOG.debug("Waiting for stream consumer to deregister, attempt {} - {}", attempt, streamConsumerArn);
+			deregistrationBackoff(configuration, backoff, attempt++);
+			response = describeStreamConsumer(streamConsumerArn);
+
+			if (Duration.between(start, Instant.now()).compareTo(timeout) > 0) {
+				throw new FlinkKinesisTimeoutException("Timeout waiting for stream consumer to deregister: " + streamConsumerArn);
+			}
+		}
+	}
+
+	private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(final String streamArn, final String streamConsumerName) throws InterruptedException, ExecutionException {
+		return describeStreamConsumer(() -> kinesisProxyV2Interface.describeStreamConsumer(streamArn, streamConsumerName));
+	}
+
+	private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(final String streamConsumerArn) throws InterruptedException, ExecutionException {
+		return describeStreamConsumer(() -> kinesisProxyV2Interface.describeStreamConsumer(streamConsumerArn));
+	}
+
+	private Optional<DescribeStreamConsumerResponse> describeStreamConsumer(
+			final ResponseSupplier<DescribeStreamConsumerResponse> responseSupplier) throws InterruptedException, ExecutionException {
+		DescribeStreamConsumerResponse response;
+
+		try {
+			response = responseSupplier.get();
+		} catch (ExecutionException ex) {
+			if (isResourceNotFound(ex)) {
+				return Optional.empty();
+			}
+
+			throw ex;
+		}
+
+		return Optional.ofNullable(response);
+	}
+
+	private <T> void invokeIgnoringResourceInUse(
+		final ResponseSupplier<T> responseSupplier) throws InterruptedException, ExecutionException {
+		try {
+			responseSupplier.get();
+		} catch (ExecutionException ex) {
+			if (isResourceInUse(ex)) {
+				// The stream consumer may have been created since we performed the describe
+				return;
+			}
+
+			throw ex;
+		}
+	}
+
+	private boolean isResourceNotFound(final ExecutionException ex) {
+		return ex.getCause() instanceof ResourceNotFoundException;
+	}
+
+	private boolean isResourceInUse(final ExecutionException ex) {
+		return ex.getCause() instanceof ResourceInUseException;
+	}
+
+	private String getStreamConsumerArn(final String stream) {
+		Optional<String> streamConsumerArn = configuration.getStreamConsumerArn(stream);
+		if (!streamConsumerArn.isPresent()) {
+			throw new IllegalArgumentException("Stream consumer ARN not found for stream: " + stream);
+		}
+
+		return streamConsumerArn.get();
+	}
+
+	private interface ResponseSupplier<T> {
+		T get() throws ExecutionException, InterruptedException;
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
index 36d3c69..5b94fea 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisher.java
@@ -133,7 +133,7 @@ public class PollingRecordPublisher implements RecordPublisher {
 		while (getRecordsResult == null) {
 			try {
 				getRecordsResult = kinesisProxy.getRecords(shardItr, maxNumberOfRecords);
-			} catch (ExpiredIteratorException eiEx) {
+			} catch (ExpiredIteratorException | InterruptedException eiEx) {
 				LOG.warn("Encountered an unexpected expired iterator {} for shard {};" +
 					" refreshing the iterator ...", shardItr, subscribedShard);
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
index 26908ce..d578e44 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2.java
@@ -18,13 +18,27 @@
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
 import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
 import org.apache.flink.util.Preconditions;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
 
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Kinesis proxy implementation using AWS SDK v2.x - a utility class that is used as a proxy to make
@@ -34,28 +48,159 @@ import java.util.concurrent.CompletableFuture;
 @Internal
 public class KinesisProxyV2 implements KinesisProxyV2Interface {
 
+	private static final Logger LOG = LoggerFactory.getLogger(KinesisProxyV2.class);
+
 	/** An Asynchronous client used to communicate with AWS services. */
 	private final KinesisAsyncClient kinesisAsyncClient;
 
+	private final SdkAsyncHttpClient httpClient;
+
+	private final FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration;
+
+	private final FullJitterBackoff backoff;
+
 	/**
-	 * Create a new KinesisProxyV2 using the provided Async Client.
+	 * Create a new KinesisProxyV2.
 	 *
-	 * @param kinesisAsyncClient the kinesis async client used to communicate with Kinesis
+	 * @param kinesisAsyncClient AWS SDK v2 Kinesis client used to communicate with AWS services
+	 * @param httpClient the underlying HTTP client, reference required for close only
+	 * @param fanOutRecordPublisherConfiguration the configuration for Fan Out features
+	 * @param backoff the backoff utility used to introduce Full Jitter delays
 	 */
-	public KinesisProxyV2(final KinesisAsyncClient kinesisAsyncClient) {
+	public KinesisProxyV2(
+			final KinesisAsyncClient kinesisAsyncClient,
+			final SdkAsyncHttpClient httpClient,
+			final FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration,
+			final FullJitterBackoff backoff) {
 		this.kinesisAsyncClient = Preconditions.checkNotNull(kinesisAsyncClient);
+		this.httpClient = httpClient;
+		this.fanOutRecordPublisherConfiguration = fanOutRecordPublisherConfiguration;
+		this.backoff = backoff;
 	}
 
 	@Override
 	public CompletableFuture<Void> subscribeToShard(
-			final SubscribeToShardRequest request,
-			final SubscribeToShardResponseHandler responseHandler) {
+		final SubscribeToShardRequest request,
+		final SubscribeToShardResponseHandler responseHandler) {
 		return kinesisAsyncClient.subscribeToShard(request, responseHandler);
 	}
 
 	@Override
 	public void close() {
 		kinesisAsyncClient.close();
+		httpClient.close();
+	}
+
+	@Override
+	public DescribeStreamResponse describeStream(String stream) throws InterruptedException, ExecutionException {
+		DescribeStreamRequest describeStreamRequest = DescribeStreamRequest
+			.builder()
+			.streamName(stream)
+			.build();
+
+		return invokeWithRetryAndBackoff(
+			() -> kinesisAsyncClient.describeStream(describeStreamRequest).get(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamBaseBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamMaxBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamExpConstant(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamMaxRetries());
+	}
+
+	@Override
+	public DescribeStreamConsumerResponse describeStreamConsumer(final String streamArn, final String consumerName) throws InterruptedException, ExecutionException {
+		DescribeStreamConsumerRequest describeStreamConsumerRequest = DescribeStreamConsumerRequest
+			.builder()
+			.streamARN(streamArn)
+			.consumerName(consumerName)
+			.build();
+
+		return describeStreamConsumer(describeStreamConsumerRequest);
+	}
+
+	@Override
+	public DescribeStreamConsumerResponse describeStreamConsumer(final String streamConsumerArn) throws InterruptedException, ExecutionException {
+		DescribeStreamConsumerRequest describeStreamConsumerRequest = DescribeStreamConsumerRequest
+			.builder()
+			.consumerARN(streamConsumerArn)
+			.build();
+
+		return describeStreamConsumer(describeStreamConsumerRequest);
+	}
+
+	private DescribeStreamConsumerResponse describeStreamConsumer(final DescribeStreamConsumerRequest request) throws InterruptedException, ExecutionException {
+		return invokeWithRetryAndBackoff(
+			() -> kinesisAsyncClient.describeStreamConsumer(request).get(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamConsumerBaseBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamConsumerMaxBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamConsumerExpConstant(),
+			fanOutRecordPublisherConfiguration.getDescribeStreamConsumerMaxRetries());
+	}
+
+	@Override
+	public RegisterStreamConsumerResponse registerStreamConsumer(final String streamArn, final String consumerName) throws InterruptedException, ExecutionException {
+		RegisterStreamConsumerRequest registerStreamConsumerRequest = RegisterStreamConsumerRequest
+			.builder()
+			.streamARN(streamArn)
+			.consumerName(consumerName)
+			.build();
+
+		return invokeWithRetryAndBackoff(
+			() -> kinesisAsyncClient.registerStreamConsumer(registerStreamConsumerRequest).get(),
+			fanOutRecordPublisherConfiguration.getRegisterStreamBaseBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getRegisterStreamMaxBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getRegisterStreamExpConstant(),
+			fanOutRecordPublisherConfiguration.getRegisterStreamMaxRetries());
+	}
+
+	@Override
+	public DeregisterStreamConsumerResponse deregisterStreamConsumer(final String consumerArn) throws InterruptedException, ExecutionException {
+		DeregisterStreamConsumerRequest deregisterStreamConsumerRequest = DeregisterStreamConsumerRequest
+			.builder()
+			.consumerARN(consumerArn)
+			.build();
+
+		return invokeWithRetryAndBackoff(
+			() -> kinesisAsyncClient.deregisterStreamConsumer(deregisterStreamConsumerRequest).get(),
+			fanOutRecordPublisherConfiguration.getDeregisterStreamBaseBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDeregisterStreamMaxBackoffMillis(),
+			fanOutRecordPublisherConfiguration.getDeregisterStreamExpConstant(),
+			fanOutRecordPublisherConfiguration.getDeregisterStreamMaxRetries());
+	}
+
+	private <T> T invokeWithRetryAndBackoff(
+			final ResponseSupplier<T> responseSupplier,
+			final long jitterBase,
+			final long jitterMax,
+			final double jitterExponent,
+			final int maximumNumberOfRetries) throws InterruptedException, ExecutionException {
+		T response = null;
+		int attempt = 0;
+
+		while (attempt < maximumNumberOfRetries && response == null) {
+			try {
+				response = responseSupplier.get();
+			} catch (Exception ex) {
+				if (AwsV2Util.isRecoverableException(ex)) {
+					long backoffMillis = backoff.calculateFullJitterBackoff(jitterBase, jitterMax, jitterExponent, ++attempt);
+					LOG.warn("Encountered recoverable error: {}. Backing off for {} millis.",
+						ex.getClass().getSimpleName(), backoffMillis, ex);
+
+					backoff.sleep(backoffMillis);
+				} else {
+					throw ex;
+				}
+			}
+		}
+
+		if (response == null) {
+			throw new RuntimeException("Retries exceeded - all " + maximumNumberOfRetries + " retry attempts failed.");
+		}
+
+		return response;
+	}
+
+	private interface ResponseSupplier<T> {
+		T get() throws ExecutionException, InterruptedException;
 	}
 
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
new file mode 100644
index 0000000..149282f
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Factory.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.proxy;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+import org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util;
+import org.apache.flink.util.Preconditions;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.ClientConfigurationFactory;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
+import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+
+import java.util.Properties;
+
+import static java.util.Collections.emptyList;
+
+/**
+ * Creates instances of {@link KinesisProxyV2}.
+ */
+@Internal
+public class KinesisProxyV2Factory {
+
+	private static final FullJitterBackoff BACKOFF = new FullJitterBackoff();
+
+	/**
+	 * Uses the given properties to instantiate a new instance of {@link KinesisProxyV2}.
+	 *
+	 * @param configProps the properties used to parse configuration
+	 * @return the Kinesis proxy
+	 */
+	public static KinesisProxyV2Interface createKinesisProxyV2(final Properties configProps) {
+		Preconditions.checkNotNull(configProps);
+
+		final ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
+		final SdkAsyncHttpClient httpClient = AwsV2Util.createHttpClient(clientConfiguration, NettyNioAsyncHttpClient.builder(), configProps);
+		final FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(configProps, emptyList());
+		final KinesisAsyncClient client = AwsV2Util.createKinesisAsyncClient(configProps, clientConfiguration, httpClient);
+
+		return new KinesisProxyV2(client, httpClient, configuration, BACKOFF);
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
index e748eb2..3fc509c 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Interface.java
@@ -19,10 +19,15 @@ package org.apache.flink.streaming.connectors.kinesis.proxy;
 
 import org.apache.flink.annotation.Internal;
 
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
 
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Interface for a Kinesis proxy using AWS SDK v2.x operating on multiple Kinesis streams within the same AWS service region.
@@ -30,6 +35,16 @@ import java.util.concurrent.CompletableFuture;
 @Internal
 public interface KinesisProxyV2Interface {
 
+	DescribeStreamResponse describeStream(String stream) throws InterruptedException, ExecutionException;
+
+	DescribeStreamConsumerResponse describeStreamConsumer(final String streamConsumerArn) throws InterruptedException, ExecutionException;
+
+	DescribeStreamConsumerResponse describeStreamConsumer(final String streamArn, final String consumerName) throws InterruptedException, ExecutionException;
+
+	RegisterStreamConsumerResponse registerStreamConsumer(final String streamArn, final String consumerName) throws InterruptedException, ExecutionException;
+
+	DeregisterStreamConsumerResponse deregisterStreamConsumer(final String consumerArn) throws InterruptedException, ExecutionException;
+
 	CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler);
 
 	/**
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
index 2326314..b5ab86a 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2Util.java
@@ -23,7 +23,6 @@ import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants;
 import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.CredentialProvider;
 
 import com.amazonaws.ClientConfiguration;
-import com.amazonaws.ClientConfigurationFactory;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
 import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
@@ -42,6 +41,8 @@ import software.amazon.awssdk.profiles.ProfileFile;
 import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;
+import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
+import software.amazon.awssdk.services.kinesis.model.ProvisionedThroughputExceededException;
 import software.amazon.awssdk.services.sts.StsClient;
 import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
 import software.amazon.awssdk.services.sts.model.AssumeRoleRequest;
@@ -53,7 +54,12 @@ import java.util.Optional;
 import java.util.Properties;
 
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.EAGER;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.NONE;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_REGISTRATION_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
 
 /**
  * Utility methods specific to Amazon Web Service SDK v2.x.
@@ -71,32 +77,21 @@ public class AwsV2Util {
 	 * - https://github.com/aws/aws-sdk-java-v2/blob/2.13.52/docs/LaunchChangelog.md#134-client-override-retry-configuration
 	 *
 	 * @param configProps configuration properties
+	 * @param clientConfiguration the AWS SDK v1.X config ported to V2 to instantiate the client
+	 * @param httpClient the underlying HTTP client used to talk to Kinesis
 	 * @return a new Amazon Kinesis Client
 	 */
-	public static KinesisAsyncClient createKinesisAsyncClient(final Properties configProps) {
-		ClientConfiguration clientConfiguration = new ClientConfigurationFactory().getConfig();
-		return createKinesisAsyncClient(configProps, clientConfiguration);
-	}
-
-	/**
-	 * Creates an Amazon Kinesis Async Client from the provided properties.
-	 * Configuration is copied from AWS SDK v1 configuration class as per:
-	 * - https://github.com/aws/aws-sdk-java-v2/blob/2.13.52/docs/LaunchChangelog.md#134-client-override-retry-configuration
-	 *
-	 * @param configProps configuration properties
-	 * @param config the AWS SDK v1.x client configuration used to create the client
-	 * @return a new Amazon Kinesis Client
-	 */
-	public static KinesisAsyncClient createKinesisAsyncClient(final Properties configProps, final ClientConfiguration config) {
-		final SdkAsyncHttpClient httpClient = createHttpClient(config, NettyNioAsyncHttpClient.builder(), configProps);
-		final ClientOverrideConfiguration overrideConfiguration = createClientOverrideConfiguration(config, ClientOverrideConfiguration.builder());
+	public static KinesisAsyncClient createKinesisAsyncClient(
+			final Properties configProps,
+			final ClientConfiguration clientConfiguration,
+			final SdkAsyncHttpClient httpClient) {
+		final ClientOverrideConfiguration overrideConfiguration = createClientOverrideConfiguration(clientConfiguration, ClientOverrideConfiguration.builder());
 		final KinesisAsyncClientBuilder clientBuilder = KinesisAsyncClient.builder();
 
 		return createKinesisAsyncClient(configProps, clientBuilder, httpClient, overrideConfiguration);
 	}
 
-	@VisibleForTesting
-	static SdkAsyncHttpClient createHttpClient(
+	public static SdkAsyncHttpClient createHttpClient(
 			final ClientConfiguration config,
 			final NettyNioAsyncHttpClient.Builder httpClientBuilder,
 			final Properties consumerConfig) {
@@ -272,4 +267,26 @@ public class AwsV2Util {
 		return Region.of(configProps.getProperty(AWSConfigConstants.AWS_REGION));
 	}
 
+	public static boolean isRecoverableException(Exception e) {
+		Throwable cause = e.getCause();
+		return cause instanceof LimitExceededException || cause instanceof ProvisionedThroughputExceededException;
+	}
+
+	public static boolean isUsingEfoRecordPublisher(final Properties properties) {
+		return EFO.name().equals(properties.get(RECORD_PUBLISHER_TYPE));
+	}
+
+	public static boolean isEagerEfoRegistrationType(final Properties properties) {
+		return EAGER.name().equals(properties.get(EFO_REGISTRATION_TYPE));
+	}
+
+	public static boolean isLazyEfoRegistrationType(final Properties properties) {
+		return !isEagerEfoRegistrationType(properties) &&
+			!isNoneEfoRegistrationType(properties);
+	}
+
+	public static boolean isNoneEfoRegistrationType(final Properties properties) {
+		return NONE.name().equals(properties.get(EFO_REGISTRATION_TYPE));
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
index 9626478..ee9df01 100644
--- a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtil.java
@@ -206,6 +206,9 @@ public class KinesisConfigUtil {
 		validateOptionalPositiveIntProperty(config, ConsumerConfigConstants.REGISTER_STREAM_RETRIES,
 			"Invalid value given for maximum retry attempts for register stream operation. Must be a valid non-negative integer value.");
 
+		validateOptionalPositiveIntProperty(config, ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS,
+			"Invalid value given for maximum timeout for register stream consumer. Must be a valid non-negative integer value.");
+
 		validateOptionalPositiveLongProperty(config, ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_MAX,
 			"Invalid value given for register stream operation max backoff milliseconds. Must be a valid non-negative long value.");
 
@@ -218,6 +221,9 @@ public class KinesisConfigUtil {
 		validateOptionalPositiveIntProperty(config, ConsumerConfigConstants.DEREGISTER_STREAM_RETRIES,
 			"Invalid value given for maximum retry attempts for deregister stream operation. Must be a valid non-negative integer value.");
 
+		validateOptionalPositiveIntProperty(config, ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS,
+			"Invalid value given for maximum timeout for deregister stream consumer. Must be a valid non-negative integer value.");
+
 		validateOptionalPositiveLongProperty(config, ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_BASE,
 			"Invalid value given for deregister stream operation base backoff milliseconds. Must be a valid non-negative long value.");
 
diff --git a/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtil.java b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtil.java
new file mode 100644
index 0000000..acf5d33
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/main/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtil.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.util;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisException;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.StreamConsumerRegistrar;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Factory;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn;
+import static org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util.isEagerEfoRegistrationType;
+import static org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util.isLazyEfoRegistrationType;
+import static org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util.isNoneEfoRegistrationType;
+import static org.apache.flink.streaming.connectors.kinesis.util.AwsV2Util.isUsingEfoRecordPublisher;
+
+/**
+ * A utility class that creates instances of {@link StreamConsumerRegistrar} and handles batch operations.
+ */
+@Internal
+public class StreamConsumerRegistrarUtil {
+
+	/**
+	 * Registers stream consumers for the given streams if EFO is enabled with EAGER registration strategy.
+	 *
+	 * @param configProps the properties to parse configuration from
+	 * @param streams the streams to register consumers against
+	 */
+	public static void eagerlyRegisterStreamConsumers(final Properties configProps, final List<String> streams) {
+		if (!isUsingEfoRecordPublisher(configProps) || !isEagerEfoRegistrationType(configProps)) {
+			return;
+		}
+
+		registerStreamConsumers(configProps, streams);
+	}
+
+	/**
+	 * Registers stream consumers for the given streams if EFO is enabled with LAZY registration strategy.
+	 *
+	 * @param configProps the properties to parse configuration from
+	 * @param streams the streams to register consumers against
+	 */
+	public static void lazilyRegisterStreamConsumers(final Properties configProps, final List<String> streams) {
+		if (!isUsingEfoRecordPublisher(configProps) || !isLazyEfoRegistrationType(configProps)) {
+			return;
+		}
+
+		registerStreamConsumers(configProps, streams);
+	}
+
+	/**
+	 * Deregisters stream consumers for the given streams if EFO is enabled with EAGER|LAZY registration strategy.
+	 *
+	 * @param configProps the properties to parse configuration from
+	 * @param streams the streams to deregister consumers from
+	 */
+	public static void deregisterStreamConsumers(final Properties configProps, final List<String> streams) {
+		if (!isUsingEfoRecordPublisher(configProps) || isNoneEfoRegistrationType(configProps)) {
+			return;
+		}
+
+		StreamConsumerRegistrar registrar = createStreamConsumerRegistrar(configProps, streams);
+
+		try {
+			deregisterStreamConsumers(registrar, configProps, streams);
+		} finally {
+			registrar.close();
+		}
+	}
+
+	private static void registerStreamConsumers(final Properties configProps, final List<String> streams) {
+		StreamConsumerRegistrar registrar = createStreamConsumerRegistrar(configProps, streams);
+
+		try {
+			registerStreamConsumers(registrar, configProps, streams);
+		} finally {
+			registrar.close();
+		}
+	}
+
+	@VisibleForTesting
+	static void registerStreamConsumers(
+			final StreamConsumerRegistrar registrar,
+			final Properties configProps,
+			final List<String> streams) {
+		String streamConsumerName = configProps.getProperty(EFO_CONSUMER_NAME);
+
+		for (String stream : streams) {
+			try {
+				String streamConsumerArn = registrar.registerStreamConsumer(stream, streamConsumerName);
+				configProps.setProperty(efoConsumerArn(stream), streamConsumerArn);
+			} catch (ExecutionException ex) {
+				throw new FlinkKinesisStreamConsumerRegistrarException("Error registering stream: " + stream, ex);
+			} catch (InterruptedException ex) {
+				Thread.currentThread().interrupt();
+				throw new FlinkKinesisStreamConsumerRegistrarException("Error registering stream: " + stream, ex);
+			}
+		}
+	}
+
+	@VisibleForTesting
+	static void deregisterStreamConsumers(
+			final StreamConsumerRegistrar registrar,
+			final Properties configProps,
+			final List<String> streams) {
+		if (!isUsingEfoRecordPublisher(configProps) || isNoneEfoRegistrationType(configProps)) {
+			return;
+		}
+
+		for (String stream : streams) {
+			try {
+				registrar.deregisterStreamConsumer(stream);
+			} catch (ExecutionException ex) {
+				throw new FlinkKinesisStreamConsumerRegistrarException("Error deregistering stream: " + stream, ex);
+			} catch (InterruptedException ex) {
+				Thread.currentThread().interrupt();
+				throw new FlinkKinesisStreamConsumerRegistrarException("Error deregistering stream: " + stream, ex);
+			}
+		}
+	}
+
+	private static StreamConsumerRegistrar createStreamConsumerRegistrar(final Properties configProps, final List<String> streams) {
+		FullJitterBackoff backoff = new FullJitterBackoff();
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(configProps, streams);
+		KinesisProxyV2Interface kinesis = KinesisProxyV2Factory.createKinesisProxyV2(configProps);
+
+		return new StreamConsumerRegistrar(kinesis, configuration, backoff);
+	}
+
+	/**
+	 * A semantic {@link RuntimeException} thrown to indicate errors de-/registering stream consumers.
+	 */
+	@Internal
+	public static class FlinkKinesisStreamConsumerRegistrarException extends FlinkKinesisException {
+
+		public FlinkKinesisStreamConsumerRegistrarException(final String message, final Throwable cause) {
+			super(message, cause);
+		}
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
index f6de864..439da58 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java
@@ -70,6 +70,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 import static java.util.Collections.singletonList;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.NONE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_REGISTRATION_TYPE;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
 import static org.junit.Assert.assertEquals;
@@ -893,6 +895,7 @@ public class KinesisDataFetcherTest extends TestLogger {
 	public void testRecordPublisherFactoryIsTornDown() {
 		Properties config = TestUtils.getStandardProperties();
 		config.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		config.setProperty(EFO_REGISTRATION_TYPE, NONE.name());
 
 		KinesisProxyV2Interface kinesisV2 = mock(KinesisProxyV2Interface.class);
 
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
index 2b4ee99..0a4fbdb 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java
@@ -17,8 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
 
-import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants;
-import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType;
 import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType;
 import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils;
 import org.apache.flink.util.TestLogger;
@@ -30,14 +28,24 @@ import org.junit.runner.RunWith;
 import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.NONE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_ARN_PREFIX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_REGISTRATION_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -55,7 +63,7 @@ public class FanOutRecordPublisherConfigurationTest extends TestLogger {
 		exception.expectMessage("Only efo record publisher can register a FanOutProperties.");
 
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.POLLING.toString());
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, RecordPublisherType.POLLING.toString());
 
 		new FanOutRecordPublisherConfiguration(testConfig, new ArrayList<>());
 	}
@@ -64,21 +72,21 @@ public class FanOutRecordPublisherConfigurationTest extends TestLogger {
 	public void testEagerStrategyWithConsumerName() {
 		String fakedConsumerName = "fakedconsumername";
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.EFO.toString());
-		testConfig.setProperty(ConsumerConfigConstants.EFO_CONSUMER_NAME, fakedConsumerName);
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_CONSUMER_NAME, fakedConsumerName);
 		FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration = new FanOutRecordPublisherConfiguration(testConfig, new ArrayList<>());
 		assertEquals(fanOutRecordPublisherConfiguration.getConsumerName(), Optional.of(fakedConsumerName));
 	}
 
 	@Test
 	public void testEagerStrategyWithNoConsumerName() {
-		String msg = "No valid enhanced fan-out consumer name is set through " + ConsumerConfigConstants.EFO_CONSUMER_NAME;
+		String msg = "No valid enhanced fan-out consumer name is set through " + EFO_CONSUMER_NAME;
 
 		exception.expect(IllegalArgumentException.class);
 		exception.expectMessage(msg);
 
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.EFO.toString());
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
 		new FanOutRecordPublisherConfiguration(testConfig, new ArrayList<>());
 	}
 
@@ -86,11 +94,11 @@ public class FanOutRecordPublisherConfigurationTest extends TestLogger {
 	public void testNoneStrategyWithStreams() {
 		List<String> streams = Arrays.asList("fakedstream1", "fakedstream2");
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.EFO.toString());
-		testConfig.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EFORegistrationType.NONE.toString());
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_REGISTRATION_TYPE, NONE.toString());
 		streams.forEach(
 			stream ->
-				testConfig.setProperty(ConsumerConfigConstants.EFO_CONSUMER_ARN_PREFIX + "." + stream, stream)
+				testConfig.setProperty(EFO_CONSUMER_ARN_PREFIX + "." + stream, stream)
 		);
 		FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration = new FanOutRecordPublisherConfiguration(testConfig, streams);
 		Map<String, String> expectedStreamArns = new HashMap<>();
@@ -109,8 +117,8 @@ public class FanOutRecordPublisherConfigurationTest extends TestLogger {
 		exception.expectMessage(msg);
 
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.EFO.toString());
-		testConfig.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EFORegistrationType.NONE.toString());
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_REGISTRATION_TYPE, NONE.toString());
 
 		new FanOutRecordPublisherConfiguration(testConfig, streams);
 	}
@@ -124,10 +132,37 @@ public class FanOutRecordPublisherConfigurationTest extends TestLogger {
 		exception.expectMessage(msg);
 
 		Properties testConfig = TestUtils.getStandardProperties();
-		testConfig.setProperty(ConsumerConfigConstants.RECORD_PUBLISHER_TYPE, RecordPublisherType.EFO.toString());
-		testConfig.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EFORegistrationType.NONE.toString());
-		testConfig.setProperty(ConsumerConfigConstants.EFO_CONSUMER_ARN_PREFIX + "." + "fakedstream1", "fakedstream1");
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_REGISTRATION_TYPE, NONE.toString());
+		testConfig.setProperty(EFO_CONSUMER_ARN_PREFIX + "." + "fakedstream1", "fakedstream1");
 
 		new FanOutRecordPublisherConfiguration(testConfig, streams);
 	}
+
+	@Test
+	public void testParseRegisterStreamConsumerTimeout() {
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_CONSUMER_NAME, "name");
+		testConfig.setProperty(REGISTER_STREAM_TIMEOUT_SECONDS, "120");
+
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList());
+
+		assertEquals(Duration.ofSeconds(120), configuration.getRegisterStreamConsumerTimeout());
+		assertEquals(Duration.ofSeconds(60), configuration.getDeregisterStreamConsumerTimeout());
+	}
+
+	@Test
+	public void testParseDeregisterStreamConsumerTimeout() {
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(RECORD_PUBLISHER_TYPE, EFO.toString());
+		testConfig.setProperty(EFO_CONSUMER_NAME, "name");
+		testConfig.setProperty(DEREGISTER_STREAM_TIMEOUT_SECONDS, "240");
+
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList());
+
+		assertEquals(Duration.ofSeconds(60), configuration.getRegisterStreamConsumerTimeout());
+		assertEquals(Duration.ofSeconds(240), configuration.getDeregisterStreamConsumerTimeout());
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java
new file mode 100644
index 0000000..79757d2
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout;
+
+import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisException.FlinkKinesisTimeoutException;
+import org.apache.flink.streaming.connectors.kinesis.proxy.FullJitterBackoff;
+import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory;
+import org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.StreamConsumerFakeKinesis;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
+
+import java.util.Properties;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.LAZY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_REGISTRATION_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.STREAM_CONSUMER_ARN_EXISTING;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.STREAM_CONSUMER_ARN_NEW;
+import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.StreamConsumerFakeKinesis.NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.anyDouble;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for {@link StreamConsumerRegistrar}.
+ */
+public class StreamConsumerRegistrarTest {
+
+	private static final String STREAM = "stream";
+
+	private static final long EXPECTED_REGISTRATION_MAX = 1;
+	private static final long EXPECTED_REGISTRATION_BASE = 2;
+	private static final double EXPECTED_REGISTRATION_POW = 0.5;
+
+	private static final long EXPECTED_DEREGISTRATION_MAX = 2;
+	private static final long EXPECTED_DEREGISTRATION_BASE = 4;
+	private static final double EXPECTED_DEREGISTRATION_POW = 1;
+
+	@Rule
+	public final ExpectedException thrown = ExpectedException.none();
+
+	@Test
+	public void testStreamNotFoundWhenRegisteringThrowsException() throws Exception {
+		thrown.expect(ResourceNotFoundException.class);
+
+		KinesisProxyV2Interface kinesis = FakeKinesisFanOutBehavioursFactory.streamNotFound();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, mock(FullJitterBackoff.class));
+
+		registrar.registerStreamConsumer(STREAM, "name");
+	}
+
+	@Test
+	public void testRegisterStreamConsumerRegistersNewStreamConsumer() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		KinesisProxyV2Interface kinesis = FakeKinesisFanOutBehavioursFactory.streamConsumerNotFound();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		String result = registrar.registerStreamConsumer(STREAM, "name");
+
+		assertEquals(STREAM_CONSUMER_ARN_NEW, result);
+	}
+
+	@Test
+	public void testRegisterStreamConsumerThatAlreadyExistsAndActive() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		KinesisProxyV2Interface kinesis = FakeKinesisFanOutBehavioursFactory.existingActiveConsumer();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		String result = registrar.registerStreamConsumer(STREAM, "name");
+
+		verify(backoff, never()).sleep(anyLong());
+		assertEquals(STREAM_CONSUMER_ARN_EXISTING, result);
+	}
+
+	@Test
+	public void testRegisterStreamConsumerWaitsForConsumerToBecomeActive() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.registerExistingConsumerAndWaitToBecomeActive();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		String result = registrar.registerStreamConsumer(STREAM, "name");
+
+		// we backoff on each retry
+		verify(backoff, times(NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE - 1)).sleep(anyLong());
+		assertEquals(STREAM_CONSUMER_ARN_EXISTING, result);
+
+		// We will invoke describe stream until the stream consumer is activated
+		assertEquals(NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE, kinesis.getNumberOfDescribeStreamConsumerInvocations());
+
+		for (int i = 1; i < NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE; i++) {
+			verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(i));
+		}
+	}
+
+	@Test
+	public void testRegisterStreamConsumerTimeoutWaitingForConsumerToBecomeActive() throws Exception {
+		thrown.expect(FlinkKinesisTimeoutException.class);
+		thrown.expectMessage("Timeout waiting for stream consumer to become active: name on stream-arn");
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.registerExistingConsumerAndWaitToBecomeActive();
+
+		Properties configProps = createEfoProperties();
+		configProps.setProperty(REGISTER_STREAM_TIMEOUT_SECONDS, "1");
+
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(configProps, singletonList(STREAM));
+		StreamConsumerRegistrar registrar = new StreamConsumerRegistrar(kinesis, configuration, backoffFor(1001));
+
+		registrar.registerStreamConsumer(STREAM, "name");
+	}
+
+	@Test
+	public void testRegistrationBackoffForLazy() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		KinesisProxyV2Interface kinesis = FakeKinesisFanOutBehavioursFactory.existingActiveConsumer();
+
+		Properties efoProperties = createEfoProperties();
+		efoProperties.setProperty(EFO_REGISTRATION_TYPE, LAZY.name());
+
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(efoProperties, emptyList());
+		StreamConsumerRegistrar registrar = new StreamConsumerRegistrar(kinesis, configuration, backoff);
+
+		String result = registrar.registerStreamConsumer(STREAM, "name");
+
+		verify(backoff).sleep(anyLong());
+		assertEquals(STREAM_CONSUMER_ARN_EXISTING, result);
+	}
+
+	@Test
+	public void testDeregisterStreamConsumerAndWaitForDeletingStatus() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.existingActiveConsumer();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		registrar.deregisterStreamConsumer(STREAM);
+
+		// We will invoke describe stream until the stream consumer is in the DELETING state
+		assertEquals(2, kinesis.getNumberOfDescribeStreamConsumerInvocations());
+
+		for (int i = 1; i < 2; i++) {
+			verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(i));
+		}
+	}
+
+	@Test
+	public void testDeregisterStreamConsumerTimeoutWaitingForConsumerToDeregister() throws Exception {
+		thrown.expect(FlinkKinesisTimeoutException.class);
+		thrown.expectMessage("Timeout waiting for stream consumer to deregister: stream-consumer-arn");
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.existingActiveConsumer();
+
+		Properties configProps = createEfoProperties();
+		configProps.setProperty(DEREGISTER_STREAM_TIMEOUT_SECONDS, "1");
+
+		FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(configProps, singletonList(STREAM));
+		StreamConsumerRegistrar registrar = new StreamConsumerRegistrar(kinesis, configuration, backoffFor(1001));
+
+		registrar.deregisterStreamConsumer(STREAM);
+	}
+
+	@Test
+	public void testDeregisterStreamConsumerNotFound() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.streamConsumerNotFound();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		registrar.deregisterStreamConsumer(STREAM);
+
+		assertEquals(1, kinesis.getNumberOfDescribeStreamConsumerInvocations());
+	}
+
+	@Test
+	public void testDeregisterStreamConsumerArnNotFound() throws Exception {
+		thrown.expect(IllegalArgumentException.class);
+		thrown.expectMessage("Stream consumer ARN not found for stream: not-found");
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+
+		StreamConsumerFakeKinesis kinesis = FakeKinesisFanOutBehavioursFactory.streamConsumerNotFound();
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, backoff);
+
+		registrar.deregisterStreamConsumer("not-found");
+	}
+
+	@Test
+	public void testRegistrationBackoff() throws Exception {
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		when(backoff.calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), anyInt())).thenReturn(5L);
+
+		StreamConsumerRegistrar registrar = new StreamConsumerRegistrar(mock(KinesisProxyV2Interface.class), configuration, backoff);
+
+		registrar.registrationBackoff(configuration, backoff, 10);
+
+		verify(backoff).sleep(5);
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_REGISTRATION_BASE,
+			EXPECTED_REGISTRATION_MAX,
+			EXPECTED_REGISTRATION_POW,
+			10
+		);
+	}
+
+	@Test
+	public void testDeregistrationBackoff() throws Exception {
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		when(backoff.calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), anyInt())).thenReturn(5L);
+
+		StreamConsumerRegistrar registrar = new StreamConsumerRegistrar(mock(KinesisProxyV2Interface.class), configuration, backoff);
+
+		registrar.deregistrationBackoff(configuration, backoff, 11);
+
+		verify(backoff).sleep(5);
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_DEREGISTRATION_BASE,
+			EXPECTED_DEREGISTRATION_MAX,
+			EXPECTED_DEREGISTRATION_POW,
+			11
+		);
+	}
+
+	@Test
+	public void testCloseClosesProxy() {
+		KinesisProxyV2Interface kinesis = mock(KinesisProxyV2Interface.class);
+		StreamConsumerRegistrar registrar = createRegistrar(kinesis, mock(FullJitterBackoff.class));
+
+		registrar.close();
+
+		verify(kinesis).close();
+	}
+
+	private StreamConsumerRegistrar createRegistrar(final KinesisProxyV2Interface kinesis, final FullJitterBackoff backoff) {
+		FanOutRecordPublisherConfiguration configuration = createConfiguration();
+		return new StreamConsumerRegistrar(kinesis, configuration, backoff);
+	}
+
+	private FanOutRecordPublisherConfiguration createConfiguration() {
+		return new FanOutRecordPublisherConfiguration(createEfoProperties(), singletonList(STREAM));
+	}
+
+	private Properties createEfoProperties() {
+		Properties config = new Properties();
+		config.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		config.setProperty(EFO_CONSUMER_NAME, "dummy-efo-consumer");
+		config.setProperty(REGISTER_STREAM_BACKOFF_BASE, String.valueOf(EXPECTED_REGISTRATION_BASE));
+		config.setProperty(REGISTER_STREAM_BACKOFF_MAX, String.valueOf(EXPECTED_REGISTRATION_MAX));
+		config.setProperty(REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_REGISTRATION_POW));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_BASE, String.valueOf(EXPECTED_DEREGISTRATION_BASE));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_MAX, String.valueOf(EXPECTED_DEREGISTRATION_MAX));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_DEREGISTRATION_POW));
+		config.setProperty(efoConsumerArn(STREAM), "stream-consumer-arn");
+		return config;
+	}
+
+	private FullJitterBackoff backoffFor(final long millisToBackoffFor) {
+		FullJitterBackoff backoff = spy(new FullJitterBackoff());
+		when(backoff.calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), anyInt())).thenReturn(millisToBackoffFor);
+		return backoff;
+	}
+
+}
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
index f7641c3..7c36efc 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java
@@ -17,24 +17,95 @@
 
 package org.apache.flink.streaming.connectors.kinesis.proxy;
 
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.FanOutRecordPublisherConfiguration;
+
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
+import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
 
+import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+
+import static java.util.Collections.emptyList;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEREGISTER_STREAM_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_MAX;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_DESCRIBE_RETRIES;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_BASE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_MAX;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
- * Tests for {@link KinesisProxyV2}.
+ * Test for methods in the {@link KinesisProxyV2} class.
  */
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(KinesisProxyV2.class)
 public class KinesisProxyV2Test {
 
+	private static final long EXPECTED_SUBSCRIBE_TO_SHARD_MAX = 1;
+	private static final long EXPECTED_SUBSCRIBE_TO_SHARD_BASE = 2;
+	private static final double EXPECTED_SUBSCRIBE_TO_SHARD_POW = 0.1;
+
+	private static final long EXPECTED_REGISTRATION_MAX = 2;
+	private static final long EXPECTED_REGISTRATION_BASE = 3;
+	private static final double EXPECTED_REGISTRATION_POW = 0.2;
+
+	private static final long EXPECTED_DEREGISTRATION_MAX = 3;
+	private static final long EXPECTED_DEREGISTRATION_BASE = 4;
+	private static final double EXPECTED_DEREGISTRATION_POW = 0.3;
+
+	private static final long EXPECTED_DESCRIBE_CONSUMER_MAX = 4;
+	private static final long EXPECTED_DESCRIBE_CONSUMER_BASE = 5;
+	private static final double EXPECTED_DESCRIBE_CONSUMER_POW = 0.4;
+
+	private static final long EXPECTED_DESCRIBE_STREAM_MAX = 5;
+	private static final long EXPECTED_DESCRIBE_STREAM_BASE = 6;
+	private static final double EXPECTED_DESCRIBE_STREAM_POW = 0.5;
+	private static final int EXPECTED_DESCRIBE_STREAM_RETRIES = 10;
+
+	@Rule
+	public final ExpectedException exception = ExpectedException.none();
+
 	@Test
 	public void testSubscribeToShard() {
 		KinesisAsyncClient kinesis = mock(KinesisAsyncClient.class);
-		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis);
+		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
 
 		SubscribeToShardRequest request = SubscribeToShardRequest.builder().build();
 		SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
@@ -49,12 +120,238 @@ public class KinesisProxyV2Test {
 
 	@Test
 	public void testCloseInvokesClientClose() {
+		SdkAsyncHttpClient httpClient = mock(SdkAsyncHttpClient.class);
 		KinesisAsyncClient kinesis = mock(KinesisAsyncClient.class);
-		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis);
+		KinesisProxyV2 proxy = new KinesisProxyV2(kinesis, httpClient, createConfiguration(), mock(FullJitterBackoff.class));
 
 		proxy.close();
 
 		verify(kinesis).close();
+		verify(httpClient).close();
+	}
+
+	@Test
+	public void testRegisterStreamConsumer() throws Exception {
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
+
+		RegisterStreamConsumerResponse expected = RegisterStreamConsumerResponse.builder().build();
+
+		ArgumentCaptor<RegisterStreamConsumerRequest> requestCaptor = ArgumentCaptor
+			.forClass(RegisterStreamConsumerRequest.class);
+		when(client.registerStreamConsumer(requestCaptor.capture()))
+			.thenReturn(CompletableFuture.completedFuture(expected));
+
+		RegisterStreamConsumerResponse actual = proxy.registerStreamConsumer("arn", "name");
+
+		assertEquals(expected, actual);
+
+		RegisterStreamConsumerRequest request = requestCaptor.getValue();
+		assertEquals("arn", request.streamARN());
+		assertEquals("name", request.consumerName());
+	}
+
+	@Test
+	public void testRegisterStreamConsumerBackoffJitter() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), backoff);
+
+		when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class)))
+			.thenThrow(new RuntimeException(LimitExceededException.builder().build()))
+			.thenReturn(CompletableFuture.completedFuture(RegisterStreamConsumerResponse.builder().build()));
+
+		proxy.registerStreamConsumer("arn", "name");
+
+		verify(backoff).sleep(anyLong());
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_REGISTRATION_BASE,
+			EXPECTED_REGISTRATION_MAX,
+			EXPECTED_REGISTRATION_POW,
+			1);
+	}
+
+	@Test
+	public void testDeregisterStreamConsumer() throws Exception {
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
+
+		DeregisterStreamConsumerResponse expected = DeregisterStreamConsumerResponse.builder().build();
+
+		ArgumentCaptor<DeregisterStreamConsumerRequest> requestCaptor = ArgumentCaptor
+			.forClass(DeregisterStreamConsumerRequest.class);
+		when(client.deregisterStreamConsumer(requestCaptor.capture()))
+			.thenReturn(CompletableFuture.completedFuture(expected));
+
+		DeregisterStreamConsumerResponse actual = proxy.deregisterStreamConsumer("arn");
+
+		assertEquals(expected, actual);
+
+		DeregisterStreamConsumerRequest request = requestCaptor.getValue();
+		assertEquals("arn", request.consumerARN());
+	}
+
+	@Test
+	public void testDeregisterStreamConsumerBackoffJitter() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), backoff);
+
+		when(client.deregisterStreamConsumer(any(DeregisterStreamConsumerRequest.class)))
+			.thenThrow(new RuntimeException(LimitExceededException.builder().build()))
+			.thenReturn(CompletableFuture.completedFuture(DeregisterStreamConsumerResponse.builder().build()));
+
+		proxy.deregisterStreamConsumer("arn");
+
+		verify(backoff).sleep(anyLong());
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_DEREGISTRATION_BASE,
+			EXPECTED_DEREGISTRATION_MAX,
+			EXPECTED_DEREGISTRATION_POW,
+			1);
+	}
+
+	@Test
+	public void testDescribeStreamConsumerWithStreamConsumerArn() throws Exception {
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
+
+		DescribeStreamConsumerResponse expected = DescribeStreamConsumerResponse.builder().build();
+
+		ArgumentCaptor<DescribeStreamConsumerRequest> requestCaptor = ArgumentCaptor
+			.forClass(DescribeStreamConsumerRequest.class);
+		when(client.describeStreamConsumer(requestCaptor.capture()))
+			.thenReturn(CompletableFuture.completedFuture(expected));
+
+		DescribeStreamConsumerResponse actual = proxy.describeStreamConsumer("arn");
+
+		assertEquals(expected, actual);
+
+		DescribeStreamConsumerRequest request = requestCaptor.getValue();
+		assertEquals("arn", request.consumerARN());
+	}
+
+	@Test
+	public void testDescribeStreamConsumerWithStreamArnAndConsumerName() throws Exception {
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
+
+		DescribeStreamConsumerResponse expected = DescribeStreamConsumerResponse.builder().build();
+
+		ArgumentCaptor<DescribeStreamConsumerRequest> requestCaptor = ArgumentCaptor
+			.forClass(DescribeStreamConsumerRequest.class);
+		when(client.describeStreamConsumer(requestCaptor.capture()))
+			.thenReturn(CompletableFuture.completedFuture(expected));
+
+		DescribeStreamConsumerResponse actual = proxy.describeStreamConsumer("arn", "name");
+
+		assertEquals(expected, actual);
+
+		DescribeStreamConsumerRequest request = requestCaptor.getValue();
+		assertEquals("arn", request.streamARN());
+		assertEquals("name", request.consumerName());
+	}
+
+	@Test
+	public void testDescribeStreamConsumerBackoffJitter() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), backoff);
+
+		when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class)))
+			.thenThrow(new RuntimeException(LimitExceededException.builder().build()))
+			.thenReturn(CompletableFuture.completedFuture(DescribeStreamConsumerResponse.builder().build()));
+
+		proxy.describeStreamConsumer("arn");
+
+		verify(backoff).sleep(anyLong());
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_DESCRIBE_CONSUMER_BASE,
+			EXPECTED_DESCRIBE_CONSUMER_MAX,
+			EXPECTED_DESCRIBE_CONSUMER_POW,
+			1);
+	}
+
+	@Test
+	public void testDescribeStream() throws Exception {
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), mock(FullJitterBackoff.class));
+
+		DescribeStreamResponse expected = DescribeStreamResponse.builder().build();
+
+		ArgumentCaptor<DescribeStreamRequest> requestCaptor = ArgumentCaptor
+			.forClass(DescribeStreamRequest.class);
+		when(client.describeStream(requestCaptor.capture()))
+			.thenReturn(CompletableFuture.completedFuture(expected));
+
+		DescribeStreamResponse actual = proxy.describeStream("stream");
+
+		assertEquals(expected, actual);
+
+		DescribeStreamRequest request = requestCaptor.getValue();
+		assertEquals("stream", request.streamName());
+	}
+
+	@Test
+	public void testDescribeStreamBackoffJitter() throws Exception {
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), backoff);
+
+		when(client.describeStream(any(DescribeStreamRequest.class)))
+			.thenThrow(new RuntimeException(LimitExceededException.builder().build()))
+			.thenReturn(CompletableFuture.completedFuture(DescribeStreamResponse.builder().build()));
+
+		proxy.describeStream("arn");
+
+		verify(backoff).sleep(anyLong());
+		verify(backoff).calculateFullJitterBackoff(
+			EXPECTED_DESCRIBE_STREAM_BASE,
+			EXPECTED_DESCRIBE_STREAM_MAX,
+			EXPECTED_DESCRIBE_STREAM_POW,
+			1);
+	}
+
+	@Test
+	public void testDescribeStreamFailsAfterMaxRetries() throws Exception {
+		exception.expect(RuntimeException.class);
+		exception.expectMessage("Retries exceeded - all 10 retry attempts failed.");
+
+		FullJitterBackoff backoff = mock(FullJitterBackoff.class);
+		KinesisAsyncClient client = mock(KinesisAsyncClient.class);
+		KinesisProxyV2 proxy = new KinesisProxyV2(client, mock(SdkAsyncHttpClient.class), createConfiguration(), backoff);
+
+		when(client.describeStream(any(DescribeStreamRequest.class)))
+			.thenThrow(new RuntimeException(LimitExceededException.builder().build()));
+
+		proxy.describeStream("arn");
+	}
+
+	private FanOutRecordPublisherConfiguration createConfiguration() {
+		return new FanOutRecordPublisherConfiguration(createEfoProperties(), emptyList());
+	}
+
+	private Properties createEfoProperties() {
+		Properties config = new Properties();
+		config.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		config.setProperty(EFO_CONSUMER_NAME, "dummy-efo-consumer");
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_BASE, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_BASE));
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_MAX, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_MAX));
+		config.setProperty(SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_SUBSCRIBE_TO_SHARD_POW));
+		config.setProperty(REGISTER_STREAM_BACKOFF_BASE, String.valueOf(EXPECTED_REGISTRATION_BASE));
+		config.setProperty(REGISTER_STREAM_BACKOFF_MAX, String.valueOf(EXPECTED_REGISTRATION_MAX));
+		config.setProperty(REGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_REGISTRATION_POW));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_BASE, String.valueOf(EXPECTED_DEREGISTRATION_BASE));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_MAX, String.valueOf(EXPECTED_DEREGISTRATION_MAX));
+		config.setProperty(DEREGISTER_STREAM_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_DEREGISTRATION_POW));
+		config.setProperty(DESCRIBE_STREAM_CONSUMER_BACKOFF_BASE, String.valueOf(EXPECTED_DESCRIBE_CONSUMER_BASE));
+		config.setProperty(DESCRIBE_STREAM_CONSUMER_BACKOFF_MAX, String.valueOf(EXPECTED_DESCRIBE_CONSUMER_MAX));
+		config.setProperty(DESCRIBE_STREAM_CONSUMER_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_DESCRIBE_CONSUMER_POW));
+		config.setProperty(STREAM_DESCRIBE_BACKOFF_BASE, String.valueOf(EXPECTED_DESCRIBE_STREAM_BASE));
+		config.setProperty(STREAM_DESCRIBE_BACKOFF_MAX, String.valueOf(EXPECTED_DESCRIBE_STREAM_MAX));
+		config.setProperty(STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT, String.valueOf(EXPECTED_DESCRIBE_STREAM_POW));
+		config.setProperty(STREAM_DESCRIBE_RETRIES, String.valueOf(EXPECTED_DESCRIBE_STREAM_RETRIES));
+		return config;
 	}
 
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
index 83e299a..3552efb 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java
@@ -20,14 +20,21 @@ package org.apache.flink.streaming.connectors.kinesis.testutils;
 import org.apache.flink.streaming.connectors.kinesis.proxy.KinesisProxyV2Interface;
 
 import com.amazonaws.kinesis.agg.RecordAggregator;
-import org.apache.commons.lang3.NotImplementedException;
 import org.reactivestreams.Subscriber;
 import org.reactivestreams.Subscription;
 import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kinesis.model.Consumer;
+import software.amazon.awssdk.services.kinesis.model.ConsumerDescription;
+import software.amazon.awssdk.services.kinesis.model.ConsumerStatus;
+import software.amazon.awssdk.services.kinesis.model.DeregisterStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
 import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
 import software.amazon.awssdk.services.kinesis.model.Record;
+import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse;
 import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
 import software.amazon.awssdk.services.kinesis.model.StartingPosition;
+import software.amazon.awssdk.services.kinesis.model.StreamDescription;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
 import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
@@ -38,18 +45,31 @@ import java.time.Instant;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.ACTIVE;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.CREATING;
+import static software.amazon.awssdk.services.kinesis.model.ConsumerStatus.DELETING;
 
 /**
  * Factory for different kinds of fake Kinesis behaviours using the {@link KinesisProxyV2Interface} interface.
  */
 public class FakeKinesisFanOutBehavioursFactory {
 
+	public static final String STREAM_ARN = "stream-arn";
+	public static final String STREAM_CONSUMER_ARN_EXISTING = "stream-consumer-arn";
+	public static final String STREAM_CONSUMER_ARN_NEW = "stream-consumer-arn-new";
+
+	// ------------------------------------------------------------------------
+	//  Behaviours related to subscribe to shard and consuming data
+	// ------------------------------------------------------------------------
+
 	public static SingleShardFanOutKinesisV2.Builder boundedShard() {
 		return new SingleShardFanOutKinesisV2.Builder();
 	}
@@ -74,6 +94,36 @@ public class FakeKinesisFanOutBehavioursFactory {
 		return new AlternatingSubscriptionErrorKinesisV2(LimitExceededException.builder().build());
 	}
 
+	// ------------------------------------------------------------------------
+	//  Behaviours related to describing streams
+	// ------------------------------------------------------------------------
+
+	public static KinesisProxyV2Interface streamNotFound() {
+		return new StreamConsumerFakeKinesis.Builder()
+			.withThrowsWhileDescribingStream(ResourceNotFoundException.builder().build())
+			.build();
+	}
+
+	// ------------------------------------------------------------------------
+	//  Behaviours related to stream consumer registration/deregistration
+	// ------------------------------------------------------------------------
+
+	public static StreamConsumerFakeKinesis streamConsumerNotFound() {
+		return new StreamConsumerFakeKinesis.Builder()
+			.withStreamConsumerNotFound(true)
+			.build();
+	}
+
+	public static StreamConsumerFakeKinesis existingActiveConsumer() {
+		return new StreamConsumerFakeKinesis.Builder().build();
+	}
+
+	public static StreamConsumerFakeKinesis registerExistingConsumerAndWaitToBecomeActive() {
+		return new StreamConsumerFakeKinesis.Builder()
+			.withStreamConsumerStatus(CREATING)
+			.build();
+	}
+
 	public static AbstractSingleShardFanOutKinesisV2 emptyBatchFollowedBySingleRecord() {
 		return new AbstractSingleShardFanOutKinesisV2(2) {
 			private int subscription = 0;
@@ -298,7 +348,6 @@ public class FakeKinesisFanOutBehavioursFactory {
 	public abstract static class AbstractSingleShardFanOutKinesisV2 extends KinesisProxyV2InterfaceAdapter {
 
 		private final List<SubscribeToShardRequest> requests = new ArrayList<>();
-
 		private int remainingSubscriptions;
 
 		private AbstractSingleShardFanOutKinesisV2(final int remainingSubscriptions) {
@@ -352,12 +401,164 @@ public class FakeKinesisFanOutBehavioursFactory {
 
 	}
 
+	/**
+	 * A fake Kinesis Proxy V2 that implements dummy logic for stream consumer related methods.
+	 */
+	public static class StreamConsumerFakeKinesis extends KinesisProxyV2InterfaceAdapter {
+
+		public static final int NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE = 5;
+		public static final int NUMBER_OF_DESCRIBE_REQUESTS_TO_DELETE = 5;
+
+		private final RuntimeException throwsWhileDescribingStream;
+		private String streamConsumerArn = STREAM_CONSUMER_ARN_EXISTING;
+		private ConsumerStatus streamConsumerStatus;
+		private boolean streamConsumerNotFound;
+		private int numberOfDescribeStreamConsumerInvocations = 0;
+
+		private StreamConsumerFakeKinesis(final Builder builder) {
+			this.throwsWhileDescribingStream = builder.throwsWhileDescribingStream;
+			this.streamConsumerStatus = builder.streamConsumerStatus;
+			this.streamConsumerNotFound = builder.streamConsumerNotFound;
+		}
+
+		public int getNumberOfDescribeStreamConsumerInvocations() {
+			return numberOfDescribeStreamConsumerInvocations;
+		}
+
+		@Override
+		public DescribeStreamResponse describeStream(String stream) throws InterruptedException, ExecutionException {
+			if (throwsWhileDescribingStream != null) {
+				throw throwsWhileDescribingStream;
+			}
+
+			return DescribeStreamResponse
+				.builder()
+				.streamDescription(StreamDescription
+					.builder()
+					.streamARN(STREAM_ARN)
+					.build())
+				.build();
+		}
+
+		@Override
+		public RegisterStreamConsumerResponse registerStreamConsumer(String streamArn, String consumerName) throws InterruptedException, ExecutionException {
+			assertEquals(STREAM_ARN, streamArn);
+
+			streamConsumerNotFound = false;
+			streamConsumerArn = STREAM_CONSUMER_ARN_NEW;
+
+			return RegisterStreamConsumerResponse
+				.builder()
+				.consumer(Consumer
+					.builder()
+					.consumerARN(STREAM_CONSUMER_ARN_NEW)
+					.consumerStatus(streamConsumerStatus)
+					.build())
+				.build();
+		}
+
+		@Override
+		public DeregisterStreamConsumerResponse deregisterStreamConsumer(final String consumerArn) throws InterruptedException, ExecutionException {
+			streamConsumerStatus = DELETING;
+			return DeregisterStreamConsumerResponse.builder().build();
+		}
+
+		@Override
+		public DescribeStreamConsumerResponse describeStreamConsumer(
+				final String streamArn,
+				final String consumerName) throws InterruptedException, ExecutionException {
+			assertEquals(STREAM_ARN, streamArn);
+
+			numberOfDescribeStreamConsumerInvocations++;
+
+			if (streamConsumerStatus == DELETING && numberOfDescribeStreamConsumerInvocations == NUMBER_OF_DESCRIBE_REQUESTS_TO_DELETE) {
+				streamConsumerNotFound = true;
+			} else if (numberOfDescribeStreamConsumerInvocations == NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE) {
+				streamConsumerStatus = ACTIVE;
+			}
+
+			if (streamConsumerNotFound) {
+				throw new ExecutionException(ResourceNotFoundException.builder().build());
+			}
+
+			return DescribeStreamConsumerResponse
+				.builder()
+				.consumerDescription(ConsumerDescription
+					.builder()
+					.consumerARN(streamConsumerArn)
+					.consumerName(consumerName)
+					.consumerStatus(streamConsumerStatus)
+					.build())
+				.build();
+		}
+
+		@Override
+		public DescribeStreamConsumerResponse describeStreamConsumer(String streamConsumerArn) throws InterruptedException, ExecutionException {
+			assertEquals(this.streamConsumerArn, streamConsumerArn);
+			return describeStreamConsumer(STREAM_ARN, "consumer-name");
+		}
+
+		private static class Builder {
+
+			private RuntimeException throwsWhileDescribingStream;
+			private ConsumerStatus streamConsumerStatus = ACTIVE;
+			private boolean streamConsumerNotFound = false;
+
+			public StreamConsumerFakeKinesis build() {
+				return new StreamConsumerFakeKinesis(this);
+			}
+
+			public Builder withStreamConsumerNotFound(final boolean streamConsumerNotFound) {
+				this.streamConsumerNotFound = streamConsumerNotFound;
+				return this;
+			}
+
+			public Builder withThrowsWhileDescribingStream(final RuntimeException throwsWhileDescribingStream) {
+				this.throwsWhileDescribingStream = throwsWhileDescribingStream;
+				return this;
+			}
+
+			public Builder withStreamConsumerStatus(final ConsumerStatus streamConsumerStatus) {
+				this.streamConsumerStatus = streamConsumerStatus;
+				return this;
+			}
+
+		}
+
+	}
+
 	private static class KinesisProxyV2InterfaceAdapter implements KinesisProxyV2Interface {
 
 		@Override
+		public DescribeStreamResponse describeStream(String stream) throws InterruptedException, ExecutionException {
+			throw new UnsupportedOperationException("This method is not implemented.");
+		}
+
+		@Override
+		public DescribeStreamConsumerResponse describeStreamConsumer(String streamConsumerArn) throws InterruptedException, ExecutionException {
+			throw new UnsupportedOperationException("This method is not implemented.");
+		}
+
+		@Override
+		public DescribeStreamConsumerResponse describeStreamConsumer(String streamArn, String consumerName) throws InterruptedException, ExecutionException {
+			throw new UnsupportedOperationException("This method is not implemented.");
+		}
+
+		@Override
+		public RegisterStreamConsumerResponse registerStreamConsumer(String streamArn, String consumerName) throws InterruptedException, ExecutionException {
+			throw new UnsupportedOperationException("This method is not implemented.");
+		}
+
+		@Override
+		public DeregisterStreamConsumerResponse deregisterStreamConsumer(String consumerArn) throws InterruptedException, ExecutionException {
+			throw new UnsupportedOperationException("This method is not implemented.");
+		}
+
+		@Override
 		public CompletableFuture<Void> subscribeToShard(SubscribeToShardRequest request, SubscribeToShardResponseHandler responseHandler) {
-			throw new NotImplementedException("This method is not implemented.");
+			throw new UnsupportedOperationException("This method is not implemented.");
 		}
+
 	}
 
 	private static Record createRecord(final AtomicInteger sequenceNumber) {
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
index 4d96ab8..7586428 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java
@@ -37,12 +37,14 @@ import software.amazon.awssdk.http.nio.netty.Http2Configuration;
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;
+import software.amazon.awssdk.services.kinesis.model.LimitExceededException;
 import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
 
 import java.net.URI;
 import java.nio.file.Paths;
 import java.time.Duration;
 import java.util.Properties;
+import java.util.concurrent.ExecutionException;
 
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.AWS_CREDENTIALS_PROVIDER;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.AWS_REGION;
@@ -50,8 +52,15 @@ import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigCons
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.roleSessionName;
 import static org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants.webIdentityTokenFile;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_EFO_HTTP_CLIENT_MAX_CONURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.EAGER;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.LAZY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFORegistrationType.NONE;
 import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.POLLING;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
@@ -412,4 +421,94 @@ public class AwsV2UtilTest {
 
 		return builder;
 	}
+
+	@Test
+	public void testIsUsingEfoRecordPublisher() {
+		Properties prop = new Properties();
+		assertFalse(AwsV2Util.isUsingEfoRecordPublisher(prop));
+
+		prop.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		assertTrue(AwsV2Util.isUsingEfoRecordPublisher(prop));
+
+		prop.setProperty(RECORD_PUBLISHER_TYPE, POLLING.name());
+		assertFalse(AwsV2Util.isUsingEfoRecordPublisher(prop));
+	}
+
+	@Test
+	public void testIsEagerEfoRegistrationType() {
+		Properties prop = new Properties();
+		assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EAGER.name());
+		assertTrue(AwsV2Util.isEagerEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, LAZY.name());
+		assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, NONE.name());
+		assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop));
+	}
+
+	@Test
+	public void testIsLazyEfoRegistrationType() {
+		Properties prop = new Properties();
+		assertTrue(AwsV2Util.isLazyEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EAGER.name());
+		assertFalse(AwsV2Util.isLazyEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, LAZY.name());
+		assertTrue(AwsV2Util.isLazyEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, NONE.name());
+		assertFalse(AwsV2Util.isLazyEfoRegistrationType(prop));
+	}
+
+	@Test
+	public void testIsNoneEfoRegistrationType() {
+		Properties prop = new Properties();
+		assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, EAGER.name());
+		assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, LAZY.name());
+		assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop));
+
+		prop.setProperty(ConsumerConfigConstants.EFO_REGISTRATION_TYPE, NONE.name());
+		assertTrue(AwsV2Util.isNoneEfoRegistrationType(prop));
+	}
+
+	@Test
+	public void testIsRecoverableExceptionForRecoverable() {
+		Exception recoverable = LimitExceededException.builder().build();
+		assertTrue(AwsV2Util.isRecoverableException(new ExecutionException(recoverable)));
+	}
+
+	@Test
+	public void testIsRecoverableExceptionForNonRecoverable() {
+		Exception nonRecoverable = new IllegalArgumentException("abc");
+		assertFalse(AwsV2Util.isRecoverableException(new ExecutionException(nonRecoverable)));
+	}
+
+	@Test
+	public void testIsRecoverableExceptionForRuntimeExceptionWrappingRecoverable() {
+		Exception recoverable = LimitExceededException.builder().build();
+		Exception runtime = new RuntimeException("abc", recoverable);
+		assertTrue(AwsV2Util.isRecoverableException(runtime));
+	}
+
+	@Test
+	public void testIsRecoverableExceptionForRuntimeExceptionWrappingNonRecoverable() {
+		Exception nonRecoverable = new IllegalArgumentException("abc");
+		Exception runtime = new RuntimeException("abc", nonRecoverable);
+		assertFalse(AwsV2Util.isRecoverableException(runtime));
+	}
+
+	@Test
+	public void testIsRecoverableExceptionForNullCause() {
+		Exception nonRecoverable = new IllegalArgumentException("abc");
+		assertFalse(AwsV2Util.isRecoverableException(nonRecoverable));
+	}
+
 }
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
index ee32b6b..bff99f1 100644
--- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java
@@ -703,6 +703,17 @@ public class KinesisConfigUtilTest {
 	}
 
 	@Test
+	public void testUnparsableIntForRegisterStreamTimeoutInConfig() {
+		exception.expect(IllegalArgumentException.class);
+		exception.expectMessage("Invalid value given for maximum timeout for register stream consumer. Must be a valid non-negative integer value.");
+
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS, "unparsableInt");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
+	@Test
 	public void testUnparsableLongForRegisterStreamBackoffBaseMillisInConfig() {
 		exception.expect(IllegalArgumentException.class);
 		exception.expectMessage("Invalid value given for register stream operation base backoff milliseconds");
@@ -747,6 +758,17 @@ public class KinesisConfigUtilTest {
 	}
 
 	@Test
+	public void testUnparsableIntForDeRegisterStreamTimeoutInConfig() {
+		exception.expect(IllegalArgumentException.class);
+		exception.expectMessage("Invalid value given for maximum timeout for deregister stream consumer. Must be a valid non-negative integer value.");
+
+		Properties testConfig = TestUtils.getStandardProperties();
+		testConfig.setProperty(ConsumerConfigConstants.DEREGISTER_STREAM_TIMEOUT_SECONDS, "unparsableInt");
+
+		KinesisConfigUtil.validateConsumerConfiguration(testConfig);
+	}
+
+	@Test
 	public void testUnparsableLongForDeRegisterStreamBackoffBaseMillisInConfig() {
 		exception.expect(IllegalArgumentException.class);
 		exception.expectMessage("Invalid value given for deregister stream operation base backoff milliseconds");
diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java
new file mode 100644
index 0000000..d9e7f6d
--- /dev/null
+++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.streaming.connectors.kinesis.util;
+
+import org.apache.flink.streaming.connectors.kinesis.internals.publisher.fanout.StreamConsumerRegistrar;
+
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_CONSUMER_NAME;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO;
+import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for {@link StreamConsumerRegistrarUtil}.
+ */
+public class StreamConsumerRegistrarUtilTest {
+
+	@Test
+	public void testRegisterStreamConsumers() throws Exception {
+		Properties configProps = new Properties();
+		configProps.setProperty(EFO_CONSUMER_NAME, "consumer-name");
+
+		StreamConsumerRegistrar registrar = mock(StreamConsumerRegistrar.class);
+		when(registrar.registerStreamConsumer("stream-1", "consumer-name"))
+			.thenReturn("stream-1-consumer-arn");
+		when(registrar.registerStreamConsumer("stream-2", "consumer-name"))
+			.thenReturn("stream-2-consumer-arn");
+
+		StreamConsumerRegistrarUtil.registerStreamConsumers(registrar, configProps, Arrays.asList("stream-1", "stream-2"));
+
+		assertEquals("stream-1-consumer-arn", configProps.getProperty(efoConsumerArn("stream-1")));
+		assertEquals("stream-2-consumer-arn", configProps.getProperty(efoConsumerArn("stream-2")));
+	}
+
+	@Test
+	public void testDeregisterStreamConsumersMissingStreamArn() throws Exception {
+		Properties configProps = new Properties();
+		configProps.setProperty(RECORD_PUBLISHER_TYPE, EFO.name());
+		configProps.setProperty(EFO_CONSUMER_NAME, "consumer-name");
+
+		List<String> streams = Arrays.asList("stream-1", "stream-2");
+		StreamConsumerRegistrar registrar = mock(StreamConsumerRegistrar.class);
+
+		StreamConsumerRegistrarUtil.deregisterStreamConsumers(registrar, configProps, streams);
+
+		verify(registrar).deregisterStreamConsumer("stream-1");
+		verify(registrar).deregisterStreamConsumer("stream-2");
+	}
+
+}