Posted to commits@pekko.apache.org by fa...@apache.org on 2023/02/13 22:13:29 UTC

[incubator-pekko-connectors-kafka] branch main updated: use correct config names and replace Akka/Alpakka mentions (#35)

This is an automated email from the ASF dual-hosted git repository.

fanningpj pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-pekko-connectors-kafka.git


The following commit(s) were added to refs/heads/main by this push:
     new 06cacfa2 use correct config names and replace Akka/Alpakka mentions (#35)
06cacfa2 is described below

commit 06cacfa2fbb6f8b8613206765d801fb4fd49239e
Author: PJ Fanning <pj...@users.noreply.github.com>
AuthorDate: Mon Feb 13 23:13:23 2023 +0100

    use correct config names and replace Akka/Alpakka mentions (#35)
---
 CONTRIBUTING.md                                    |   2 +-
 build.sbt                                          |   4 +-
 .../pekko/kafka/scaladsl/MetadataClient.scala      |   2 +-
 docs/release-train-issue-template.md               |   2 +-
 docs/src/main/paradox/.htaccess                    |   2 -
 docs/src/main/paradox/_template/logo.st            |   1 -
 docs/src/main/paradox/atleastonce.md               |   2 +-
 docs/src/main/paradox/cluster-sharding.md          |  38 +--
 docs/src/main/paradox/consumer-metadata.md         |   2 +-
 docs/src/main/paradox/consumer-rebalance.md        |  16 +-
 docs/src/main/paradox/consumer.md                  |  28 +-
 docs/src/main/paradox/debugging.md                 |  26 +-
 docs/src/main/paradox/discovery.md                 |  36 +-
 docs/src/main/paradox/errorhandling.md             |  16 +-
 docs/src/main/paradox/home.md                      |  60 ++--
 .../main/paradox/images/akka-alpakka-reverse.svg   |   1 -
 .../paradox/images/alpakka-kafka-stream-trace.png  | Bin 257757 -> 0 bytes
 docs/src/main/paradox/index.md                     |   6 +-
 docs/src/main/paradox/producer.md                  |  14 +-
 docs/src/main/paradox/production.md                |  24 +-
 docs/src/main/paradox/release-notes/1.0.x.md       | 177 ----------
 docs/src/main/paradox/release-notes/1.1.x.md       |  80 -----
 docs/src/main/paradox/release-notes/2.0.x.md       | 370 ---------------------
 docs/src/main/paradox/release-notes/2.1.x.md       | 169 ----------
 docs/src/main/paradox/release-notes/index.md       |  15 +-
 docs/src/main/paradox/send-producer.md             |   4 +-
 docs/src/main/paradox/serialization.md             |   4 +-
 docs/src/main/paradox/snapshots.md                 |  30 +-
 docs/src/main/paradox/subscription.md              |   2 +-
 docs/src/main/paradox/testing-testcontainers.md    |   8 +-
 docs/src/main/paradox/testing.md                   |  34 +-
 docs/src/main/paradox/transactions.md              |  10 +-
 project/ParadoxSettings.scala                      |  10 +
 project/ProjectSettings.scala                      |   2 +-
 project/plugins.sbt                                |  14 +-
 project/project-info.conf                          |  50 +--
 36 files changed, 214 insertions(+), 1047 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a7099a20..c1822713 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -69,7 +69,7 @@ Dependency licenses will be checked automatically by [FOSSA](https://fossa.com/)
 
 ## Work In Progress
 
-It is ok to work on a public feature branch in the GitHub repository. Something that can sometimes be useful for early feedback etc. If so then it is preferable to name the branch accordingly. This can be done by either prefix the name with ``wip-`` as in ‘Work In Progress’, or use hierarchical names like ``wip/..``, ``feature/..`` or ``topic/..``. Either way is fine as long as it is clear that it is work in progress and not ready for merge. This work can temporarily have a lower standar [...]
+It is ok to work on a public feature branch in the GitHub repository; this can sometimes be useful for early feedback etc. If so, it is preferable to name the branch accordingly, either by prefixing the name with ``wip-`` as in ‘Work In Progress’, or by using hierarchical names like ``wip/..``, ``feature/..`` or ``topic/..``. Either way is fine as long as it is clear that it is work in progress and not ready for merge. This work can temporarily have a lower standar [...]
 
 Also, to facilitate both well-formed commits and working together, the ``wip`` and ``feature``/``topic`` identifiers also have special meaning.   Any branch labelled with ``wip`` is considered “git-unstable” and may be rebased and have its history rewritten.   Any branch with ``feature``/``topic`` in the name is considered “stable” enough for others to depend on when a group is working on a feature.
 
diff --git a/build.sbt b/build.sbt
index ba4290d9..081fd587 100644
--- a/build.sbt
+++ b/build.sbt
@@ -92,7 +92,7 @@ lazy val tests = project
     IntegrationTest / parallelExecution := false)
 
 lazy val docs = project
-  .enablePlugins(ParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin)
+  .enablePlugins(ParadoxPlugin, PekkoParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin)
   .disablePlugins(MimaPlugin)
   .settings(commonSettings)
   .settings(
@@ -113,7 +113,7 @@ lazy val docs = project
       ("https://docs\\.oracle\\.com/en/java/javase/11/docs/api/".r,
         _ => "https://docs\\.oracle\\.com/en/java/javase/11/docs/api/")),
     Paradox / siteSubdirName := s"docs/pekko-connectors-kafka/${projectInfoVersion.value}",
-    ParadoxSettings.propertiesSettings,
+    ParadoxSettings.settings,
     resolvers += Resolver.jcenterRepo,
     publishRsyncArtifacts += makeSite.value -> "www/",
     publishRsyncHost := "akkarepo@gustav.akka.io")
diff --git a/core/src/main/scala/org/apache/pekko/kafka/scaladsl/MetadataClient.scala b/core/src/main/scala/org/apache/pekko/kafka/scaladsl/MetadataClient.scala
index 4515390e..b172897a 100644
--- a/core/src/main/scala/org/apache/pekko/kafka/scaladsl/MetadataClient.scala
+++ b/core/src/main/scala/org/apache/pekko/kafka/scaladsl/MetadataClient.scala
@@ -103,7 +103,7 @@ object MetadataClient {
     val consumerActor = system
       .asInstanceOf[ExtendedActorSystem]
       .systemActorOf(KafkaConsumerActor.props(consumerSettings),
-        s"alpakka-kafka-metadata-client-${actorCount.getAndIncrement()}")
+        s"pekko-connectors-kafka-metadata-client-${actorCount.getAndIncrement()}")
     new MetadataClient(consumerActor, timeout, true)
   }
 }
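
For orientation, a minimal sketch of how this factory method is reached from user code; the bootstrap address, timeout, and topic listing below are illustrative assumptions, not part of the library's defaults:

```scala
import scala.concurrent.duration._
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ConsumerSettings
import org.apache.pekko.kafka.scaladsl.MetadataClient
import org.apache.pekko.util.Timeout

implicit val system: ActorSystem = ActorSystem("metadata-example")
import system.dispatcher

val consumerSettings =
  ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers("localhost:9092")

// Spawns the system actor named "pekko-connectors-kafka-metadata-client-<n>" seen above
val metadataClient = MetadataClient.create(consumerSettings, Timeout(5.seconds))

metadataClient.listTopics().foreach { topics =>
  println(s"Topics: ${topics.keys.mkString(", ")}")
  metadataClient.close()
}
```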
diff --git a/docs/release-train-issue-template.md b/docs/release-train-issue-template.md
index 8a92aea4..d3f7588b 100644
--- a/docs/release-train-issue-template.md
+++ b/docs/release-train-issue-template.md
@@ -42,7 +42,7 @@ Key links:
 
 ### Check availability
 - [ ] Check [API](https://doc.akka.io/api/alpakka-kafka/$VERSION$/) documentation
-- [ ] Check [reference](https://doc.akka.io/docs/alpakka-kafka/$VERSION$/) documentation
+- [ ] Check [reference](https://pekko.apache.org/docs/pekko-connectors-kafka/$VERSION$/) documentation
 - [ ] Check the release on [Maven central](https://repo1.maven.org/maven2/com/typesafe/akka/akka-stream-kafka_2.13/$VERSION$/)
 
 ### When everything is on maven central
diff --git a/docs/src/main/paradox/.htaccess b/docs/src/main/paradox/.htaccess
deleted file mode 100644
index a9f7771d..00000000
--- a/docs/src/main/paradox/.htaccess
+++ /dev/null
@@ -1,2 +0,0 @@
-# No patch version redirect
-RedirectMatch 301 ^/docs/alpakka-kafka/([^/]+)/release-notes/1.0.html https://doc.akka.io/docs/alpakka-kafka/$1/release-notes/1.0.x.html
diff --git a/docs/src/main/paradox/_template/logo.st b/docs/src/main/paradox/_template/logo.st
deleted file mode 100644
index 47a5c0b4..00000000
--- a/docs/src/main/paradox/_template/logo.st
+++ /dev/null
@@ -1 +0,0 @@
-<a href="https://akka.io"><img class="logo" src="$page.base$images/akka-alpakka-reverse.svg"></a>
diff --git a/docs/src/main/paradox/atleastonce.md b/docs/src/main/paradox/atleastonce.md
index 014a6d4c..894f153c 100644
--- a/docs/src/main/paradox/atleastonce.md
+++ b/docs/src/main/paradox/atleastonce.md
@@ -1,5 +1,5 @@
 ---
-project.description: Achieve at-least-once semantics with offset committing in Alpakka Kafka.
+project.description: Achieve at-least-once semantics with offset committing in Apache Pekko Connectors Kafka.
 ---
 # At-Least-Once Delivery
 
diff --git a/docs/src/main/paradox/cluster-sharding.md b/docs/src/main/paradox/cluster-sharding.md
index da9b8c88..ec6e7faf 100644
--- a/docs/src/main/paradox/cluster-sharding.md
+++ b/docs/src/main/paradox/cluster-sharding.md
@@ -1,11 +1,11 @@
 ---
-project.description: Alpakka Kafka provides a module to use Kafka with Akka Cluster External Sharding.
+project.description: Apache Pekko Connectors Kafka provides a module to use Kafka with Apache Pekko Cluster External Sharding.
 ---
-# Akka Cluster Sharding
+# Apache Pekko Cluster Sharding
 
-Akka Cluster allows the user to use an @extref[external shard allocation](pekko:/typed/cluster-sharding.html#external-shard-allocation) strategy in order to give the user more control over how many shards are created and what cluster nodes they are assigned to. 
-If you consume Kafka messages into your Akka Cluster application then it's possible to run an Alpakka Kafka Consumer on each cluster node and co-locate Kafka partitions with Akka Cluster shards. 
-When partitions and shards are co-located together then there is less chance that a message must be transmitted over the network by the Akka Cluster Shard Coordinator to a destination user sharded entity.
+Apache Pekko Cluster allows the user to use an @extref[external shard allocation](pekko:/typed/cluster-sharding.html#external-shard-allocation) strategy in order to give the user more control over how many shards are created and what cluster nodes they are assigned to. 
+If you consume Kafka messages into your Apache Pekko Cluster application then it's possible to run an Apache Pekko Connectors Kafka Consumer on each cluster node and co-locate Kafka partitions with Apache Pekko Cluster shards. 
+When partitions and shards are co-located together then there is less chance that a message must be transmitted over the network by the Apache Pekko Cluster Shard Coordinator to a destination user sharded entity.
 
 ## Project Info
 
@@ -14,23 +14,23 @@ When partitions and shards are co-located together then there is less chance tha
 ## Dependency
 
 @@dependency [Maven,sbt,Gradle] {
-  group=com.typesafe.akka
-  artifact=akka-stream-kafka-cluster-sharding_$scala.binary.version$
+  group=org.apache.pekko
+  artifact=pekko-connectors-kafka-cluster-sharding_$scala.binary.version$
   version=$project.version$
-  symbol2=AkkaVersion
-  value2="$akka.version$"
-  group2=com.typesafe.akka
-  artifact2=akka-cluster-sharding-typed_$scala.binary.version$
-  version2=AkkaVersion
+  symbol2=PekkoVersion
+  value2="$pekko.version$"
+  group2=org.apache.pekko
+  artifact2=pekko-cluster-sharding-typed_$scala.binary.version$
+  version2=PekkoVersion
 }
 
-This module contains an Akka extension called `KafkaClusterSharding` and depends on `akka-cluster-sharding-typed`.
+This module contains an Apache Pekko extension called `KafkaClusterSharding` and depends on `pekko-cluster-sharding-typed`.
 
 ## Setup
 
 There are two steps required to setup the cluster sharding module.
 
-* Initialize Akka Cluster Sharding with a @scaladoc[ShardingMessageExtractor](org.apache.pekko.cluster.sharding.typed.ShardingMessageExtractor) to route Kafka consumed messages to the correct Akka Cluster shard and user entity.
+* Initialize Apache Pekko Cluster Sharding with a @scaladoc[ShardingMessageExtractor](org.apache.pekko.cluster.sharding.typed.ShardingMessageExtractor) to route Kafka consumed messages to the correct Apache Pekko Cluster shard and user entity.
 * Use a provided Rebalance Listener in your @scaladoc[ConsumerSettings](org.apache.pekko.kafka.ConsumerSettings) to update the external shard allocation at runtime when Kafka Consumer Group rebalances occur.
 
 @@@ note
@@ -42,7 +42,7 @@ It's a self-contained example that can run on a developer's laptop.
 
 ## Sharding Message Extractors
 
-To setup the @scaladoc[ShardingMessageExtractor](org.apache.pekko.cluster.sharding.typed.ShardingMessageExtractor) pick a factory method in the `KafkaClusterSharding` Akka extension that best fits your use case. 
+To set up the @scaladoc[ShardingMessageExtractor](org.apache.pekko.cluster.sharding.typed.ShardingMessageExtractor), pick a factory method in the `KafkaClusterSharding` Apache Pekko extension that best fits your use case.
 This module provides two kinds of extractors, extractors for entities that are within a @scaladoc[ShardingEnvelope](org.apache.pekko.cluster.sharding.typed.ShardingEnvelope) and without.  
 They're called `messageExtractor` and `messageExtractorNoEnvelope` respectively.
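
As a hedged sketch of what choosing one of these factory methods can look like (the `UserEvent` type, topic name, group id, and id function below are illustrative assumptions):

```scala
import scala.concurrent.Future
import scala.concurrent.duration._
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ConsumerSettings
import org.apache.pekko.kafka.cluster.sharding.KafkaClusterSharding

final case class UserEvent(userId: String, event: String)

implicit val system: ActorSystem = ActorSystem("sharding-example")

val kafkaConsumerSettings =
  ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers("localhost:9092")
    .withGroupId("user-events-group")

// Resolves the topic's partition count and derives shard ids from Kafka partitions,
// so messages and shards are partitioned the same way.
val messageExtractor: Future[KafkaClusterSharding.KafkaShardingNoEnvelopeExtractor[UserEvent]] =
  KafkaClusterSharding(system).messageExtractorNoEnvelope(
    timeout = 10.seconds,
    topic = "user-events",
    entityIdExtractor = (event: UserEvent) => event.userId,
    settings = kafkaConsumerSettings)
```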
 
@@ -71,7 +71,7 @@ Scala
 Java
 : @@snip [snip](/tests/src/test/java/docs/javadsl/ClusterShardingExample.java) { #message-extractor }
 
-Setup Akka Typed Cluster Sharding.
+Set up Apache Pekko Typed Cluster Sharding.
 
 Scala
 : @@snip [snip](/tests/src/test/scala/docs/scaladsl/ClusterShardingExample.scala) { #setup-cluster-sharding }
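
Continuing the sketch above, initializing typed Cluster Sharding with the external allocation strategy and the resolved extractor might look roughly like this; `userEventBehavior` is a hypothetical entity behavior:

```scala
import org.apache.pekko.actor.typed.Behavior
import org.apache.pekko.actor.typed.scaladsl.Behaviors
import org.apache.pekko.actor.typed.scaladsl.adapter._
import org.apache.pekko.cluster.sharding.external.ExternalShardAllocationStrategy
import org.apache.pekko.cluster.sharding.typed.scaladsl.{ ClusterSharding, Entity, EntityTypeKey }

val typeKey = EntityTypeKey[UserEvent]("user-events")

// A stand-in entity behavior; real applications hold state per entity here
def userEventBehavior(): Behavior[UserEvent] =
  Behaviors.receiveMessage { event =>
    println(s"User ${event.userId}: ${event.event}")
    Behaviors.same
  }

import system.dispatcher
messageExtractor.foreach { extractor =>
  ClusterSharding(system.toTyped).init(
    Entity(typeKey)(createBehavior = _ => userEventBehavior())
      .withAllocationStrategy(new ExternalShardAllocationStrategy(system, typeKey.name))
      .withMessageExtractor(extractor))
}
```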
@@ -81,7 +81,7 @@ Java
 
 ## Rebalance Listener
 
-The Rebalance Listener is a pre-defined Actor that will handle @scaladoc[ConsumerRebalanceEvents](org.apache.pekko.kafka.ConsumerRebalanceEvent) that will update the Akka Cluster External Sharding strategy when subscribed partitions are re-assigned to consumers running on different cluster nodes. 
+The Rebalance Listener is a pre-defined Actor that handles @scaladoc[ConsumerRebalanceEvents](org.apache.pekko.kafka.ConsumerRebalanceEvent) and updates the Apache Pekko Cluster External Sharding strategy when subscribed partitions are re-assigned to consumers running on different cluster nodes.
 This makes sure that shards remain local to Kafka Consumers after a consumer group rebalance.
 The Rebalance Listener is returned as a Typed @scaladoc[ActorRef[ConsumerRebalanceEvent]](org.apache.pekko.actor.typed.ActorRef) and must be converted to a classic @scaladoc[ActorRef](org.apache.pekko.actor.ActorRef) before being passed to @scaladoc[ConsumerSettings](org.apache.pekko.kafka.ConsumerSettings).
 
@@ -92,11 +92,11 @@ This allows you to create multiple Kafka Consumer Groups that consume the same t
 
 For example, a `user-events` topic is consumed by two consumer groups.
 One consumer group is used to maintain an up-to-date view of the user's profile and the other is used to represent an aggregate history of the types of user events.
-The same message type is used by separate Alpakka Kafka consumers, but the messages are routed to different Akka Cluster Sharding Coordinators that are setup to use separate @scaladoc[Behaviors](org.apache.pekko.actor.typed.Behavior).  
+The same message type is used by separate Apache Pekko Connectors Kafka consumers, but the messages are routed to different Apache Pekko Cluster Sharding Coordinators that are set up to use separate @scaladoc[Behaviors](org.apache.pekko.actor.typed.Behavior).
 
 @@@ 
 
-Create the rebalance listener using the extension and pass it into an Alpakka Kafka @scaladoc[Subscription](org.apache.pekko.kafka.Subscription).
+Create the rebalance listener using the extension and pass it into an Apache Pekko Connectors Kafka @scaladoc[Subscription](org.apache.pekko.kafka.Subscription).
 
 Scala
 : @@snip [snip](/tests/src/test/scala/docs/scaladsl/ClusterShardingExample.scala) { #rebalance-listener }
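
A hedged sketch of that wiring, reusing the `system` and `typeKey` from the sketches above (the topic name is a placeholder):

```scala
import org.apache.pekko.actor.typed.ActorRef
import org.apache.pekko.actor.typed.scaladsl.adapter._
import org.apache.pekko.kafka.{ ConsumerRebalanceEvent, Subscriptions }
import org.apache.pekko.kafka.cluster.sharding.KafkaClusterSharding

// Updates the external shard allocation whenever partitions move to other nodes
val rebalanceListener: ActorRef[ConsumerRebalanceEvent] =
  KafkaClusterSharding(system).rebalanceListener(typeKey)

val subscription = Subscriptions
  .topics("user-events")
  .withRebalanceListener(rebalanceListener.toClassic)
```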
diff --git a/docs/src/main/paradox/consumer-metadata.md b/docs/src/main/paradox/consumer-metadata.md
index f03b1805..312dd591 100644
--- a/docs/src/main/paradox/consumer-metadata.md
+++ b/docs/src/main/paradox/consumer-metadata.md
@@ -1,5 +1,5 @@
 ---
-project.description: Access Kafka consumer metadata by sending messages to the actor provided by Alpakka Kafka.
+project.description: Access Kafka consumer metadata by sending messages to the actor provided by Apache Pekko Connectors Kafka.
 ---
 # Consumer Metadata
 
diff --git a/docs/src/main/paradox/consumer-rebalance.md b/docs/src/main/paradox/consumer-rebalance.md
index 20a1f90d..6b1eb048 100644
--- a/docs/src/main/paradox/consumer-rebalance.md
+++ b/docs/src/main/paradox/consumer-rebalance.md
@@ -1,9 +1,9 @@
 ---
-project.description: React on Kafka rebalancing the partitions assigned to an Alpakka Kafka consumer.
+project.description: React on Kafka rebalancing the partitions assigned to an Apache Pekko Connectors Kafka consumer.
 ---
 # React on Partition Assignment
 
-Alpakka Kafka allows to react to the Kafka broker's balancing of partitions within a consumer group in two ways:
+Apache Pekko Connectors Kafka allows you to react to the Kafka broker's balancing of partitions within a consumer group in two ways:
 
 1. callbacks to the @apidoc[PartitionAssignmentHandler]
 1. messages to a @ref[rebalance listener actor](#listening-for-rebalance-events)
@@ -13,27 +13,27 @@ Alpakka Kafka allows to react to the Kafka broker's balancing of partitions with
 Kafka balances partitions between all consumers within a consumer group. When new consumers join or leave the group partitions are revoked from and assigned to those consumers.
 
 @@@ note { title="API may change" }
-This @apidoc[PartitionAssignmentHandler] API was introduced in Alpakka Kafka 2.0.0 and may still be subject to change.
+This @apidoc[PartitionAssignmentHandler] API was introduced in Apache Pekko Connectors Kafka 2.0.0 and may still be subject to change.
 
 Please give input on its usefulness in [Issue #985](https://github.com/akka/alpakka-kafka/issues/985).
 @@@
 
-Alpakka Kafka's @apidoc[PartitionAssignmentHandler] expects callbacks to be implemented, all are called with a set of @javadoc[TopicPartition](org.apache.kafka.common.TopicPartition)s and a reference to the @apidoc[RestrictedConsumer] which allows some access to the Kafka @javadoc[Consumer](org.apache.kafka.clients.consumer.Consumer) instance used internally by Alpakka Kafka.
+Apache Pekko Connectors Kafka's @apidoc[PartitionAssignmentHandler] expects callbacks to be implemented, all are called with a set of @javadoc[TopicPartition](org.apache.kafka.common.TopicPartition)s and a reference to the @apidoc[RestrictedConsumer] which allows some access to the Kafka @javadoc[Consumer](org.apache.kafka.clients.consumer.Consumer) instance used internally by Apache Pekko Connectors Kafka.
 
 1. `onRevoke` is called when the Kafka broker revokes partitions from this consumer
 1. `onAssign` is called when the Kafka broker assigns partitions to this consumer
 1. `onLost` is called when partition metadata has changed and partitions no longer exist.  This can occur if a topic is deleted or if the leader's metadata is stale. For details see [KIP-429 Incremental Rebalance Protocol](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol).
-1. `onStop` is called when the Alpakka Kafka consumer source is about to stop
+1. `onStop` is called when the Apache Pekko Connectors Kafka consumer source is about to stop
 
 Rebalancing starts with revoking partitions from all consumers in a consumer group and assigning all partitions to consumers in a second phase. During rebalance no consumer within that consumer group receives any messages.
 
-The @apidoc[PartitionAssignmentHandler] is Alpakka Kafka's replacement of the Kafka client library's @javadoc[ConsumerRebalanceListener](org.apache.kafka.clients.consumer.ConsumerRebalanceListener).
+The @apidoc[PartitionAssignmentHandler] is Apache Pekko Connectors Kafka's replacement of the Kafka client library's @javadoc[ConsumerRebalanceListener](org.apache.kafka.clients.consumer.ConsumerRebalanceListener).
 
 @@@ warning
 
 All methods on the @apidoc[PartitionAssignmentHandler] are called synchronously during Kafka's poll and rebalance logic. They block any other activity for that consumer.
 
-If any of these methods take longer than the timeout configured in `akka.kafka.consumer.partition-handler-warning` (default 5 seconds) a warning will be logged.
+If any of these methods take longer than the timeout configured in `pekko.kafka.consumer.partition-handler-warning` (default 5 seconds) a warning will be logged.
 
 @@@
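
To make the callbacks above concrete, a minimal sketch of a handler that only logs; everything other than the API names is illustrative, and per the warning these callbacks should stay fast:

```scala
import org.apache.kafka.common.TopicPartition
import org.apache.pekko.kafka.{ RestrictedConsumer, Subscriptions }
import org.apache.pekko.kafka.scaladsl.PartitionAssignmentHandler

val loggingHandler = new PartitionAssignmentHandler {
  def onRevoke(revoked: Set[TopicPartition], consumer: RestrictedConsumer): Unit =
    println(s"Revoked: $revoked")
  def onAssign(assigned: Set[TopicPartition], consumer: RestrictedConsumer): Unit =
    println(s"Assigned: $assigned")
  def onLost(lost: Set[TopicPartition], consumer: RestrictedConsumer): Unit =
    println(s"Lost: $lost")
  def onStop(current: Set[TopicPartition], consumer: RestrictedConsumer): Unit =
    println(s"Stopping while assigned: $current")
}

val subscription = Subscriptions
  .topics("topic-a")
  .withPartitionAssignmentHandler(loggingHandler)
```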
 
@@ -55,7 +55,7 @@ from consuming from specific topic partitions. Two kinds of messages will be sen
 * @apidoc[TopicPartitionsRevoked]
 
 You can use a typed @apidoc[org.apache.pekko.actor.typed.ActorRef] to implement your rebalance event listener by converting it into a classic actor ref.
-See the example below and read the @extref[Coexistence](pekko:/typed/coexisting.html) page of the Akka Documentation for more details on Akka Classic and Typed interoperability.
+See the example below and read the @extref[Coexistence](pekko:/typed/coexisting.html) page of the Apache Pekko Documentation for more details on Apache Pekko Classic and Typed interoperability.
 
 Scala
 : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/ConsumerExample.scala) { #withTypedRebalanceListenerActor }
diff --git a/docs/src/main/paradox/consumer.md b/docs/src/main/paradox/consumer.md
index dc0ac306..7f23daf2 100644
--- a/docs/src/main/paradox/consumer.md
+++ b/docs/src/main/paradox/consumer.md
@@ -1,16 +1,16 @@
 ---
-project.description: Consume messages from Apache Kafka in Akka Streams sources and their commit offsets to Kafka.
+project.description: Consume messages from Apache Kafka in Apache Pekko Streams sources and commit their offsets to Kafka.
 ---
 # Consumer
 
-A consumer subscribes to Kafka topics and passes the messages into an Akka Stream.
+A consumer subscribes to Kafka topics and passes the messages into an Apache Pekko Stream.
 
 The underlying implementation is using the `KafkaConsumer`, see @javadoc[Kafka API](org.apache.kafka.clients.consumer.KafkaConsumer) for a description of consumer groups, offsets, and other details.
 
 
 ## Choosing a consumer
 
-Alpakka Kafka offers a large variety of consumers that connect to Kafka and stream data. The tables below may help you to find the consumer best suited for your use-case.
+Apache Pekko Connectors Kafka offers a large variety of consumers that connect to Kafka and stream data. The tables below may help you to find the consumer best suited for your use-case.
 
 ### Consumers
 
@@ -50,7 +50,7 @@ When creating a consumer source you need to pass in @apidoc[ConsumerSettings] th
 * group id for the consumer, note that offsets are always committed for a given consumer group
 * Kafka consumer tuning parameters
 
-Alpakka Kafka's defaults for all settings are defined in `reference.conf` which is included in the library JAR.
+Apache Pekko Connectors Kafka's defaults for all settings are defined in `reference.conf` which is included in the library JAR.
 
 Important consumer settings
 : | Setting   | Description                                  |
@@ -83,7 +83,7 @@ Java
 application.conf (HOCON)
 : @@ snip [app.conf](/tests/src/test/resources/application.conf) { #consumer-config-inheritance }
 
-Read the settings that inherit the defaults from "akka.kafka.consumer" settings:
+Read the settings that inherit the defaults from "pekko.kafka.consumer" settings:
 
 Scala
 : @@ snip [read](/tests/src/test/scala/docs/scaladsl/ConsumerExample.scala) { #config-inheritance } 
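
As a hedged illustration of reading such an inherited section (the section name `our-kafka-consumer` and the broker address are assumptions):

```scala
// application.conf is assumed to contain:
//   our-kafka-consumer: ${pekko.kafka.consumer} {
//     kafka-clients.bootstrap.servers = "kafka-host:9092"
//   }
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ConsumerSettings

val system = ActorSystem("config-example")
val config = system.settings.config.getConfig("our-kafka-consumer")
val consumerSettings =
  ConsumerSettings(config, new StringDeserializer, new StringDeserializer)
```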
@@ -96,9 +96,9 @@ Java
 
 The Kafka read offset can either be stored in Kafka (see below), or at a data store of your choice.
 
-@apidoc[Consumer.plainSource](Consumer$) { java="#plainSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.javadsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],akka.kafka.javadsl.Consumer.Control]" scala="#plainSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.scaladsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],akka.kafka.scaladsl.Consumer.Control]" } 
+@apidoc[Consumer.plainSource](Consumer$) { java="#plainSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.javadsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],org.apache.pekko.kafka.javadsl.Consumer.Control]" scala="#plainSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.scaladsl.Source[org.apache.kafka.cli [...]
 and 
-@apidoc[Consumer.plainPartitionedManualOffsetSource](Consumer$) { java="#plainPartitionedManualOffsetSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.AutoSubscription,getOffsetsOnAssign:java.util.function.Function[java.util.Set[org.apache.kafka.common.TopicPartition],java.util.concurrent.CompletionStage[java.util.Map[org.apache.kafka.common.TopicPartition,Long]]]):akka.stream.javadsl.Source[akka.japi.Pair[org.apache.kafka.common.TopicPartition,akka.stream.jav [...]
+@apidoc[Consumer.plainPartitionedManualOffsetSource](Consumer$) { java="#plainPartitionedManualOffsetSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.AutoSubscription,getOffsetsOnAssign:java.util.function.Function[java.util.Set[org.apache.kafka.common.TopicPartition],java.util.concurrent.CompletionStage[java.util.Map[org.apache.kafka.common.TopicPartition,Long]]]):org.apache.pekko.stream.javadsl.Source[org.apache.pekko.japi.Pair[org.ap [...]
 can be used to emit @javadoc[ConsumerRecord](org.apache.kafka.clients.consumer.ConsumerRecord) elements
 as received from the underlying @javadoc[KafkaConsumer](org.apache.kafka.clients.consumer.KafkaConsumer). They do not have support for committing offsets to Kafka. When using
 these Sources, either store an offset externally, or use auto-commit (note that auto-commit is disabled by default).
@@ -122,7 +122,7 @@ Java
 : @@ snip [snip](/tests/src/test/java/docs/javadsl/ConsumerExampleTest.java) { #plainSource }
 
 For 
-@apidoc[Consumer.plainSource](Consumer$) { java="#plainSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.javadsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],akka.kafka.javadsl.Consumer.Control]" scala="#plainSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.scaladsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],akka.kafka.scaladsl.Consumer.Control]" } 
+@apidoc[Consumer.plainSource](Consumer$) { java="#plainSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.javadsl.Source[org.apache.kafka.clients.consumer.ConsumerRecord[K,V],org.apache.pekko.kafka.javadsl.Consumer.Control]" scala="#plainSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.scaladsl.Source[org.apache.kafka.cli [...]
 the @apidoc[Subscriptions.assignmentWithOffset](Subscriptions$) specifies the starting point (offset) for a given consumer group id, topic and partition. The group id is defined in the @apidoc[ConsumerSettings$].
 
 Alternatively, with @apidoc[Consumer.plainPartitionedManualOffsetSource](Consumer$), only the consumer group id and the topic are required on creation.
@@ -136,7 +136,7 @@ emits tuples of assigned topic-partition and a corresponding source, as in [Sour
 ## Offset Storage in Kafka - committing
 
 The 
-@apidoc[Consumer.committableSource](Consumer$) { java="#committableSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.javadsl.Source[akka.kafka.ConsumerMessage.CommittableMessage[K,V],akka.kafka.javadsl.Consumer.Control]" scala="#committableSource[K,V](settings:akka.kafka.ConsumerSettings[K,V],subscription:akka.kafka.Subscription):akka.stream.scaladsl.Source[akka.kafka.ConsumerMessage.CommittableMessage[K,V],akka.kafka.scaladsl.Consume [...]
+@apidoc[Consumer.committableSource](Consumer$) { java="#committableSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.javadsl.Source[org.apache.pekko.kafka.ConsumerMessage.CommittableMessage[K,V],org.apache.pekko.kafka.javadsl.Consumer.Control]" scala="#committableSource[K,V](settings:org.apache.pekko.kafka.ConsumerSettings[K,V],subscription:org.apache.pekko.kafka.Subscription):org.apache.pekko.stream.scaladsl.Source[pekk [...]
 makes it possible to commit offset positions to Kafka. Compared to auto-commit this gives exact control of when a message is considered consumed.
 
 This is useful when "at-least-once" delivery is desired, as each message will likely be delivered one time, but in failure cases could be received more than once.
@@ -160,7 +160,7 @@ Scala
 Java
 : @@ snip [snip](/tests/src/test/java/docs/javadsl/ConsumerExampleTest.java) { #committerSink }
  
-When creating a @apidoc[Committer.sink](Committer$) you need to pass in @apidoc[CommitterSettings$]. These may be created by passing the actor system to read the defaults from the config section `akka.kafka.committer`, or by passing a @scaladoc[Config](com.typesafe.config.Config) instance with the same structure.
+When creating a @apidoc[Committer.sink](Committer$) you need to pass in @apidoc[CommitterSettings$]. These may be created by passing the actor system to read the defaults from the config section `pekko.kafka.committer`, or by passing a @scaladoc[Config](com.typesafe.config.Config) instance with the same structure.
 
 Table
 : | Setting   | Description                                  | Default Value |
@@ -174,7 +174,7 @@ reference.conf
 
 All commit batches are aggregated internally and passed on to Kafka very often (in every poll cycle), the Committer settings configure how the stream sends the offsets to the internal actor which communicates with the Kafka broker. Increasing these values means that in case of a failure you may have to re-process more messages.
 
-If you use Kafka older than version 2.1.0 and consume from a topic with low activity, and possibly no messages arrive for more than 24 hours, consider enabling periodical commit refresh (`akka.kafka.consumer.commit-refresh-interval` configuration parameters), otherwise offsets might expire in the Kafka storage. This has been fixed in Kafka 2.1.0 (See [KAFKA-4682](https://issues.apache.org/jira/browse/KAFKA-4682)).
+If you use Kafka older than version 2.1.0 and consume from a topic with low activity, where possibly no messages arrive for more than 24 hours, consider enabling periodic commit refresh (the `pekko.kafka.consumer.commit-refresh-interval` configuration parameter); otherwise offsets might expire in the Kafka storage. This has been fixed in Kafka 2.1.0 (See [KAFKA-4682](https://issues.apache.org/jira/browse/KAFKA-4682)).
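
Putting the committable source and committer sink together, a minimal at-least-once sketch; `business` is a placeholder, and `consumerSettings` plus an implicit `ActorSystem` (named `system`) are assumed in scope:

```scala
import scala.concurrent.Future
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.pekko.Done
import org.apache.pekko.kafka.{ CommitterSettings, Subscriptions }
import org.apache.pekko.kafka.scaladsl.{ Committer, Consumer }
import org.apache.pekko.kafka.scaladsl.Consumer.DrainingControl

def business(record: ConsumerRecord[String, String]): Future[Done] =
  Future.successful(Done) // stand-in for real processing

val committerSettings = CommitterSettings(system) // defaults from pekko.kafka.committer

val control: DrainingControl[Done] =
  Consumer
    .committableSource(consumerSettings, Subscriptions.topics("topic1"))
    .mapAsync(1) { msg =>
      // Only emit the offset for committing once processing has finished
      business(msg.record).map(_ => msg.committableOffset)(system.dispatcher)
    }
    .toMat(Committer.sink(committerSettings))(DrainingControl.apply)
    .run()
```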
 
 #### Committer variants
 
@@ -193,7 +193,7 @@ These factory methods are part of the @apidoc[Committer$].
 
 The @apidoc[Consumer.commitWithMetadataSource](Consumer$) allows you to add metadata to the committed offset based on the last consumed record.
 
-Note that the first offset provided to the consumer during a partition assignment will not contain metadata. This offset can get committed due to a periodic commit refresh (`akka.kafka.consumer.commit-refresh-interval` configuration parameters) and the commit will not contain metadata.
+Note that the first offset provided to the consumer during a partition assignment will not contain metadata. This offset can get committed due to a periodic commit refresh (the `pekko.kafka.consumer.commit-refresh-interval` configuration parameter) and the commit will not contain metadata.
 
 Scala
 : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/ConsumerExample.scala) { #commitWithMetadata }
@@ -272,8 +272,8 @@ It is shared by creating a @apidoc[org.apache.pekko.kafka.KafkaConsumerActor$].
 You need to create the actor and stop it by sending `KafkaConsumerActor.Stop` when it is not needed any longer. 
 You pass the classic @apidoc[org.apache.pekko.actor.ActorRef] as a parameter to the @apidoc[Consumer](Consumer$) factory methods.
 
-When using a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] you can create the @apidoc[org.apache.pekko.kafka.KafkaConsumerActor$] by using the Akka typed adapter to create a classic @apidoc[org.apache.pekko.actor.ActorRef].
-Then you can carry on using the existing Alpakka Kafka API.
+When using a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] you can create the @apidoc[org.apache.pekko.kafka.KafkaConsumerActor$] by using the Apache Pekko typed adapter to create a classic @apidoc[org.apache.pekko.actor.ActorRef].
+Then you can carry on using the existing Apache Pekko Connectors Kafka API.
 
 Scala
 : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/PartitionExamples.scala) { #consumerActorTyped }
diff --git a/docs/src/main/paradox/debugging.md b/docs/src/main/paradox/debugging.md
index f6d10e5f..e86cc9c8 100644
--- a/docs/src/main/paradox/debugging.md
+++ b/docs/src/main/paradox/debugging.md
@@ -1,31 +1,31 @@
 # Debugging
 
-Debugging setups with the Alpakka Kafka Connector will be required at times. This page collects a few ideas to start out with in case the connector does not behave as you expected.
+Debugging setups with the Apache Pekko Connectors Kafka Connector will be required at times. This page collects a few ideas to start out with in case the connector does not behave as you expected.
 
 ## Logging with SLF4J
 
-Akka, Akka Streams and thus the Alpakka Kafka Connector support [SLF4J logging API](https://www.slf4j.org/) by adding Akka's SLF4J module and an SLF4J compatible logging framework, eg. [Logback](https://logback.qos.ch/).
+Apache Pekko, Apache Pekko Streams and thus the Apache Pekko Connectors Kafka Connector support the [SLF4J logging API](https://www.slf4j.org/) by adding Apache Pekko's SLF4J module and an SLF4J-compatible logging framework, e.g. [Logback](https://logback.qos.ch/).
 
-The Kafka client library used by the Alpakka Kafka connector uses SLF4J, as well.
+The Kafka client library used by the Apache Pekko Connectors Kafka connector uses SLF4J, as well.
 
 @@dependency [Maven,sbt,Gradle] {
-  symbol=AkkaVersion
-  value="$akka.version$"
-  group=com.typesafe.akka
-  artifact=akka-slf4j_$scala.binary.version$
-  version=AkkaVersion
+  symbol=PekkoVersion
+  value="$pekko.version$"
+  group=org.apache.pekko
+  artifact=pekko-slf4j_$scala.binary.version$
+  version=PekkoVersion
   group2=ch.qos.logback
   artifact2=logback-classic
   version2=1.2.3
 }
 
-To enable Akka SLF4J logging, configure Akka in `application.conf` as below. Refer to the @extref[Pekko documentation](pekko:logging.html#slf4j) for details.
+To enable Apache Pekko SLF4J logging, configure Apache Pekko in `application.conf` as below. Refer to the @extref[Pekko documentation](pekko:logging.html#slf4j) for details.
 
 ```hocon
-akka {
-  loggers = ["akka.event.slf4j.Slf4jLogger"]
+pekko {
+  loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"]
   loglevel = "DEBUG"
-  logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
+  logging-filter = "org.apache.pekko.event.slf4j.Slf4jLoggingFilter"
 }
 ```
 
@@ -34,7 +34,7 @@ akka {
 In case you're debugging the internals in the Kafka Consumer actor, you might want to enable receive logging to see all messages it receives. To lower the log message volume, change the Kafka poll interval to something larger, eg. 300 ms.
 
 ```hocon
-akka {
+pekko {
   actor {
     debug.receive = true
   }
diff --git a/docs/src/main/paradox/discovery.md b/docs/src/main/paradox/discovery.md
index b62a5f71..fb56c4a0 100644
--- a/docs/src/main/paradox/discovery.md
+++ b/docs/src/main/paradox/discovery.md
@@ -1,27 +1,27 @@
 ---
-project.description: Akka Discovery can be used to achieve Kafka broker discovery from the operations environment.
+project.description: Apache Pekko Discovery can be used to achieve Kafka broker discovery from the operations environment.
 ---
 # Service discovery
 
-By using @extref:[Pekko Discovery](pekko:discovery/index.html) Alpakka Kafka may read the Kafka bootstrap server addresses from any Akka Discovery-compatible service discovery mechanism.
+By using @extref:[Pekko Discovery](pekko:discovery/index.html) Apache Pekko Connectors Kafka may read the Kafka bootstrap server addresses from any Apache Pekko Discovery-compatible service discovery mechanism.
 
-Akka Discovery supports Configuration (HOCON), DNS (SRV records), and aggregation of multiple discovery methods out-of-the-box. Kubernetes API, AWS API: EC2 Tag-Based Discovery, AWS API: ECS Discovery and Consul implementations for Akka Discovery are available in @extref:[Pekko Management](pekko-management:).
+Apache Pekko Discovery supports Configuration (HOCON), DNS (SRV records), and aggregation of multiple discovery methods out-of-the-box. Kubernetes API, AWS API: EC2 Tag-Based Discovery, AWS API: ECS Discovery and Consul implementations for Apache Pekko Discovery are available in @extref:[Pekko Management](pekko-management:).
 
 ## Dependency
 
-The Akka Discovery version must match the Akka version used in your build. To use the implementations provided by Akka Management, you need to add the desired dependency.
+The Apache Pekko Discovery version must match the Apache Pekko version used in your build. To use the implementations provided by Apache Pekko Management, you need to add the desired dependency.
 
 @@dependency [Maven,sbt,Gradle] {
-  symbol=AkkaVersion
-  value=$akka.version$
-  group=com.typesafe.akka
-  artifact=akka-discovery_$scala.binary.version$
-  version=AkkaVersion
+  symbol=PekkoVersion
+  value=$pekko.version$
+  group=org.apache.pekko
+  artifact=pekko-discovery_$scala.binary.version$
+  version=PekkoVersion
 }
 
 ## Configure consumer settings
 
-To use Akka Discovery with Alpakka Kafka consumers, configure a section for your consumer settings which inherits the default settings (by using `${akka.kafka.consumer}`) and add a service name and a timeout for the service lookup. Setting the `service-name` in the `akka.kafka.consumer` config will work, if all your consumers connect to the same Kafka broker.
+To use Apache Pekko Discovery with Apache Pekko Connectors Kafka consumers, configure a section for your consumer settings which inherits the default settings (by using `${pekko.kafka.consumer}`) and add a service name and a timeout for the service lookup. Setting the `service-name` in the `pekko.kafka.consumer` config will work if all your consumers connect to the same Kafka broker.
 
 The service name must match the one configured with the discovery technology you use. Overwrite the `resolve-timeout` depending on the discovery technology used, and your environment.
 
@@ -29,7 +29,7 @@ Note that consumers and producers may share a service (as shown in the examples
 
 application.conf
 :   ```hocon
-    discovery-consumer: ${akka.kafka.consumer} {
+    discovery-consumer: ${pekko.kafka.consumer} {
       service-name = "kafkaService1"
     }
     ```
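
A hedged sketch of turning that section into `ConsumerSettings` via `DiscoverySupport` (the deserializers are assumptions, and the service name must be resolvable by the configured discovery mechanism):

```scala
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ConsumerSettings
import org.apache.pekko.kafka.scaladsl.DiscoverySupport

implicit val system: ActorSystem = ActorSystem("discovery-example")

val config = system.settings.config.getConfig("discovery-consumer")
val consumerSettings =
  ConsumerSettings(config, new StringDeserializer, new StringDeserializer)
    // Looks up the bootstrap servers through Pekko Discovery before the consumer starts
    .withEnrichAsync(DiscoverySupport.consumerBootstrapServers(config))
```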
@@ -45,7 +45,7 @@ Java
 
 ## Configure producer settings
 
-To use Akka Discovery with Alpakka Kafka producers, configure a section for your producer settings which inherits the default settings (by using `${akka.kafka.producer}`) and add a service name and a timeout for the service lookup. Setting the `service-name` in the `akka.kafka.producer` config will work, if all your producers connect to the same Kafka broker.
+To use Apache Pekko Discovery with Apache Pekko Connectors Kafka producers, configure a section for your producer settings which inherits the default settings (by using `${pekko.kafka.producer}`) and add a service name and a timeout for the service lookup. Setting the `service-name` in the `pekko.kafka.producer` config will work if all your producers connect to the same Kafka broker.
 
 The service name must match the one configured with the discovery technology you use. Overwrite the `resolve-timeout` depending on the discovery technology used, and your environment.
 
@@ -53,7 +53,7 @@ Note that consumers and producers may share a service (as shown in the examples
 
 application.conf
 :   ```hocon
-    discovery-producer: ${akka.kafka.producer} {
+    discovery-producer: ${pekko.kafka.producer} {
       service-name = "kafkaService1"
     }
     ```
@@ -75,12 +75,12 @@ application.conf
 :   &#9;
 
     ```hocon
-    akka.kafka.producer {
+    pekko.kafka.producer {
       service-name = "kafkaServiceDefault"
       service-name = ${?KAFKA_SERVICE_NAME}
     }
     
-    akka.kafka.consumer {
+    pekko.kafka.consumer {
       service-name = "kafkaServiceDefault"
       service-name = ${?KAFKA_SERVICE_NAME}
     }
@@ -90,11 +90,11 @@ application.conf
 
 ## Specify a different service discovery mechanism
 
-The Actor System-wide service discovery is used by default, to choose a different Akka Discovery implementation, set the `discovery-method` setting in the producer and consumer configurations accordingly.
+The Actor System-wide service discovery is used by default; to choose a different Apache Pekko Discovery implementation, set the `discovery-method` setting in the producer and consumer configurations accordingly.
 
 application.conf
 :   ```hocon
-    discovery-producer: ${akka.kafka.producer} {
+    discovery-producer: ${pekko.kafka.producer} {
       discovery-method = "kubernetes-api"
       service-name = "kafkaService1"
       resolve-timeout = 3 seconds
@@ -103,7 +103,7 @@ application.conf
 
 ## Use Config (HOCON) to describe the bootstrap servers
 
-The setup below uses the built-in Akka Discovery implementation reading from Config (HOCON) files. That might be a good choice for development and testing. You may use the @extref:[Aggregate implementation](pekko:discovery/index.html#discovery-method-aggregate-multiple-discovery-methods) to first use another discovery technology, before falling back to the config file.
+The setup below uses the built-in Apache Pekko Discovery implementation reading from Config (HOCON) files. That might be a good choice for development and testing. You may use the @extref:[Aggregate implementation](pekko:discovery/index.html#discovery-method-aggregate-multiple-discovery-methods) to first use another discovery technology, before falling back to the config file.
 
 application.conf
 :   @@snip [conf](/tests/src/test/scala/org/apache/pekko/kafka/ConsumerSettingsSpec.scala) { #discovery-with-config }
diff --git a/docs/src/main/paradox/errorhandling.md b/docs/src/main/paradox/errorhandling.md
index 6a40829e..dc2923e1 100644
--- a/docs/src/main/paradox/errorhandling.md
+++ b/docs/src/main/paradox/errorhandling.md
@@ -1,15 +1,15 @@
 ---
-project.description: Handle errors from the Kafka API in Alpakka Kafka.
+project.description: Handle errors from the Kafka API in Apache Pekko Connectors Kafka.
 ---
 # Error handling
 
 ## Failing consumer
 
-Errors from the Kafka consumer will be forwarded to the Alpakka sources that use it, the sources will fail their streams.
+Errors from the Kafka consumer will be forwarded to the Apache Pekko Connectors sources that use it, and the sources will fail their streams.
 
 ### Lost connection to the Kafka broker
 
-To fail a Alpakka Kafka consumer in case the Kafka broker is not available, configure a **Connection Checker** via @apidoc[ConsumerSettings]. If not **Connection Checker** is configured, Alpakka will continue to poll the broker indefinitely.
+To fail an Apache Pekko Connectors Kafka consumer in case the Kafka broker is not available, configure a **Connection Checker** via @apidoc[ConsumerSettings]. If no **Connection Checker** is configured, Apache Pekko Connectors will continue to poll the broker indefinitely.
 
 
 ## Failing producer
@@ -18,7 +18,7 @@ Retry handling for producers is built-in into Kafka. In case of failure when sen
 
 ## Restarting the stream with a backoff stage
 
-Akka streams @extref[provides graph stages](pekko:stream/stream-error.html#delayed-restarts-with-a-backoff-stage)
+Apache Pekko Streams @extref[provides graph stages](pekko:stream/stream-error.html#delayed-restarts-with-a-backoff-stage)
 to gracefully restart a stream on failure, with a configurable backoff. This can be taken advantage of to restart a failing stream and its consumer with an exponential backoff, by wrapping it in a `RestartSource`.
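
For illustration, ahead of the project's own snippets below, a minimal sketch of such a wrapper; the backoff values and `processRecord` are placeholders, and `consumerSettings` plus an implicit `ActorSystem` are assumed in scope:

```scala
import scala.concurrent.duration._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.pekko.kafka.Subscriptions
import org.apache.pekko.kafka.scaladsl.Consumer
import org.apache.pekko.stream.RestartSettings
import org.apache.pekko.stream.scaladsl.{ RestartSource, Sink }

def processRecord(record: ConsumerRecord[String, String]): Unit = () // stand-in

val restartSettings =
  RestartSettings(minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2)

val streamCompletion = RestartSource
  .onFailuresWithBackoff(restartSettings) { () =>
    // A fresh consumer source is materialized after each failure and backoff
    Consumer
      .plainSource(consumerSettings, Subscriptions.topics("topic1"))
      .map(processRecord)
  }
  .runWith(Sink.ignore)
```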
 
 Scala
@@ -48,11 +48,11 @@ Sometimes, due to various Kafka server bugs (see below) the consumer will fail t
     - **NOTE**: consumer will never skip data, but may reprocess many days of data, up to the topic's configured
    retention
 
-Alpakka Kafka cannot do anything for the first two approaches. However, the `offset-reset-protection` configuration in
+Apache Pekko Connectors Kafka cannot do anything for the first two approaches. However, the `offset-reset-protection` configuration in
  the `ConsumerSettings` can help detect the inadvertent loss of offsets and subsequent reset. You can configure 
-`akka.kafka.consumer.offset-reset-protection.offset-threshold` to a number of offsets back from the _latest requested
+`pekko.kafka.consumer.offset-reset-protection.offset-threshold` to a number of offsets back from the _latest requested
  offset_ that would indicate one of these reset bugs has occurred. Similarly, setting 
-`akka.kafka.consumer.offset-reset-protection.time-threshold` will reset the consumer back to the latest committed offset
+`pekko.kafka.consumer.offset-reset-protection.time-threshold` will reset the consumer back to the latest committed offset
  when a record is older than `now - time-threshold`; that is, `time-threshold` older than the last received offset.
 
 When the client notices that the offset from the next fetched batch is outside the threshold for a given partition, the
@@ -69,7 +69,7 @@ fetch again from the latest offset. That means, the consumer would only need to
 10x improvement from the 1000 messages it would have had to process with offset-reset-protection enabled.
 
 By default, consumer reset protection is **off**. You must set 
-`akka.kafka.consumer.offset-reset-protection.enable = true`, and set one of the thresholds, to enable it.
+`pekko.kafka.consumer.offset-reset-protection.enable = true`, and set one of the thresholds, to enable it.
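
A hedged sketch of enabling it programmatically; the threshold values are purely illustrative, and an `ActorSystem` named `system` is assumed in scope:

```scala
import com.typesafe.config.ConfigFactory
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.pekko.kafka.ConsumerSettings

// Layer the overrides on top of the defaults from reference.conf
val protectionConfig = ConfigFactory
  .parseString("""
    pekko.kafka.consumer.offset-reset-protection {
      enable = true
      offset-threshold = 1000
      time-threshold = 1 hour
    }
  """)
  .withFallback(system.settings.config)
  .getConfig("pekko.kafka.consumer")

val protectedSettings =
  ConsumerSettings(protectionConfig, new StringDeserializer, new StringDeserializer)
```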
 
 Internally, the consumer attempts to avoid too much overhead in checking each batch, so it verifies only that the first
 and the last offset in each received batch for each partition are within the threshold. This should have a minimal
diff --git a/docs/src/main/paradox/home.md b/docs/src/main/paradox/home.md
index 4fb60bf7..213d9776 100644
--- a/docs/src/main/paradox/home.md
+++ b/docs/src/main/paradox/home.md
@@ -1,8 +1,8 @@
 # Overview
 
-The [Apache Pekko Kafka Connector project](https://pekko.apache.org/docs/pekko-connectors-kafka/current/) is an open source initiative to implement stream-aware and reactive integration pipelines for Java and Scala. It is built on top of @extref[Pekko Streams](pekko:stream/index.html), and has been designed from the ground up to understand streaming natively and provide a DSL for reactive and stream-oriented programming, with built-in support for backpressure. Akka Streams is a [Reactive [...]
+The [Apache Pekko Kafka Connector project](https://pekko.apache.org/docs/pekko-connectors-kafka/current/) is an open source initiative to implement stream-aware and reactive integration pipelines for Java and Scala. It is built on top of @extref[Pekko Streams](pekko:stream/index.html), and has been designed from the ground up to understand streaming natively and provide a DSL for reactive and stream-oriented programming, with built-in support for backpressure. Apache Pekko Streams is a [ [...]
 
-This **Alpakka Kafka connector** lets you connect [Apache Kafka](https://kafka.apache.org/) to Akka Streams. It was formerly known as **Akka Streams Kafka** and even **Reactive Kafka**.
+This **Apache Pekko Connectors Kafka connector** lets you connect [Apache Kafka](https://kafka.apache.org/) to Apache Pekko Streams. It was formerly known as **Akka Streams Kafka** and even **Reactive Kafka**.
 
 ## Project Info
 
@@ -10,21 +10,9 @@ This **Alpakka Kafka connector** lets you connect [Apache Kafka](https://kafka.a
 
 ## Matching Kafka Versions
 
-|Kafka client | Scala Versions | Akka version | Alpakka Kafka Connector
-|-------------|----------------|--------------|-------------------------
-|[3.0.1](https://dist.apache.org/repos/dist/release/kafka/3.0.1/RELEASE_NOTES.html) | 2.13             | 2.6.18+         | [release 3.0.1](https://github.com/akka/alpakka-kafka/releases/tag/v3.0.0)
-|[3.0.0](https://blogs.apache.org/kafka/entry/what-s-new-in-apache6)                | 2.13             | 2.6.18+         | [release 3.0.0 RC1](https://github.com/akka/alpakka-kafka/releases/tag/v3.0.0-RC1)
-|[2.7.0](https://archive.apache.org/dist/kafka/2.7.0/RELEASE_NOTES.html) | 2.13, 2.12       | 2.6.14+         | @ref:[release 2.1.0](release-notes/2.1.x.md)
-|[2.4.1](https://archive.apache.org/dist/kafka/2.4.1/RELEASE_NOTES.html) | 2.13, 2.12, 2.11 | 2.5.31+, 2.6.6+ | @ref:[release 2.0.5](release-notes/2.0.x.md)
-|[2.4.1](https://archive.apache.org/dist/kafka/2.4.1/RELEASE_NOTES.html) | 2.13, 2.12, 2.11 | 2.5.30+, 2.6.6+ | @ref:[release 2.0.4](release-notes/2.0.x.md)
-|[2.4.1](https://archive.apache.org/dist/kafka/2.4.1/RELEASE_NOTES.html) | 2.13, 2.12, 2.11 | 2.5.30+, 2.6.3+ | @ref:[release 2.0.3](release-notes/2.0.x.md)
-|[2.4.0](https://archive.apache.org/dist/kafka/2.4.0/RELEASE_NOTES.html) | 2.13, 2.12, 2.11 | 2.5.23+, 2.6.x | @ref:[release 2.0.0](release-notes/2.0.x.md)
-|[2.1.1](https://archive.apache.org/dist/kafka/2.1.1/RELEASE_NOTES.html) | 2.13, 2.12, 2.11 | 2.5.x        | @ref:[release 1.0.4](release-notes/1.0.x.md#1-0-4)
-|[2.1.1](https://archive.apache.org/dist/kafka/2.1.1/RELEASE_NOTES.html) | 2.12, 2.11       | 2.5.x        | @ref:[release 1.0.1](release-notes/1.0.x.md#1-0-1)
-|[2.1.0](https://archive.apache.org/dist/kafka/2.1.0/RELEASE_NOTES.html) | 2.12, 2.11       | 2.5.x        | @ref:[release 1.0](release-notes/1.0.x.md#1-0)
-|1.1.x        | 2.12, 2.11 | 2.5.x        | [release 0.20+](https://github.com/akka/alpakka-kafka/releases)
-|1.0.x        | 2.12, 2.11 | 2.5.x        | [release 0.20+](https://github.com/akka/alpakka-kafka/releases)
-|0.11.x       | 2.12, 2.11 | 2.5.x        | [release 0.19](https://github.com/akka/alpakka-kafka/milestone/19?closed=1)
+| Kafka client                                                                       | Scala Versions | Apache Pekko version | Apache Pekko Connectors Kafka Connector
+|------------------------------------------------------------------------------------|----------------|----------------------|-------------------------
+| [3.0.1](https://dist.apache.org/repos/dist/release/kafka/3.0.1/RELEASE_NOTES.html) | 2.13             | 1.0.0                | 1.0.0
 
 @@@ note
 
@@ -41,50 +29,50 @@ Check even Confluent's [Versions and Interoperability](https://docs.confluent.io
 ## Dependencies
 
 @@dependency [Maven,sbt,Gradle] {
-  group=com.typesafe.akka
-  artifact=akka-stream-kafka_$scala.binary.version$
+  group=org.apache.pekko
+  artifact=pekko-connectors-kafka_$scala.binary.version$
   version=$project.version$
-  symbol2=AkkaVersion
-  value2="$akka.version$"
-  group2=com.typesafe.akka
-  artifact2=akka-stream_$scala.binary.version$
-  version2=AkkaVersion
+  symbol2=PekkoVersion
+  value2="$pekko.version$"
+  group2=org.apache.pekko
+  artifact2=pekko-stream_$scala.binary.version$
+  version2=PekkoVersion
 }
 
-This connector depends on Akka 2.6.x and note that it is important that all `akka-*` dependencies are in the same version, so it is recommended to depend on them explicitly to avoid problems with transient dependencies causing an unlucky mix of versions.
+This connector depends on Apache Pekko 1.0.x. Note that it is important that all `pekko-*` dependencies are of the same version, so it is recommended to depend on them explicitly to avoid problems with transitive dependencies causing an unlucky mix of versions.
 
-Alpakka Kafka APIs accept a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] or a classic @apidoc[org.apache.pekko.actor.ActorSystem] because both implement the @apidoc[org.apache.pekko.actor.ClassicActorSystemProvider] @scala[trait]@java[interface].
-There are some Alpakka Kafka APIs that only accept classic a @apidoc[org.apache.pekko.actor.ActorRef], such as the @ref[rebalance listener](./consumer-rebalance.md) API, but otherwise there is no difference between running Alpakka Kafka and any other Akka Streams implementation with a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem]. 
-For more information on Akka classic and typed interoperability read the @extref[Coexistence](pekko:/typed/coexisting.html) page of the Akka Documentation.
+Apache Pekko Connectors Kafka APIs accept a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] or a classic @apidoc[org.apache.pekko.actor.ActorSystem] because both implement the @apidoc[org.apache.pekko.actor.ClassicActorSystemProvider] @scala[trait]@java[interface].
+There are some Apache Pekko Connectors Kafka APIs that only accept a classic @apidoc[org.apache.pekko.actor.ActorRef], such as the @ref[rebalance listener](./consumer-rebalance.md) API, but otherwise there is no difference between running Apache Pekko Connectors Kafka and any other Apache Pekko Streams implementation with a typed @apidoc[org.apache.pekko.actor.typed.ActorSystem].
+For more information on Apache Pekko classic and typed interoperability read the @extref[Coexistence](pekko:/typed/coexisting.html) page of the Apache Pekko Documentation.
 
-The table below shows Alpakka Kafka's direct dependencies and the second tab shows all libraries it depends on transitively.
+The table below shows Apache Pekko Connectors Kafka's direct dependencies and the second tab shows all libraries it depends on transitively.
 
 @@dependencies { projectId="core" }
 
-* Akka Streams $akka.version$ @extref[documentation](pekko:stream/index.html) and [sources](https://github.com/akka/akka)
+* Apache Pekko Streams $pekko.version$ @extref[documentation](pekko:stream/index.html) and [sources](https://github.com/apache/incubator-pekko)
 * Apache Kafka client $kafka.version$ @extref[documentation](kafka:/documentation#index) and [sources](https://github.com/apache/kafka)
 
 
 ## Scala and Java APIs
 
-Following Akka's conventions there are two separate packages named `akka.kafka.scaladsl` and `akka.kafka.javadsl`
+Following Apache Pekko's conventions, there are two separate packages named `org.apache.pekko.kafka.scaladsl` and `org.apache.pekko.kafka.javadsl`
 with the API for Scala and Java. These packages contain `Producer` and `Consumer`
-classes with factory methods for the various Akka Streams `Flow`, `Sink` and `Source`
+classes with factory methods for the various Apache Pekko Streams `Flow`, `Sink` and `Source`
 that are producing or consuming messages to/from Kafka.
 
 
 ## Examples
 
-A few self-contained examples using Alpakka are available as [Alpakka Samples](https://akka.io/alpakka-samples/).
+A few self-contained examples using Apache Pekko Connectors are available as [Apache Pekko Connectors Samples](https://akka.io/alpakka-samples/).
 
-To read and see how others use Alpakka see the [Alpakka documentation's Webinars, Presentations and Articles](https://doc.akka.io/docs/alpakka/current/other-docs/webinars-presentations-articles.html) listing.
+To read and see how others use Apache Pekko Connectors, see the [Apache Pekko Connectors documentation's Webinars, Presentations and Articles](https://pekko.apache.org/docs/pekko-connectors/current/other-docs/webinars-presentations-articles.html) listing.
 
 
 ## Contributing
 
-Please feel free to contribute to Alpakka and the Alpakka Kafka connector by reporting issues you identify, or by suggesting changes to the code. Please refer to our [contributing instructions](https://github.com/akka/alpakka-kafka/blob/master/CONTRIBUTING.md) to learn how it can be done.
+Please feel free to contribute to Apache Pekko Connectors and the Apache Pekko Connectors Kafka connector by reporting issues you identify, or by suggesting changes to the code. Please refer to our [contributing instructions](https://github.com/apache/incubator-pekko-connectors-kafka/blob/main/CONTRIBUTING.md) to learn how it can be done.
 
-We want Akka and Alpakka to strive in a welcoming and open atmosphere and expect all contributors to respect our [code of conduct](https://www.lightbend.com/conduct).
+We want Apache Pekko and Apache Pekko Connectors to thrive in a welcoming and open atmosphere and expect all contributors to respect our [code of conduct](https://www.apache.org/foundation/policies/conduct.html).
 
 
 @@@ index
diff --git a/docs/src/main/paradox/images/akka-alpakka-reverse.svg b/docs/src/main/paradox/images/akka-alpakka-reverse.svg
deleted file mode 100644
index 548378b2..00000000
--- a/docs/src/main/paradox/images/akka-alpakka-reverse.svg
+++ /dev/null
@@ -1 +0,0 @@
-<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 658 270"><title>akka-alpakka-reverse</title><g id="akka-alpakka-reverse"><path d="M422.94,211.26v-5h5v26.2c0,2.95.67,4.22,2.55,4.22.53,0,1.14-.07,1.74-.13v3.75a8.43,8.43,0,0,1-3.15.53,6.22,6.22,0,0,1-3.55-.93,6.66,6.66,0,0,1-2.28-4.83c-2.54,4-6.63,6-12.26,6a16.19,16.19,0,0,1-12.13-5.09,18.92,18.92,0,0,1,0-25.52A16.26,16.26,0,0,1,411,205.23C417.05,205.23,421,207.84,422.94,211.26Zm-3,21.37a13.28,13.28,0,0 [...]
\ No newline at end of file
diff --git a/docs/src/main/paradox/images/alpakka-kafka-stream-trace.png b/docs/src/main/paradox/images/alpakka-kafka-stream-trace.png
deleted file mode 100644
index 1c36c7f4..00000000
Binary files a/docs/src/main/paradox/images/alpakka-kafka-stream-trace.png and /dev/null differ
diff --git a/docs/src/main/paradox/index.md b/docs/src/main/paradox/index.md
index 53d76333..7a9a6d42 100644
--- a/docs/src/main/paradox/index.md
+++ b/docs/src/main/paradox/index.md
@@ -1,8 +1,8 @@
-# Alpakka Kafka Documentation
+# Apache Pekko Connectors Kafka Documentation
 
-The [Alpakka project](https://doc.akka.io/docs/alpakka/current/) is an open source initiative to implement stream-aware and reactive integration pipelines for Java and Scala. It is built on top of @extref[Pekko Streams](pekko:stream/index.html), and has been designed from the ground up to understand streaming natively and provide a DSL for reactive and stream-oriented programming, with built-in support for backpressure. Akka Streams is a [Reactive Streams](https://www.reactive-streams.or [...]
+The [Apache Pekko Connectors project](https://pekko.apache.org/docs/pekko-connectors/current/) is an open source initiative to implement stream-aware and reactive integration pipelines for Java and Scala. It is built on top of @extref[Pekko Streams](pekko:stream/index.html), and has been designed from the ground up to understand streaming natively and provide a DSL for reactive and stream-oriented programming, with built-in support for backpressure. Apache Pekko Streams is a [React [...]
 
-This **Alpakka Kafka connector** lets you connect [Apache Kafka](https://kafka.apache.org/) to Akka Streams. It was formerly known as **Akka Streams Kafka** and even **Reactive Kafka**.
+This **Apache Pekko Connectors Kafka connector** lets you connect [Apache Kafka](https://kafka.apache.org/) to Apache Pekko Streams. It was formerly known as **Alpakka Kafka**, and before that **Akka Streams Kafka** and even **Reactive Kafka**.
 
 @@toc { .main depth=2 }
 
diff --git a/docs/src/main/paradox/producer.md b/docs/src/main/paradox/producer.md
index bac2f852..db215c64 100644
--- a/docs/src/main/paradox/producer.md
+++ b/docs/src/main/paradox/producer.md
@@ -1,5 +1,5 @@
 ---
-project.description: Produce messages to Apache Kafka topics from Akka Streams with Alpakka Kafka.
+project.description: Produce messages to Apache Kafka topics from Apache Pekko Streams with Apache Pekko Connectors Kafka.
 ---
 # Producer
 
@@ -9,9 +9,9 @@ The underlying implementation is using the `KafkaProducer`, see the @javadoc[Kaf
 
 ## Choosing a producer
 
-Alpakka Kafka offers producer flows and sinks that connect to Kafka and write data. The tables below may help you to find the producer best suited for your use-case.
+Apache Pekko Connectors Kafka offers producer flows and sinks that connect to Kafka and write data. The tables below may help you to find the producer best suited for your use-case.
 
-For use-cases that don't benefit from Akka Streams, the @ref[Send Producer](send-producer.md) offers a @scala[`Future`-based]@java[`CompletionStage`-based] send API.
+For use-cases that don't benefit from Apache Pekko Streams, the @ref[Send Producer](send-producer.md) offers a @scala[`Future`-based]@java[`CompletionStage`-based] send API.
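+
+A minimal sketch of the Send Producer (assuming `producerSettings` and an implicit `ActorSystem` are in scope):
+
+```scala
+import org.apache.pekko.kafka.scaladsl.SendProducer
+import org.apache.kafka.clients.producer.{ ProducerRecord, RecordMetadata }
+import scala.concurrent.Future
+
+val producer = SendProducer(producerSettings)
+// the returned Future completes with the broker's acknowledgement
+val result: Future[RecordMetadata] =
+  producer.send(new ProducerRecord("topic1", "key", "value"))
+// call producer.close() when no more sends are expected
+```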
 
 ### Producers
 
@@ -37,7 +37,7 @@ For details about the batched committing see @ref:[Consumer: Offset Storage in K
 ### Transactional producers
 
 These factory methods are part of the @apidoc[Transactional$] API. For details see @ref[Transactions](transactions.md).
-Alpakka Kafka must manage the producer when using transactions.
+Apache Pekko Connectors Kafka must manage the producer when using transactions.
 
 | Factory method          | May use shared producer | Stream element type | Pass-through |
 |-------------------------|-------------------------|---------------------|--------------|
@@ -63,7 +63,7 @@ Java
 
 In addition to programmatic construction of the @apidoc[ProducerSettings$] it can also be created from configuration (`application.conf`). 
 
-When creating @apidoc[ProducerSettings$] with a classic @apidoc[org.apache.pekko.actor.ActorSystem] or typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] it uses the config section `akka.kafka.producer`. 
+When creating @apidoc[ProducerSettings$] with a classic @apidoc[org.apache.pekko.actor.ActorSystem] or typed @apidoc[org.apache.pekko.actor.typed.ActorSystem] it uses the config section `pekko.kafka.producer`. 
 The format of these settings files is described in the [Typesafe Config Documentation](https://github.com/lightbend/config#using-hocon-the-json-superset).
 
 @@ snip [snip](/core/src/main/resources/reference.conf) { #producer-settings }
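+
+Programmatic creation can look like this (a minimal sketch; `system` may be a classic or typed actor system, and the broker address is illustrative):
+
+```scala
+import org.apache.pekko.kafka.ProducerSettings
+import org.apache.kafka.common.serialization.StringSerializer
+
+// picks up defaults from the `pekko.kafka.producer` section shown above
+val producerSettings =
+  ProducerSettings(system, new StringSerializer, new StringSerializer)
+    .withBootstrapServers("localhost:9092")
+```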
@@ -75,7 +75,7 @@ See Kafka's @javadoc[KafkaProducer](org.apache.kafka.clients.producer.KafkaProdu
 
 ## Producer as a Sink
 
-@apidoc[Producer.plainSink](Producer$) { java="#plainSink[K,V](settings:akka.kafka.ProducerSettings[K,V]):akka.stream.javadsl.Sink[org.apache.kafka.clients.producer.ProducerRecord[K,V],java.util.concurrent.CompletionStage[akka.Done]]" scala="#plainSink[K,V](settings:akka.kafka.ProducerSettings[K,V]):akka.stream.scaladsl.Sink[org.apache.kafka.clients.producer.ProducerRecord[K,V],scala.concurrent.Future[akka.Done]]" } 
+@apidoc[Producer.plainSink](Producer$) { java="#plainSink[K,V](settings:org.apache.pekko.kafka.ProducerSettings[K,V]):org.apache.pekko.stream.javadsl.Sink[org.apache.kafka.clients.producer.ProducerRecord[K,V],java.util.concurrent.CompletionStage[org.apache.pekko.Done]]" scala="#plainSink[K,V](settings:org.apache.pekko.kafka.ProducerSettings[K,V]):org.apache.pekko.stream.scaladsl.Sink[org.apache.kafka.clients.producer.ProducerRecord[K,V],scala.concurrent.Future[org.apache.pekko.Done]]" } 
 is the easiest way to publish messages. The sink consumes the Kafka type @javadoc[ProducerRecord](org.apache.kafka.clients.producer.ProducerRecord) which contains 
 
 1. a topic name to which the record is being sent, 
@@ -150,7 +150,7 @@ For flows the @apidoc[ProducerMessage.PassThroughMessage]s continue as @apidoc[P
 
 ## Producer as a Flow
 
-@apidoc[Producer.flexiFlow](Producer$) { java="#flexiFlow[K,V,PassThrough](settings:akka.kafka.ProducerSettings[K,V]):akka.stream.javadsl.Flow[akka.kafka.ProducerMessage.Envelope[K,V,PassThrough],akka.kafka.ProducerMessage.Results[K,V,PassThrough],akka.NotUsed]" scala="#flexiFlow[K,V,PassThrough](settings:akka.kafka.ProducerSettings[K,V]):akka.stream.scaladsl.Flow[akka.kafka.ProducerMessage.Envelope[K,V,PassThrough],akka.kafka.ProducerMessage.Results[K,V,PassThrough],akka.NotUsed]" }
+@apidoc[Producer.flexiFlow](Producer$) { java="#flexiFlow[K,V,PassThrough](settings:org.apache.pekko.kafka.ProducerSettings[K,V]):org.apache.pekko.stream.javadsl.Flow[org.apache.pekko.kafka.ProducerMessage.Envelope[K,V,PassThrough],org.apache.pekko.kafka.ProducerMessage.Results[K,V,PassThrough],org.apache.pekko.NotUsed]" scala="#flexiFlow[K,V,PassThrough](settings:org.apache.pekko.kafka.ProducerSettings[K,V]):org.apache.pekko.stream.scaladsl.Flow[org.apache.pekko.kafka.ProducerMessage.Envelope[K,V,PassThrough],org.apache.pekko.kaf [...]
 allows the stream to continue after publishing messages to Kafka. It accepts implementations of @apidoc[ProducerMessage.Envelope] as input, which continue in the flow as implementations of @apidoc[ProducerMessage.Results]. 
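+
+Put together, a `flexiFlow` pipeline can look like this (a minimal sketch, assuming `producerSettings` and an implicit actor system are in scope):
+
+```scala
+import org.apache.pekko.kafka.ProducerMessage
+import org.apache.pekko.kafka.scaladsl.Producer
+import org.apache.pekko.stream.scaladsl.{ Sink, Source }
+import org.apache.kafka.clients.producer.ProducerRecord
+
+val done = Source(1 to 10)
+  // the pass-through (here the number itself) travels alongside the record
+  .map(n => ProducerMessage.single(new ProducerRecord[String, String]("topic1", n.toString), n))
+  .via(Producer.flexiFlow(producerSettings))
+  .map(_.passThrough) // available again after the record was published
+  .runWith(Sink.foreach(println))
+```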
  
 
diff --git a/docs/src/main/paradox/production.md b/docs/src/main/paradox/production.md
index 44ceb195..badf660b 100644
--- a/docs/src/main/paradox/production.md
+++ b/docs/src/main/paradox/production.md
@@ -1,31 +1,21 @@
 ---
-project.description: Consider these areas when using Alpakka Kafka in production.
+project.description: Consider these areas when using Apache Pekko Connectors Kafka in production.
 ---
 # Production considerations
 
 
-## Alpakka Kafka API
+## Apache Pekko Connectors Kafka API
 
 1. Do not use `Consumer.atMostOnceSource` in production as it internally commits the offset after every element.
 1. If you create `Producer` sinks in "inner flows", be sure to @ref:[share the `Producer` instance](producer.md#sharing-the-kafkaproducer-instance). This avoids the expensive creation of `KafkaProducer` instances (see the sketch below).
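+
+A minimal sketch of sharing the producer (assuming `producerSettings` is already configured):
+
+```scala
+// create the heavyweight KafkaProducer once ...
+val sharedProducer = producerSettings.createKafkaProducer()
+// ... and let every inner-flow sink reuse it
+val settingsWithSharedProducer = producerSettings.withProducer(sharedProducer)
+
+// close the shared producer yourself once all streams have completed
+// sharedProducer.close()
+```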
 
 @@@ note
 
-This is just a start, please add your experiences to this list by [opening a Pull Request](https://github.com/akka/alpakka-kafka/pulls).
+This is just a start, please add your experiences to this list by [opening a Pull Request](https://github.com/apache/incubator-pekko-connectors-kafka/pulls).
 
 @@@
 
 
-## Monitoring and Tracing
-
-For performance monitoring consider [Lightbend Telemetry](https://developer.lightbend.com/docs/telemetry/current/home.html) which gives insights into Akka and Akka Streams.
-
-Lightbend Telemetry supports OpenTracing context propagation so that you can follow individual messages through Kafka producers and consumers.
-
-![OpenTracing with Alpakka Kafka](.../alpakka-kafka-stream-trace.png)
-
-See [Enabling OpenTracing in your app](https://developer.lightbend.com/docs/telemetry/current/extensions/opentracing/enabling.html#alpakka-kafka-configuration).
-
 ## Security setup
 
 The different security setups offered by Kafka brokers are described in the @extref[Apache Kafka documentation](kafka:/documentation.html#security).
@@ -34,11 +24,11 @@ The different security setups offered by Kafka brokers are described in the @ext
 ### SSL
 
 The properties described in Kafka's @extref[Configuring Kafka Clients for SSL](kafka:/documentation.html#security_configclients) go in the
-`akka.kafka.consumer.kafka-clients` and `akka.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via
+`pekko.kafka.consumer.kafka-clients` and `pekko.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via
 `ProducerSettings.withProperties` and `ConsumerSettings.withProperties`. The necessary property name constants are available in @javadoc[SslConfigs](org.apache.kafka.common.config.SslConfigs).
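+
+Programmatically, the same settings can be applied like this (a minimal sketch with placeholder paths; the HOCON block below shows the configuration-file route):
+
+```scala
+import org.apache.kafka.clients.CommonClientConfigs
+import org.apache.kafka.common.config.SslConfigs
+
+val settingsWithSsl = producerSettings
+  .withProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL")
+  .withProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/var/private/ssl/kafka.client.truststore.jks")
+  // password taken from the environment (variable name is illustrative)
+  .withProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, sys.env("TRUSTSTORE_PASSWORD"))
+```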
 
 ```hocon
-akka.kafka.producer { # and akka.kafka.consumer respectively
+pekko.kafka.producer { # and pekko.kafka.consumer respectively
   kafka-clients {
     security.protocol=SSL
     ssl.truststore.location=/var/private/ssl/kafka.client.truststore.jks
@@ -58,11 +48,11 @@ You have the option to pass the passwords as command line parameters or environm
 ### Kerberos
 
 The properties described in Kafka's @extref[Configuring Kafka Clients for Kerberos](kafka:/documentation.html#security_sasl_kerberos_clientconfig) go in the
-`akka.kafka.consumer.kafka-clients` and `akka.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via
+`pekko.kafka.consumer.kafka-clients` and `pekko.kafka.producer.kafka-clients` sections of the configuration, or can be added programmatically via
 `ProducerSettings.withProperties` and `ConsumerSettings.withProperties`.
 
 ```hocon
-akka.kafka.producer { # and akka.kafka.consumer respectively
+pekko.kafka.producer { # and pekko.kafka.consumer respectively
   kafka-clients {
     security.protocol=SASL_PLAINTEXT # (or SASL_SSL)
     sasl.mechanism=GSSAPI
diff --git a/docs/src/main/paradox/release-notes/1.0.x.md b/docs/src/main/paradox/release-notes/1.0.x.md
deleted file mode 100644
index 54be25e5..00000000
--- a/docs/src/main/paradox/release-notes/1.0.x.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-project.description: Release notes for all Alpakka Kafka 1.0.x releases.
----
-# Alpakka Kafka 1.0.x
-
-@@@ note
-In case you are browsing a specific version's documentation: check out the [latest release notes](https://github.com/akka/alpakka-kafka/releases)
-@@@
-
-# 1.0.5
-
-Released: 2019-07-25
-
-## Notable changes since 1.0.4
-
-* `FlowWithContext` support [#780](https://github.com/akka/alpakka-kafka/pull/780)
-* Added connection to broker status checker [#674](https://github.com/akka/alpakka-kafka/issues/674) 
-
-This release is compiled and tested against Akka [2.5](https://doc.akka.io/docs/akka/2.5/), [2.6](https://akka.io/blog/news/2019/04/12/akka-2.6-roadmap) and Scala 2.11, 2.12, 2.13.0 on Adopt OpenJDK 8 and 11.
-
-This release was made possible by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="2m" src="https://avatars3.githubusercontent.com/u/422086?v=4&amp;s=40"/> **2m**](https://github.com/2m) | 13 | 629 | 213 |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 7 | 1327 | 203 |
-| [<img width="20" alt="tayvs" src="https://avatars1.githubusercontent.com/u/14348912?v=4&amp;s=40"/> **tayvs**](https://github.com/tayvs) | 1 | 397 | 9 |
-| [<img width="20" alt="lomigmegard" src="https://avatars1.githubusercontent.com/u/434236?v=4&amp;s=40"/> **lomigmegard**](https://github.com/lomigmegard) | 1 | 1 | 1 |
-
-# 1.0.4
-
-Released: 2019-06-11
-
-Alpakka Kafka 1.0.4 is released for Scala 2.13, 2.12 and 2.11.
-
-## Notable changes since 1.0.3
-
-* Compile with Scala 2.13 [#817](https://github.com/akka/alpakka-kafka/pull/817)
-* Add `Committer.batchFlow` that emits `CommittableOffsetBatch` for every committed batch. [#799](https://github.com/akka/alpakka-kafka/pull/799)
-
-The detailed list of changes is found in [the milestone](https://github.com/akka/alpakka-kafka/milestone/32?closed=1).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) and Scala 2.11, 2.12, 2.13.0 on Adopt OpenJDK 8 and 11.
-
-This release was made possible by:
-
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="2m" src="https://avatars3.githubusercontent.com/u/422086?v=4&s=40"> **2m**](https://github.com/2m) | 30 | 2947 | 2735 |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&s=40"> **ennru**](https://github.com/ennru) | 3 | 966 | 301 |
-| [<img width="20" alt="mowczare" src="https://avatars0.githubusercontent.com/u/9057533?s=40&v=4"/> **mowczare**](https://github.com/mowczare) | 2 | 40 | 32 |
-| [<img width="20" alt="mowczare" src="https://avatars2.githubusercontent.com/u/22529514?s=40&v=4"/> **Jimmycheong**](https://github.com/Jimmycheong) | 1 | 3 | 3 |
-
-
-# 1.0.3
-
-Released: 2019-05-09
-
-Alpakka Kafka 1.0.3 fixes an important bug in the transactional support and improves the testkit for Java users.
-
-## Notable changes since 1.0.2
-
-* Transactions: Avoid a timeout during draining @github[#787](#787) by [
-Szymon Matejczyk, @szymonm](https://github.com/szymonm)
-* Error handling: Signal exceptions from subscribe to stream, part of @github[#772](#772)
-* Testkit: Pull Embedded Kafka from new organisation id, part of @github[#772](#772)
-* Testkit: Improve Java API and make timeouts configurable via config @github[#786](#786)
-* Testkit: Specify the Docker image version for Testcontainers Kafka @github[#783](#783) as suggested by [@miguelpuyol](https://github.com/miguelpuyol) 
-
-The detailed list of changes is found in [the milestone](https://github.com/akka/alpakka-kafka/milestone/31?closed=1).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) and Scala 2.11 and 2.12 on Adopt OpenJDK 1.8.
-
-This release was made possible by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="2m" src="https://avatars3.githubusercontent.com/u/422086?v=4&s=40"> **2m**](https://github.com/2m) | 17 | 957 | 517 |
-| [<img width="20" alt="szymonm" src="https://avatars2.githubusercontent.com/u/5087912?v=4&s=40"> **szymonm**](https://github.com/szymonm) | 2 | 29 | 17 |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&s=40"> **ennru**](https://github.com/ennru) | 2 | 15 | 6 |
-
-
-# 1.0.2
-
-Released: 2019-05-02
-
-Alpakka Kafka 1.0.2 makes the transactional support more reliable.
-
-## Notable changes since 1.0.1
-
-* Improvements for transactional support
-    * Transactions: Add offsets to be committed directly after `producer.send` @github[#752](#752)
-    * Fix transaction offsets for transactional producer @github[#742](#742) by [Szymon Matejczyk, @szymonm](https://github.com/szymonm)
-* Do not fail commits during a rebalance (avoiding `CommitFailedException`) @github[#755](#755)
-* [Testcontainers](https://www.testcontainers.org/) support in the @ref:[Alpakka Kafka testkit](../testing-testcontainers.md#testing-with-a-docker-kafka-cluster) @github[#775](#775)
-
-The detailed list of changes is found in [the milestone](https://github.com/akka/alpakka-kafka/milestone/30?closed=1).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) and Scala 2.11 and 2.12 on Adopt OpenJDK 1.8.
-
-This release was made possible by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&s=40"> **ennru**](https://github.com/ennru) | 23 | 2165 | 1453 |
-| [<img width="20" alt="2m" src="https://avatars3.githubusercontent.com/u/422086?v=4&s=40"> **2m**](https://github.com/2m) | 4 | 147 | 59 |
-| [<img width="20" alt="szymonm" src="https://avatars2.githubusercontent.com/u/5087912?v=4&s=40"> **szymonm**](https://github.com/szymonm) | 3 | 669 | 192 |
-| [<img width="20" alt="fancywriter" src="https://avatars0.githubusercontent.com/u/1200256?v=4&s=40"> **fancywriter**](https://github.com/fancywriter) | 1 | 5 | 3 |
-
-
-# 1.0.1
-
-Released: 2019-03-07
-
-Alpakka Kafka 1.0.1 upgrades the Apache Kafka client to 2.1.1, as version 2.1.0 contained a few annoying bugs which disturb proper operations.
-
-Most notably: [Kubernetes - Kafka clients are resolving DNS entries only one time (KAFKA-7755)](https://issues.apache.org/jira/browse/KAFKA-7755)
-
-Full [Apache Kafka 2.1.1 release notes](https://archive.apache.org/dist/kafka/2.1.1/RELEASE_NOTES.html).
-
-The upgrade to the Apache Kafka client 2.1.1 is the only change compared to release @ref[1.0](#1-0).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) and Scala 2.11 and 2.12.
-
-This release was made possible by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="Philippus" src="https://avatars3.githubusercontent.com/u/1923596?v=4&amp;s=40"/> **Philippus**](https://github.com/Philippus) | 1 | 1 | 1 |
-| [<img width="20" alt="raboof" src="https://avatars2.githubusercontent.com/u/131856?v=4&amp;s=40"/> **raboof**](https://github.com/raboof) | 1 | 1 | 1 |
-
-
-# 1.0
-
-Released: 2019-02-28
-
-Final release of Alpakka Kafka 1.0! Theses release notes contain the need-to-know features and changes for Alpakka Kafka since release 0.22.
-
-### A bit of "history"
-
-Alpakka Kafka has made a long journey. It started off as **reactive kafka** with a first release in January 2015 when Akka Streams were still experimental and created by [Krzysiek Ciesielski, @kciesielski](https://github.com/kciesielski). The team at [SoftwareMill](https://softwaremill.com/) were the first stewards of the project.
-
-In 2016 the Akka team at [Lightbend](https://www.lightbend.com/) (at that time still called *Typesafe*) took over the responsibility to take care of it and started offering commercial support from version 0.16 (released May 2017). They were assisted by [Alexey Romanchuk, @13h3r](https://github.com/13h3r) during that time. The Akka team has participated significantly in the creation of the [Reactive Streams](https://www.reactive-streams.org/) industry standard and [Akka Streams](https://d [...]
-
-In May 2018 Lightbend started the Alpakka team to steward the [Alpakka project](https://doc.akka.io/docs/alpakka/current/) and Alpakka Kafka. When the work of improving the tests and the code for Alpakka Kafka kicked off, we dropped the name it had for a while **Akka Streams Kafka**.
-
-[Apache Kafka](https://kafka.apache.org/) has made an incredible journey during this period. From in-house project at LinkedIn via being open-sourced to becoming the integration backbone of many micro-service architectures.
-
-The need for streaming data has been the key driver for all these endeavours.
-
-## Features
-
-Alpakka Kafka provides Apache Kafka connectivity for Akka Streams. It supports consuming messages from Kafka into Akka Streams with at-most-once, at-least-once and transactional semantics, and supports producing messages to Kafka.
-
-Once consumed messages are in the Akka Stream, the whole flexibility of all [Akka Stream operators](https://doc.akka.io/docs/akka/current/stream/operators/index.html) becomes available.
-
-Alpakka Kafka achieves back-pressure for consuming by automatically pausing and resuming its Kafka subscriptions. When there is no downstream demand for more data, the consumer will not read any data. Any other communication with the Kafka broker (heartbeats, committing, rebalancing) will still continue.
-
-Alpakka Kafka 1.0 uses the Apache Kafka Java client 2.1.0 internally.
-
-
-## Most important changes since 0.22
-
-* Upgrade to Kafka client 2.1.0 [#660](https://github.com/akka/alpakka-kafka/pull/660). This upgrade makes it possible to use of the zstandard compression (with Kafka 2.1 brokers). Use Kafka client 2.x poll API [#614](https://github.com/akka/alpakka-kafka/pull/614).
-
-* **No more `WakeupException`!** The Kafka client API 2.x allows for specifying a timeout when polling the Kafka broker, thus we do not need to use the cranky tool of Kafka's `WakeupException`s to be sure not to block a precious thread. The settings to configure wake-ups are not used anymore.
-
-* Alpakka Kafka consumers don't fail for non-responding Kafka brokers anymore (as they used to to after a number of `WakeupException`s). 
-
-* New `Committer.sink` and `Committer.flow` for standardised committing [#622](https://github.com/akka/alpakka-kafka/pull/622) and [#644](https://github.com/akka/alpakka-kafka/issues/644)
-
-* Commit with metadata [#563](https://github.com/akka/alpakka-kafka/pull/563) and [#579](https://github.com/akka/alpakka-kafka/pull/579)
-
-* Java APIs for all settings classes [#616](https://github.com/akka/alpakka-kafka/pull/616)
-
-* @ref:[Alpakka Kafka testkit](../testing.md)
diff --git a/docs/src/main/paradox/release-notes/1.1.x.md b/docs/src/main/paradox/release-notes/1.1.x.md
deleted file mode 100644
index 0c91dd94..00000000
--- a/docs/src/main/paradox/release-notes/1.1.x.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-project.description: Release notes for all Alpakka Kafka 1.1.x releases.
----
-# Alpakka Kafka 1.1.x
-
-@@@ note
-In case you are browsing a specific version's documentation: check out the [latest release notes](https://github.com/akka/alpakka-kafka/releases)
-@@@
-
-Alpakka Kafka 1.1 changes the internals of how offset commits are sent to the Kafka broker. The new implementation is very beneficial for high-throughput consumers as committing is now connected to the regular polling of the Kafka consumer, which reduces pressure on Kafka.
-
-It adds a new source for advanced usage: `committablePartitionedManualOffsetSource` which may be used when offsets are stored external to Kafka, but tools for consumer lag which rely on offsets being committed to Kafka are in use.
-
-As a new experimental feature, offset committing is now possible without applying backpressure to the stream when commits are not processed at the same speed. This can be controlled via the `delivery` committer setting.
-
-Exceptions from the Kafka consumer API are now properly passed back and fail the stream.
-
-This goes along with a few improvements to the documentation.
-
-# 1.1.0
-
-Released: 2019-10-10
-
-No changes affecting the functionality since 1.1.0-RC2.
-
-The detailed list of changes is found in [the 1.1.0 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v1.1.0).
-
-
-# 1.1.0-RC2
-
-Released: 2019-10-02
-
-- Committable partitioned source with manual offset seek support [#908](https://github.com/akka/alpakka-kafka/issues/908) by [@seglo](https://github.com/seglo)
-
-The detailed list of changes is found in [the 1.1.0-RC2 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v1.1.0-RC2).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) for Scala 2.11, 2.12 and 2.13, [Akka 2.6-M8](https://doc.akka.io/docs/akka/2.6/) for Scala 2.12, and 2.13 and both Akka versions on Adopt OpenJDK 1.8 and 11.
-
-**If we do not get any reports that speak against it, we'll release Alpakka Kafka 1.1.0 in a few weeks.**
-
-This release contains contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&s=40"> **ennru**](https://github.com/ennru) | 10 | 303 | 207 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&s=40"> **seglo**](https://github.com/seglo) | 1 | 249 | 25 |
-| [<img width="20" alt="raboof" src="https://avatars2.githubusercontent.com/u/131856?v=4&s=40"> **raboof**](https://github.com/raboof) | 1 | 1 | 1 |
-
-# 1.1.0-RC1
-
-Released: 2019-09-05
-
-Alpakka Kafka 1.1 changes the internals of how offset commits are sent to the Kafka broker. The new implementation is very beneficial for high-throughput consumers as committing is now connected to the regular polling of the Kafka consumer, which reduces pressure on Kafka.
-
-As a new experimental feature, offset committing is now possible without applying backpressure to the stream when commits are not processed at the same speed. This can be controlled via the `delivery` committer setting.
-
-Exceptions from the Kafka consumer API are now properly passed back and fail the stream.
-
-This goes along with a few improvements to the documentation.
-
-
-## Notable changes since 1.0.5
-
-- Aggregate offsets and commit before poll [#862](https://github.com/akka/alpakka-kafka/issues/862)
-- Special-casing single offset committing [#868](https://github.com/akka/alpakka-kafka/issues/868)
-- Introduce setting for committing without backpressure [#883](https://github.com/akka/alpakka-kafka/pull/883)
-- Capture exceptions from Kafka consumer and pass to involved stages [#887](https://github.com/akka/alpakka-kafka/pull/887)
-
-The detailed list of changes is found in [the 1.1.0-RC1 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v1.1.0-RC1).
-
-This release is compiled and tested against [Akka 2.5](https://doc.akka.io/docs/akka/2.5/) for Scala 2.11, 2.12 and 2.13, [Akka 2.6-M6](https://doc.akka.io/docs/akka/2.6/) for Scala 2.12, and 2.13 and both Akka versions on Adopt OpenJDK 1.8 and 11.
-
-This release contains contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&s=40"> **ennru**](https://github.com/ennru) | 25 | 947 | 369 |
-| [<img width="20" alt="2m" src="https://avatars3.githubusercontent.com/u/422086?v=4&s=40"> **2m**](https://github.com/2m) | 9 | 93 | 66 |
-| [<img width="20" alt="TimMoore" src="https://avatars0.githubusercontent.com/u/44385?v=4&s=40"> **TimMoore**](https://github.com/TimMoore) | 2 | 10 | 33 |
-| [<img width="20" alt="bwmcadams" src="https://avatars3.githubusercontent.com/u/98358?v=4&s=40"> **bwmcadams**](https://github.com/bwmcadams) | 1 | 1 | 1 |
diff --git a/docs/src/main/paradox/release-notes/2.0.x.md b/docs/src/main/paradox/release-notes/2.0.x.md
deleted file mode 100644
index 831cacce..00000000
--- a/docs/src/main/paradox/release-notes/2.0.x.md
+++ /dev/null
@@ -1,370 +0,0 @@
----
-project.description: Release notes for all Alpakka Kafka 2.0.x releases.
----
-# Alpakka Kafka 2.0.x
-
-@@@ note
-In case you are browsing a specific version's documentation: check out the [latest release notes](https://github.com/akka/alpakka-kafka/releases)
-@@@
-
-The Alpakka Kafka 2.0 series features
-
-* upgrade to the Apache Kafka client 2.4.0
-* `Producer.committableSink` behaves as a combination of `Producer.flexiFlow` and `Committer.sink`, but has a special-purpose implementation
-* built-in @ref:[Akka Discovery](../discovery.md) support
-* More versatile use of Testcontainers in Alpakka Kafka Testkit [#939](https://github.com/akka/alpakka-kafka/issues/939).
-* Removal of some API which was deprecated since 1.0.0 or earlier.
-* Internal filter to avoid emitting records of revoked partitions [#946](https://github.com/akka/alpakka-kafka/issues/946) and [#992](https://github.com/akka/alpakka-kafka/issues/992)
-
-# 2.0.7
-
-Released: 2021-02-03
-
-## Notable changes since 2.0.6
-
-**This release only affects the Alpakka Kafka TestKit**
-
-## Alpakka Kafka Testkit
-
-- Increase default cluster start timeouts and make configurable (backport) [#1311](https://github.com/akka/alpakka-kafka/issues/1311) by [@seglo](https://github.com/seglo)
-- Use restart script [#1310](https://github.com/akka/alpakka-kafka/issues/1310) by [@seglo](https://github.com/seglo)
-
-The milestone contains everything [*closed in 2.0.7*](https://github.com/akka/alpakka-kafka/issues?q=is%3Aclosed+milestone%3A2.0.7).
-
-Since 2.0.6 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="seglo" src="https://avatars.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 2 | 89 | 12 |
-
-# 2.0.6
-
-Released: 2020-12-22
-
-## Notable changes since 2.0.5
-
-**The most relevant change in this release is the update of TestContainers to 1.15.1 (see below)**
-
-- Handle any combination of Committables in CommitObservationLogic [#1262](https://github.com/akka/alpakka-kafka/issues/1262) by [@seglo](https://github.com/seglo)
-- Fix message-batch loss when rebalancing partitioned sources [#1263](https://github.com/akka/alpakka-kafka/issues/1263) by [@jhooda](https://github.com/jhooda)
-- Producer.committableSink: offsets not committed, when MultiMessage is empty [#1200](https://github.com/akka/alpakka-kafka/issues/1200) by [@herzrasen](https://github.com/herzrasen)
-
-## Alpakka Kafka TestKit
-
-- Update default CP version and make docker image/tag configurable [#1287](https://github.com/akka/alpakka-kafka/issues/1287) by [@seglo](https://github.com/seglo)
-- Testkit: Make Testcontainer logs visible [#1281](https://github.com/akka/alpakka-kafka/issues/1281) by [@seglo](https://github.com/seglo)
-- Testcontainers Kafka 1.15.1 (was 1.14.3) [#1257](https://github.com/akka/alpakka-kafka/issues/1257) by [@scala-steward](https://github.com/scala-steward)
-- TestKit: make consumerDefaults parameterless again [#1292](https://github.com/akka/alpakka-kafka/issues/1292)
-
-## Dependencies
-
-- jackson-databind 2.10.5.1 (was 2.10.5) [#1270](https://github.com/akka/alpakka-kafka/issues/1270) by [@scala-steward](https://github.com/scala-steward)
-- Add note about provided jackson dependency [#1245](https://github.com/akka/alpakka-kafka/issues/1245) by [@seglo](https://github.com/seglo)
-
-# 2.0.5
-
-Released: 2020-09-09
-
-## Notable changes since 2.0.4
-
-- Allow typed Actor Systems in SendProducer and DiscoverySupport [#1192](https://github.com/akka/alpakka-kafka/issues/1192) by [@ennru](https://github.com/ennru)
-- Generate version information when publishing artifacts [#1188](https://github.com/akka/alpakka-kafka/issues/1188) by [@marcospereira](https://github.com/marcospereira)
-
-## Alpakka Kafka core
-
-- Deprecate RestrictedConsumer.committed(TopicPartition) [#1178](https://github.com/akka/alpakka-kafka/issues/1178) by [@seglo](https://github.com/seglo)
-- Cleanup build warnings [#1177](https://github.com/akka/alpakka-kafka/issues/1177) by [@seglo](https://github.com/seglo)
-- Order logging params for RetriableException consumer failures [#1168](https://github.com/akka/alpakka-kafka/issues/1168) by [@seglo](https://github.com/seglo)
-- Generate version information when publishing artifacts [#1188](https://github.com/akka/alpakka-kafka/issues/1188) by [@marcospereira](https://github.com/marcospereira)
-
-## Alpakka Kafka Testkit
-
-- Cleanup build warnings [#1177](https://github.com/akka/alpakka-kafka/issues/1177) by [@seglo](https://github.com/seglo)
-
-## Documentation
-
-- Run & materialize stream in Akka Cluster Sharding Example [#1190](https://github.com/akka/alpakka-kafka/issues/1190) by [@seglo](https://github.com/seglo)
-- Cleanup testcontainers docs snippets [#1180](https://github.com/akka/alpakka-kafka/issues/1180) by [@seglo](https://github.com/seglo)
-
-## Tests
-
-- Reduce bloat of log when testsuite log reported in failed travis build [#1182](https://github.com/akka/alpakka-kafka/issues/1182) by [@seglo](https://github.com/seglo)
-- Simplify consuming stream for AssignmentSpec "consume from the specified topic pattern" test [#1181](https://github.com/akka/alpakka-kafka/issues/1181) by [@seglo](https://github.com/seglo)
-- Cleanup build warnings [#1177](https://github.com/akka/alpakka-kafka/issues/1177) by [@seglo](https://github.com/seglo)
-
-## Updates
-
-- scalapb-runtime 0.10.8 (was 0.10.7) [#1171](https://github.com/akka/alpakka-kafka/issues/1171) by [@scala-steward](https://github.com/scala-steward)
-- metrics-core 4.1.12.1 (was 4.1.11) [#1194](https://github.com/akka/alpakka-kafka/issues/1194) by [@scala-steward](https://github.com/scala-steward)
-- sbt-java-formatter 0.6.0 (was 0.5.1) [#1193](https://github.com/akka/alpakka-kafka/issues/1193) by [@scala-steward](https://github.com/scala-steward)
-- jackson-databind 2.10.5 (was 2.10.4) [#1169](https://github.com/akka/alpakka-kafka/issues/1169) by [@scala-steward](https://github.com/scala-steward)
-- mockito-core 3.4.6 (was 2.28.2) [#1173](https://github.com/akka/alpakka-kafka/issues/1173) by [@scala-steward](https://github.com/scala-steward)
-- sbt-scalafmt 2.4.2 (was 2.4.0) [#1174](https://github.com/akka/alpakka-kafka/issues/1174) by [@scala-steward](https://github.com/scala-steward)
-- Update paradox akka to 0.35, new links to LB.com [#1167](https://github.com/akka/alpakka-kafka/issues/1167) by [@johanandren](https://github.com/johanandren)
-
-The milestone contains everything [*closed in 2.0.5*](https://github.com/akka/alpakka-kafka/issues?q=is%3Aclosed+milestone%3A2.0.5).
-
-Since 2.0.4 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 7 | 104 | 94 |
-| [<img width="20" alt="scala-steward" src="https://avatars1.githubusercontent.com/u/43047562?v=4&amp;s=40"/> **scala-steward**](https://github.com/scala-steward) | 7 | 7 | 7 |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 1 | 75 | 23 |
-| [<img width="20" alt="marcospereira" src="https://avatars0.githubusercontent.com/u/4576?v=4&amp;s=40"/> **marcospereira**](https://github.com/marcospereira) | 1 | 36 | 0 |
-| [<img width="20" alt="johanandren" src="https://avatars3.githubusercontent.com/u/666915?v=4&amp;s=40"/> **johanandren**](https://github.com/johanandren) | 1 | 1 | 1 |
-
-# 2.0.4
-
-Released: 2020-07-22
-
-## Notable changes since 2.0.3
-
-- Emit offset batches on timer [#1160](https://github.com/akka/alpakka-kafka/issues/1160) by [@jyates](https://github.com/jyates)
-- Do not commit offsets for unassigned partitions [#1123](https://github.com/akka/alpakka-kafka/issues/1123) by [@jyates](https://github.com/jyates)
-- Use bulk update API for shard allocation [#1139](https://github.com/akka/alpakka-kafka/issues/1139) by [@chbatey](https://github.com/chbatey)
-- Add produce consume check to KafkaContainerCluster [#1131](https://github.com/akka/alpakka-kafka/issues/1131) by [@seglo](https://github.com/seglo)
-- Deprecate embedded kafka support [#1136](https://github.com/akka/alpakka-kafka/issues/1136) by [@seglo](https://github.com/seglo)
-
-## Alpakka Kafka core
-
-- Emit offset batches on timer [#1160](https://github.com/akka/alpakka-kafka/issues/1160) by [@jyates](https://github.com/jyates)
-- Do not commit offsets for unassigned partitions [#1123](https://github.com/akka/alpakka-kafka/issues/1123) by [@jyates](https://github.com/jyates)
-
-## Alpakka Kafka Akka Cluster Sharding
-
-- Use bulk update API for shard allocation [#1139](https://github.com/akka/alpakka-kafka/issues/1139) by [@chbatey](https://github.com/chbatey)
-- Don't use ctx.log in KafkaClusterSharding [#1138](https://github.com/akka/alpakka-kafka/issues/1138) by [@chbatey](https://github.com/chbatey)
-
-## Alpakka Kafka Testkit
-
-- Deprecate embedded kafka support [#1136](https://github.com/akka/alpakka-kafka/issues/1136) by [@seglo](https://github.com/seglo)
-- Replace embedded kafka with schema registry with testcontainers [#1135](https://github.com/akka/alpakka-kafka/issues/1135) by [@seglo](https://github.com/seglo)
-- Add produce consume check to KafkaContainerCluster [#1131](https://github.com/akka/alpakka-kafka/issues/1131) by [@seglo](https://github.com/seglo)
-
-## Updates
-
-- mockito-core 2.28.2 (was 2.24.5) [#1151](https://github.com/akka/alpakka-kafka/issues/1151) by [@scala-steward](https://github.com/scala-steward)
-- scala-collection-compat 2.1.6 (was 2.1.2) [#1152](https://github.com/akka/alpakka-kafka/issues/1152) by [@scala-steward](https://github.com/scala-steward)
-- jul-to-slf4j, log4j-over-slf4j 1.7.30 (was 1.7.26) [#1155](https://github.com/akka/alpakka-kafka/issues/1155) by [@scala-steward](https://github.com/scala-steward)
-- embedded-kafka 2.4.1.1 (was 2.4.1) [#1147](https://github.com/akka/alpakka-kafka/issues/1147) by [@scala-steward](https://github.com/scala-steward)
-- protobuf-java 3.12.2 (was 3.11.4) [#1142](https://github.com/akka/alpakka-kafka/issues/1142) by [@scala-steward](https://github.com/scala-steward)
-- sbt 1.3.13 (was 1.3.9) [#1153](https://github.com/akka/alpakka-kafka/issues/1153) by [@scala-steward](https://github.com/scala-steward)
-- akka-stream-alpakka-csv 2.0.1 (was 2.0.0) [#1143](https://github.com/akka/alpakka-kafka/issues/1143) by [@scala-steward](https://github.com/scala-steward)
-
-The milestone contains everything [*closed in 2.0.4*](https://github.com/akka/alpakka-kafka/issues?q=is%3Aclosed+milestone%3A2.0.4).
-
-Since 2.0.3 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 5 | 1412 | 354 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 4 | 417 | 210 |
-| [<img width="20" alt="jyates" src="https://avatars2.githubusercontent.com/u/197388?v=4&amp;s=40"/> **jyates**](https://github.com/jyates) | 2 | 157 | 6 |
-| [<img width="20" alt="chbatey" src="https://avatars1.githubusercontent.com/u/1866779?v=4&amp;s=40"/> **chbatey**](https://github.com/chbatey) | 2 | 45 | 31 |
-| [<img width="20" alt="MiLk" src="https://avatars0.githubusercontent.com/u/29782?v=4&amp;s=40"/> **MiLk**](https://github.com/MiLk) | 2 | 3 | 3 |
-| [<img width="20" alt="Zhen-hao" src="https://avatars3.githubusercontent.com/u/10957195?v=4&amp;s=40"/> **Zhen-hao**](https://github.com/Zhen-hao) | 1 | 3 | 7 |
-| [<img width="20" alt="raboof" src="https://avatars2.githubusercontent.com/u/131856?v=4&amp;s=40"/> **raboof**](https://github.com/raboof) | 1 | 2 | 0 |
-
-# 2.0.3
-
-Released: 2020-05-14
-
-## Notable changes since 2.0.2
-
-- @ref[Akka Cluster Sharding](../cluster-sharding.md) integration to co-locate Kafka partition reading with Akka Cluster shards by [@seglo](https://github.com/seglo) and [@chbatey](https://github.com/chbatey)
-- Retry committing of offsets when failure is marked as retriable [#1111](https://github.com/akka/alpakka-kafka/issues/1111) by [@ennru](https://github.com/ennru)
-- Delay commits until the next offset is observed [#1093](https://github.com/akka/alpakka-kafka/issues/1093) by [@seglo](https://github.com/seglo)
-- Non-streaming producer wrapper [#1085](https://github.com/akka/alpakka-kafka/issues/1085) by [@ennru](https://github.com/ennru)
-- Drain partial offset commit batches on upstream failure [#1058](https://github.com/akka/alpakka-kafka/issues/1058) by [@sdudzin](https://github.com/sdudzin)
-- Kafka client 2.4.1 [#1103](https://github.com/akka/alpakka-kafka/issues/1103) by [@ennru](https://github.com/ennru)
-
-
-## Alpakka Kafka core
-
-- Committing: retry commits marked as retriable [#1111](https://github.com/akka/alpakka-kafka/issues/1111) by [@ennru](https://github.com/ennru)
-- Build: Jackson 2.10.4 and sbt plugin updates [#1112](https://github.com/akka/alpakka-kafka/issues/1112) by [@ennru](https://github.com/ennru)
-- Security: mask passwords in settings' toString [#1110](https://github.com/akka/alpakka-kafka/issues/1110) by [@ennru](https://github.com/ennru)
-- Commit when next offset is observed [#1093](https://github.com/akka/alpakka-kafka/issues/1093) by [@seglo](https://github.com/seglo)
-- Non-streaming producer wrapper [#1085](https://github.com/akka/alpakka-kafka/issues/1085) by [@ennru](https://github.com/ennru)
-- Simplify commit replying [#1095](https://github.com/akka/alpakka-kafka/issues/1095) by [@ennru](https://github.com/ennru)
-- DrainingControl: creation in toMat [#1084](https://github.com/akka/alpakka-kafka/issues/1084) by [@ennru](https://github.com/ennru)
-- Simpler use with the new actors API [#1088](https://github.com/akka/alpakka-kafka/issues/1088) by [@ennru](https://github.com/ennru)
-- Cluster-sharding: single listener per entity type key + Java DSL [#1080](https://github.com/akka/alpakka-kafka/issues/1080) by [@chbatey](https://github.com/chbatey)
-- Drain partial offset commit batches on upstream failure [#1058](https://github.com/akka/alpakka-kafka/issues/1058) by [@sdudzin](https://github.com/sdudzin)
-- Kafka cluster sharding support with external sharding allocation [#1067](https://github.com/akka/alpakka-kafka/issues/1067) by [@seglo](https://github.com/seglo)
-- Add MetadataClient getCommittedOffsets [#1073](https://github.com/akka/alpakka-kafka/issues/1073) by [@epalace](https://github.com/epalace)
-
-
-## Alpakka Kafka Testkit
-
-- Testkit: Generify test producer consumer [#1099](https://github.com/akka/alpakka-kafka/issues/1099) by [@claudio-scandura](https://github.com/claudio-scandura)
-- Testkit: create ProducerMessage.PassThroughResult [#1096](https://github.com/akka/alpakka-kafka/issues/1096) by [@sebarys](https://github.com/sebarys)
-- Set stopTimeout to zero in BaseKafkaTest.consumeString. [#1094](https://github.com/akka/alpakka-kafka/issues/1094) by [@claudio-scandura](https://github.com/claudio-scandura)
-
-The milestone contains everything [*closed in 2.0.3*](https://github.com/akka/alpakka-kafka/issues?q=is%3Aclosed+milestone%3A2.0.3).
-
-
-Since 2.0.2 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 14 | 1331 | 560 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 6 | 876 | 151 |
-| [<img width="20" alt="claudio-scandura" src="https://avatars0.githubusercontent.com/u/1486771?v=4&amp;s=40"/> **claudio-scandura**](https://github.com/claudio-scandura) | 2 | 54 | 17 |
-| [<img width="20" alt="sdudzin" src="https://avatars0.githubusercontent.com/u/606713?v=4&amp;s=40"/> **sdudzin**](https://github.com/sdudzin) | 1 | 554 | 46 |
-| [<img width="20" alt="chbatey" src="https://avatars1.githubusercontent.com/u/1866779?v=4&amp;s=40"/> **chbatey**](https://github.com/chbatey) | 1 | 224 | 21 |
-| [<img width="20" alt="sebarys" src="https://avatars1.githubusercontent.com/u/22937277?v=4&amp;s=40"/> **sebarys**](https://github.com/sebarys) | 1 | 4 | 0 |
-
-
-# 2.0.2
-
-Released: 2020-02-20
-
-This release fixes 
-
-## Alpakka Kafka core
-
-- CommittingProducerSink: outstanding commits on multi-msg [#1041](https://github.com/akka/alpakka-kafka/issues/1041) by [@gabrielreid](https://github.com/gabrielreid)
-- CommittingProducerSink: Fix count on failure [#1043](https://github.com/akka/alpakka-kafka/issues/1043) by [@gabrielreid](https://github.com/gabrielreid)
-- Don't close shared Producer on fail [#1046](https://github.com/akka/alpakka-kafka/issues/1046) by [@gabrielreid](https://github.com/gabrielreid)
-- Java API `createCommittableOffsetBatch` accepts `Committable` [#1033](https://github.com/akka/alpakka-kafka/issues/1033) by [@jewertow](https://github.com/jewertow)
-- Discard of external offsets on rebalance [#1037](https://github.com/akka/alpakka-kafka/issues/1037) by [@gabrielreid](https://github.com/gabrielreid)
-- Fix slow-loading offsets during rebalance [#1029](https://github.com/akka/alpakka-kafka/issues/1029) by [@gabrielreid](https://github.com/gabrielreid)
-
-## Alpakka Kafka Testkit
-
-- Upgrade to Confluent Platform 5.4.0 [#1034](https://github.com/akka/alpakka-kafka/issues/1034) by [@seglo](https://github.com/seglo)
-
-The milestone contains everything [*closed in 2.0.2*](https://github.com/akka/alpakka-kafka/milestone/43?closed=1).
-
-
-Since 2.0.1 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="gabrielreid" src="https://avatars3.githubusercontent.com/u/527401?v=4&amp;s=40"/> **gabrielreid**](https://github.com/gabrielreid) | 5 | 328 | 14 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 5 | 151 | 84 |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 2 | 18 | 21 |
-| [<img width="20" alt="jewertow" src="https://avatars2.githubusercontent.com/u/17457695?v=4&amp;s=40"/> **jewertow**](https://github.com/jewertow) | 1 | 44 | 1 |
-| [<img width="20" alt="ignasi35" src="https://avatars2.githubusercontent.com/u/762126?v=4&amp;s=40"/> **ignasi35**](https://github.com/ignasi35) | 1 | 16 | 3 |
-| [<img width="20" alt="Sebruck" src="https://avatars1.githubusercontent.com/u/2050823?v=4&amp;s=40"/> **Sebruck**](https://github.com/Sebruck) | 1 | 1 | 1 |
-
-
-
-# 2.0.1
-
-Released: 2020-01-23
-
-This release fixes 
-
-- CommittableProducer: Record outstanding commits on pass-through [#1022](https://github.com/akka/alpakka-kafka/issues/1022) by [@gabrielreid](https://github.com/gabrielreid)
-- Producer: avoid race between failure and upstream finish [#1025](https://github.com/akka/alpakka-kafka/pull/1025)
-
-The detailed list of changes is found in [the 2.0.1 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v2.0.1).
-
-Since 2.0.0 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 2 | 60 | 44 |
-| [<img width="20" alt="gabrielreid" src="https://avatars3.githubusercontent.com/u/527401?v=4&amp;s=40"/> **gabrielreid**](https://github.com/gabrielreid) | 1 | 78 | 0 |
-
-
-# 2.0.0
-
-Released: 2020-01-15
-
-This release doesn't contain any relevant changes over 2.0.0-RC1.
-
-The detailed list of changes is found in [the 2.0.0 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v2.0.0).
-
-Since 1.1.0 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 36 | 4358 | 1566 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 11 | 2692 | 1122 |
-| [<img width="20" alt="charlibot" src="https://avatars3.githubusercontent.com/u/5785993?v=4&amp;s=40"/> **charlibot**](https://github.com/charlibot) | 1 | 1901 | 664 |
-| [<img width="20" alt="jewertow" src="https://avatars2.githubusercontent.com/u/17457695?v=4&amp;s=40"/> **jewertow**](https://github.com/jewertow) | 1 | 668 | 12 |
-| [<img width="20" alt="def1ne" src="https://avatars0.githubusercontent.com/u/3229478?v=4&amp;s=40"/> **def1ne**](https://github.com/def1ne) | 1 | 6 | 0 |
-| [<img width="20" alt="shobull" src="https://avatars1.githubusercontent.com/u/3430629?v=4&amp;s=40"/> **shobull**](https://github.com/shobull) | 1 | 1 | 1 |
-
-
-# 2.0.0-RC1
-
-Released: 2019-12-17
-
-## Notable changes since 2.0.0-M2
-
-* Kafka 2.4.0 Final
-    - Update to Kafka 2.4.0 final release [#971](https://github.com/akka/alpakka-kafka/issues/915) by [@ennru](https://github.com/ennru)
-
-* Bugs
-    - Rebalance: filter messages of revoked partitions in partitioned sources [#992](https://github.com/akka/alpakka-kafka/issues/992) by [@seglo](https://github.com/seglo)
-
-* Alpakka Kafka API
-    - Stage ID traceability [#995](https://github.com/akka/alpakka-kafka/issues/995) by [@seglo](https://github.com/seglo)
-
-This release contains contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 5 | 75 | 85 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 2 | 494 | 186 |
-
-# 2.0.0-M2
-
-Released: 2019-12-03
-
-## Notable changes since 1.1.0
-
-* Kafka 2.4.0-RC1
-    - Kafka 2.4.0-RC1 [#971](https://github.com/akka/alpakka-kafka/issues/971) by [@seglo](https://github.com/seglo)
-
-* `Producer.committableSink` behaves as a combination of `Producer.flexiFlow` and `Committer.sink`, but has a special-purpose implementation
-    - Committing producer sink: producer and committer in a sink stage [#963](https://github.com/akka/alpakka-kafka/issues/963) by [@ennru](https://github.com/ennru)
-    - Create `committableSink` as combination of Producer.flexiFlow an… [#932](https://github.com/akka/alpakka-kafka/issues/932) by [@ennru](https://github.com/ennru)
-    - Committer: change type bound to allow offset batches [#931](https://github.com/akka/alpakka-kafka/issues/931) by [@ennru](https://github.com/ennru)
-
-- Akka Discovery support [#836](https://github.com/akka/alpakka-kafka/issues/836) by [@ennru](https://github.com/ennru)
-
-- Add wrapper for Metadata requests #497 [#900](https://github.com/akka/alpakka-kafka/issues/900) by [@jewertow](https://github.com/jewertow)
-
-* Offset batches allow aggregation from multiple consumers [#953](https://github.com/akka/alpakka-kafka/issues/953) by [@ennru](https://github.com/ennru)
-
-* Performance
-    - As part of Apache Kafka: `KafkaConsumer` should not throw away already fetched data for paused partitions [apache/kafka#6988](https://github.com/apache/kafka/pull/6988) by [@seglo](https://github.com/seglo)
-    - Producer: change default parallelism [#944](https://github.com/akka/alpakka-kafka/issues/944) by [@ennru](https://github.com/ennru)
-    - Rebalance: filter messages of revoked partitions [#946](https://github.com/akka/alpakka-kafka/issues/946) by [@ennru](https://github.com/ennru)
-
-* Testkit
-    - Use testcontainers for multi-broker integration tests and benchmarks [#939](https://github.com/akka/alpakka-kafka/issues/939) by [@seglo](https://github.com/seglo)
-    - Make EmbeddedKafka a provided dependency [#954](https://github.com/akka/alpakka-kafka/issues/954) by [@ennru](https://github.com/ennru)
-
-* Alpakka Kafka API
-    - Committable: deprecate commitJavadsl and commitScaladsl [#959](https://github.com/akka/alpakka-kafka/issues/959) by [@ennru](https://github.com/ennru)
-    - ConsumerSetting: Add group instance id setter [#861](https://github.com/akka/alpakka-kafka/issues/861) by [@def1ne](https://github.com/def1ne)
-
-* Bugs
-    - Commit refreshing: fix bug from aggregating offsets [#975](https://github.com/akka/alpakka-kafka/issues/975) by [@ennru](https://github.com/ennru)
-    - Consumer drops records after rebalance [#978](https://github.com/akka/alpakka-kafka/issues/978) band-aided with [#997](https://github.com/akka/alpakka-kafka/pull/997)
-
-The detailed list of changes is found in [the 2.0.0-M2 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v2.0.0-M2).
-
-This release is compiled and tested against Akka [2.5.23](https://doc.akka.io/docs/akka/2.5/), [2.6.0](https://doc.akka.io/docs/akka/2.6/) and Scala 2.11, 2.12, 2.13 on Adopt OpenJDK 8 and 11.
-
-This release contains contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 26 | 4167 | 1434 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 6 | 1521 | 882 |
-| [<img width="20" alt="charlibot" src="https://avatars3.githubusercontent.com/u/5785993?v=4&amp;s=40"/> **charlibot**](https://github.com/charlibot) | 1 | 1901 | 664 |
-| [<img width="20" alt="jewertow" src="https://avatars2.githubusercontent.com/u/17457695?v=4&amp;s=40"/> **jewertow**](https://github.com/jewertow) | 1 | 668 | 12 |
-| [<img width="20" alt="def1ne" src="https://avatars0.githubusercontent.com/u/3229478?v=4&amp;s=40"/> **def1ne**](https://github.com/def1ne) | 1 | 6 | 0 |
-| [<img width="20" alt="shobull" src="https://avatars1.githubusercontent.com/u/3430629?v=4&amp;s=40"/> **shobull**](https://github.com/shobull) | 1 | 1 | 1 |
-
-# 2.0.0-M1
-
-The release process failed.
diff --git a/docs/src/main/paradox/release-notes/2.1.x.md b/docs/src/main/paradox/release-notes/2.1.x.md
deleted file mode 100644
index 9ff3bfc9..00000000
--- a/docs/src/main/paradox/release-notes/2.1.x.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-project.description: Release notes for all Alpakka Kafka 2.1.x releases.
----
-# Alpakka Kafka 2.1.x
-
-@@@ note
-In case you are browsing a specific version's documentation: check out the [latest release notes](https://github.com/akka/alpakka-kafka/releases)
-@@@
-
-The Alpakka Kafka 2.1 series features
-
-* Kafka 2.7.0 client
-* Akka 2.6.x (dropped Akka 2.5)
-* Scala 2.12 and 2.13 (dropped Scala 2.11)
-* Testkit: use ScalaTest 3.1.x
-* Testkit: default to use Confluent Platform 6.1.1
-* Testkit: no longer support Embedded Kafka (Kafka 2.6.0 can't be safely embedded in Scala applications)
-
-# 2.1.1
-
-Released: 2021-07-23
-
-## Alpakka Kafka core
-
-- Upgrade Akka 2.6.15  [#1395](https://github.com/akka/alpakka-kafka/issues/1395) by [@ygree](https://github.com/ygree)
-- Fix Lightbend Telemetry context propagation in CommitCollectorStage [#1395](https://github.com/akka/alpakka-kafka/issues/1395) by [@ygree](https://github.com/ygree)
-
-The detailed list of changes is found in [the 2.1.1 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v2.1.1).
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ygree" src="https://avatars.githubusercontent.com/u/4147346?v=4&amp;s=40"/> **ygree**](https://github.com/ygree) | 6 | 68 | 23 |
-
-
-# 2.1.0
-
-Released: 2021-05-14
-
-## Noteworthy
-
-- Akka typed first documentation user experience [#1370](https://github.com/akka/alpakka-kafka/issues/1370) by [@seglo](https://github.com/seglo)
-
-The detailed list of changes is found in [the 2.1.0 Github release listing](https://github.com/akka/alpakka-kafka/releases/tag/v2.1.0).
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="scala-steward" src="https://avatars.githubusercontent.com/u/43047562?v=4&amp;s=40"/> **scala-steward**](https://github.com/scala-steward) | 5 | 11 | 11 |
-| [<img width="20" alt="seglo" src="https://avatars.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 1 | 265 | 7 |
-
-# 2.1.0-RC1
-
-@@@ note
-The Apache Kafka clients have a provided dependency on Jackson `2.10.5`, but Akka depends on `2.11.4`.
-Alpakka Kafka references `2.11.4`.
-@@@
-
-Released: 2021-04-19
-
-## Noteworthy
-
-- Upgrade Akka 2.6.14 and Kafka 2.7.0 [#1355](https://github.com/akka/alpakka-kafka/issues/1355) by [@seglo](https://github.com/seglo)
-- Add configurable protection against server-bug induced resets [#1299](https://github.com/akka/alpakka-kafka/issues/1299) by [@jyates](https://github.com/jyates)
-- Don't depend on scalatest in testcontainer singleton [#1330](https://github.com/akka/alpakka-kafka/issues/1330) by [@seglo](https://github.com/seglo)
-
-## Alpakka Kafka core
-
-- Upgrade Akka 2.6.14 and Kafka 2.7.0 [#1355](https://github.com/akka/alpakka-kafka/issues/1355) by [@seglo](https://github.com/seglo)
-- Add configurable protection against server-bug induced resets [#1299](https://github.com/akka/alpakka-kafka/issues/1299) by [@jyates](https://github.com/jyates)
-
-## Tests
-
-- Upgrade Akka 2.6.14 and Kafka 2.7.0 [#1355](https://github.com/akka/alpakka-kafka/issues/1355) by [@seglo](https://github.com/seglo)
-- Increase consumer group summary timeout in build [#1329](https://github.com/akka/alpakka-kafka/issues/1329) by [@seglo](https://github.com/seglo)
-- Add configurable protection against server-bug induced resets [#1299](https://github.com/akka/alpakka-kafka/issues/1299) by [@jyates](https://github.com/jyates)
-- Migrate build to GitHub actions [#1307](https://github.com/akka/alpakka-kafka/issues/1307) by [@seglo](https://github.com/seglo)
-
-## Alpakka Kafka Testkit
-
-- Upgrade Akka 2.6.14 and Kafka 2.7.0 [#1355](https://github.com/akka/alpakka-kafka/issues/1355) by [@seglo](https://github.com/seglo)
-- Don't depend on scalatest in testcontainer singleton [#1330](https://github.com/akka/alpakka-kafka/issues/1330) by [@seglo](https://github.com/seglo)
-- Add ProducerResultFactory.multiResult helper for MultiMessage [#1319](https://github.com/akka/alpakka-kafka/issues/1319) by [@ashendon](https://github.com/ashendon)
-
-## Documentation
-
-- Upgrade Akka 2.6.14 and Kafka 2.7.0 [#1355](https://github.com/akka/alpakka-kafka/issues/1355) by [@seglo](https://github.com/seglo)
-- Migrate to sonatype snapshot/release repositories [#1353](https://github.com/akka/alpakka-kafka/issues/1353) by [@seglo](https://github.com/seglo)
-- Add configurable protection against server-bug induced resets [#1299](https://github.com/akka/alpakka-kafka/issues/1299) by [@jyates](https://github.com/jyates)
-- Release notes v2.0.7 (master) [#1317](https://github.com/akka/alpakka-kafka/issues/1317) by [@seglo](https://github.com/seglo)
-- Migrate build to GitHub actions [#1307](https://github.com/akka/alpakka-kafka/issues/1307) by [@seglo](https://github.com/seglo)
-
-## Contributors 
-
-Since 2.0.7 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="scala-steward" src="https://avatars.githubusercontent.com/u/43047562?v=4&amp;s=40"/> **scala-steward**](https://github.com/scala-steward) | 39 | 41 | 41 |
-| [<img width="20" alt="ennru" src="https://avatars.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 35 | 515 | 296 |
-| [<img width="20" alt="seglo" src="https://avatars.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 25 | 2516 | 2593 |
-| [<img width="20" alt="octonato" src="https://avatars.githubusercontent.com/u/502982?v=4&amp;s=40"/> **octonato**](https://github.com/octonato) | 2 | 4 | 4 |
-| [<img width="20" alt="jyates" src="https://avatars.githubusercontent.com/u/197388?v=4&amp;s=40"/> **jyates**](https://github.com/jyates) | 1 | 925 | 71 |
-| [<img width="20" alt="jhooda" src="https://avatars.githubusercontent.com/u/2099200?v=4&amp;s=40"/> **jhooda**](https://github.com/jhooda) | 1 | 381 | 20 |
-| [<img width="20" alt="Matzz" src="https://avatars.githubusercontent.com/u/1036919?v=4&amp;s=40"/> **Matzz**](https://github.com/Matzz) | 1 | 94 | 65 |
-| [<img width="20" alt="altomch" src="https://avatars.githubusercontent.com/u/8770929?v=4&amp;s=40"/> **altomch**](https://github.com/altomch) | 1 | 51 | 52 |
-| [<img width="20" alt="ashendon" src="https://avatars.githubusercontent.com/u/13709042?v=4&amp;s=40"/> **ashendon**](https://github.com/ashendon) | 1 | 8 | 0 |
-| [<img width="20" alt="johanandren" src="https://avatars.githubusercontent.com/u/666915?v=4&amp;s=40"/> **johanandren**](https://github.com/johanandren) | 1 | 1 | 1 |
-| [<img width="20" alt="kciesielski" src="https://avatars.githubusercontent.com/u/1413553?v=4&amp;s=40"/> **kciesielski**](https://github.com/kciesielski) | 1 | 1 | 1 |
-
-# 2.1.0-M1
-
-Released: 2020-10-22
-
-## Noteworthy
-
-* Kafka 2.6.0 client [#1098](https://github.com/akka/alpakka-kafka/issues/1098)
-* Akka 2.6.10 (dropped Akka 2.5) [#1113](https://github.com/akka/alpakka-kafka/issues/1113)
-* Scala 2.12 and 2.13 (dropped Scala 2.11) [#1102](https://github.com/akka/alpakka-kafka/issues/1102)
-* The testkit does no longer support Embedded Kafka [#1114](https://github.com/akka/alpakka-kafka/issues/1114)
-* Scala 2.12.11 and 2.13.2 [#1212](https://github.com/akka/alpakka-kafka/issues/1212) by [@ennru](https://github.com/ennru)
-
-## Alpakka Kafka core
-
-- DiscoverySupport: remove deprecation [#1231](https://github.com/akka/alpakka-kafka/issues/1231) by [@ennru](https://github.com/ennru)
-- Drop Akka 2.5 dependency [#1209](https://github.com/akka/alpakka-kafka/issues/1209) by [@seglo](https://github.com/seglo)
-- Kafka 2.6.0 client upgrade and drop Scala 2.11 support [#1102](https://github.com/akka/alpakka-kafka/issues/1102) by [@seglo](https://github.com/seglo)
-- Commit collector: avoid pushing twice [#1205](https://github.com/akka/alpakka-kafka/issues/1205) by [@ennru](https://github.com/ennru)
-- Commit the offset when multi message is empty [#1201](https://github.com/akka/alpakka-kafka/issues/1201) by [@herzrasen](https://github.com/herzrasen)
-
-## Alpakka Kafka Testkit
-
-- testkit: Admin instead of AdminClient [#1183](https://github.com/akka/alpakka-kafka/issues/1183) by [@ennru](https://github.com/ennru)
-- Testkit: enable Mima [#1230](https://github.com/akka/alpakka-kafka/issues/1230) by [@ennru](https://github.com/ennru)
-- Start & stop Kafka process within running container [#1235](https://github.com/akka/alpakka-kafka/issues/1235) by [@seglo](https://github.com/seglo)
-- Drop embedded-kafka testkit support [#1229](https://github.com/akka/alpakka-kafka/issues/1229) by [@seglo](https://github.com/seglo)
-- Rollback Confluent Platform version; add Jackson Databind [#1226](https://github.com/akka/alpakka-kafka/issues/1226) by [@ennru](https://github.com/ennru)
-- Default to Confluent Platform version to 6.0.0 [#1224](https://github.com/akka/alpakka-kafka/issues/1224) by [@ennru](https://github.com/ennru)
-
-## Documentation
-
-- Docs: show Lightbend Telemetry OpenTracing support [#1237](https://github.com/akka/alpakka-kafka/issues/1237) by [@ennru](https://github.com/ennru)
-
-## Tests
-
-- ScalaTest 3.1 fixes for it tests [#1210](https://github.com/akka/alpakka-kafka/issues/1210) by [@seglo](https://github.com/seglo)
-- ScalaTest 3.1 with rewrites [#1207](https://github.com/akka/alpakka-kafka/issues/1207) by [@ennru](https://github.com/ennru)
-
-## Other updates
-
-- akka-stream-alpakka-csv 2.0.2 (was 2.0.1) [#1215](https://github.com/akka/alpakka-kafka/issues/1215) by [@scala-steward](https://github.com/scala-steward)
-- Use the latest JDK 8 and 11 versions in CI [#1213](https://github.com/akka/alpakka-kafka/issues/1213) by [@ennru](https://github.com/ennru)
-- protobuf-java 3.12.4 (was 3.12.2) [#1170](https://github.com/akka/alpakka-kafka/issues/1170) by [@scala-steward](https://github.com/scala-steward)
-- scala-collection-compat 2.2.0 (was 2.1.6) [#1219](https://github.com/akka/alpakka-kafka/issues/1219) by [@scala-steward](https://github.com/scala-steward)
-- junit-jupiter-api 5.7.0 (was 5.5.2) [#1217](https://github.com/akka/alpakka-kafka/issues/1217) by [@scala-steward](https://github.com/scala-steward)
-- mockito-core 3.5.13 (was 3.4.6) [#1218](https://github.com/akka/alpakka-kafka/issues/1218) by [@scala-steward](https://github.com/scala-steward)
-- sbt-mima-plugin 0.8.0 (was 0.7.0) [#1216](https://github.com/akka/alpakka-kafka/issues/1216) by [@scala-steward](https://github.com/scala-steward)
-- Lock scalafmt version [#1236](https://github.com/akka/alpakka-kafka/issues/1236) by [@ennru](https://github.com/ennru)
-
-The milestone contains everything [*closed in 2.1.0-M1*](https://github.com/akka/alpakka-kafka/issues?q=is%3Aclosed+milestone%3A2.1.0-M1).
-
-## Contributors
-
-Since 2.0.5 Alpakka Kafka has received contributions by:
-
-| Author | Commits | Lines added | Lines removed |
-| ------ | ------- | ----------- | ------------- |
-| [<img width="20" alt="ennru" src="https://avatars3.githubusercontent.com/u/458526?v=4&amp;s=40"/> **ennru**](https://github.com/ennru) | 27 | 338 | 253 |
-| [<img width="20" alt="seglo" src="https://avatars2.githubusercontent.com/u/1148412?v=4&amp;s=40"/> **seglo**](https://github.com/seglo) | 11 | 1577 | 2228 |
-| [<img width="20" alt="scala-steward" src="https://avatars1.githubusercontent.com/u/43047562?v=4&amp;s=40"/> **scala-steward**](https://github.com/scala-steward) | 6 | 6 | 6 |
-| [<img width="20" alt="herzrasen" src="https://avatars3.githubusercontent.com/u/20834977?v=4&amp;s=40"/> **herzrasen**](https://github.com/herzrasen) | 1 | 34 | 0 |
diff --git a/docs/src/main/paradox/release-notes/index.md b/docs/src/main/paradox/release-notes/index.md
index 34ae67ac..65eb1f52 100644
--- a/docs/src/main/paradox/release-notes/index.md
+++ b/docs/src/main/paradox/release-notes/index.md
@@ -1,15 +1,4 @@
 # Release Notes
 
-* All [GitHub releases](https://github.com/akka/alpakka-kafka/releases)
-* [2.2.0](https://github.com/akka/alpakka-kafka/releases/tag/v2.2.0)
-
-@@toc { depth=2 }
-
-@@@ index
-
-* [2.1.x](2.1.x.md)
-* [2.0.x](2.0.x.md)
-* [1.1.x](1.1.x.md)
-* [1.0.x](1.0.x.md)
-
-@@@
+* No Apache Pekko releases yet
+* [Alpakka Kafka Release Notes](https://doc.akka.io/docs/alpakka-kafka/current/release-notes/index.html)
diff --git a/docs/src/main/paradox/send-producer.md b/docs/src/main/paradox/send-producer.md
index fdf0653c..8c7f1713 100644
--- a/docs/src/main/paradox/send-producer.md
+++ b/docs/src/main/paradox/send-producer.md
@@ -5,9 +5,9 @@ project.description: Produce messages to Apache Kafka topics with a Java or Scal
 
 A producer publishes messages to Kafka topics. The message itself contains information about what topic and partition to publish to so you can publish to different topics with the same producer.
 
-The Alpakka Kafka @apidoc[SendProducer] does not integrate with Akka Streams. Instead, it offers a wrapper of the Apache Kafka @javadoc[KafkaProducer](org.apache.kafka.clients.producer.KafkaProducer) to send data to Kafka topics in a per-element fashion with a @scala[`Future`-based]@java[`CompletionStage`-based] API.
+The Apache Pekko Connectors Kafka @apidoc[SendProducer] does not integrate with Apache Pekko Streams. Instead, it offers a wrapper of the Apache Kafka @javadoc[KafkaProducer](org.apache.kafka.clients.producer.KafkaProducer) to send data to Kafka topics in a per-element fashion with a @scala[`Future`-based]@java[`CompletionStage`-based] API.
 
-It supports the same @ref[settings](producer.md#settings) as Alpakka @apidoc[Producer$] flows and sinks and supports @ref[service discovery](discovery.md).
+It supports the same @ref[settings](producer.md#settings) as Apache Pekko Connectors @apidoc[Producer$] flows and sinks and supports @ref[service discovery](discovery.md).
 
 After use, the `Producer` needs to be properly closed via the asynchronous `close()` method.
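
To make this concrete, here is a minimal sketch of the per-element API, assuming a broker at `localhost:9092` and plain `String` keys and values (all names are illustrative):

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ProducerSettings
import org.apache.pekko.kafka.scaladsl.SendProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.StringSerializer

object SendProducerSketch extends App {
  implicit val system: ActorSystem = ActorSystem("send-producer-sketch")
  import system.dispatcher

  val producerSettings =
    ProducerSettings(system, new StringSerializer, new StringSerializer)
      .withBootstrapServers("localhost:9092")

  // One SendProducer wraps one KafkaProducer instance.
  val producer = SendProducer(producerSettings)

  producer
    .send(new ProducerRecord("topic1", "key", "value")) // returns a Future[RecordMetadata]
    .flatMap(_ => producer.close())                     // close asynchronously after use
    .onComplete(_ => system.terminate())
}
```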
 
diff --git a/docs/src/main/paradox/serialization.md b/docs/src/main/paradox/serialization.md
index 875a5f4b..77fff175 100644
--- a/docs/src/main/paradox/serialization.md
+++ b/docs/src/main/paradox/serialization.md
@@ -1,13 +1,13 @@
 # Serialization
 
-The general recommendation for de-/serialization of messages is to use byte arrays (or Strings) as value and do the de-/serialization in a `map` operation in the Akka Stream instead of implementing it directly in Kafka de-/serializers. When deserialization is handled explicitly within the Akka Stream, it is easier to implement the desired error handling strategy as the examples below show.
+The general recommendation for de-/serialization of messages is to use byte arrays (or Strings) as the value and to do the de-/serialization in a `map` operation in the Apache Pekko Stream instead of implementing it directly in Kafka de-/serializers. When deserialization is handled explicitly within the Apache Pekko Stream, it is easier to implement the desired error handling strategy, as the examples below show.
 
 
 ## Protocol buffers
 
 [Protocol Buffers](https://developers.google.com/protocol-buffers) offer a language-neutral, platform-neutral, extensible mechanism for serializing structured data and allow consumers and producers to rely on the message format.
 
-The easiest way to use Protocol Buffers with Alpakka Kafka is to serialize and deserialize the Kafka message payload as a byte array and call the Protocol Buffers serialization and deserialization in a regular `map` operator. To serialize the Protobuf-defined type `Order` into a byte array use the `.toByteArray()` method which gets generated by the Protobuf compiler.
+The easiest way to use Protocol Buffers with Apache Pekko Connectors Kafka is to serialize and deserialize the Kafka message payload as a byte array and call the Protocol Buffers serialization and deserialization in a regular `map` operator. To serialize the Protobuf-defined type `Order` into a byte array use the `.toByteArray()` method which gets generated by the Protobuf compiler.
 
 Scala
 : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/SerializationSpec.scala) { #protobuf-imports #protobuf-serializer }
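
As a minimal sketch of this approach (separate from the documented snippets), the consumer below reads raw byte arrays and performs deserialization with error handling inside the stream; `parseOrder` is a hypothetical stand-in for a real decoder such as a Protobuf-generated `parseFrom`:

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.{ ConsumerSettings, Subscriptions }
import org.apache.pekko.kafka.scaladsl.Consumer
import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }

import scala.util.{ Failure, Success, Try }

object DeserializeInStreamSketch extends App {
  implicit val system: ActorSystem = ActorSystem("deserialize-sketch")

  final case class Order(id: String)
  // Hypothetical decoder; a Protobuf type would use Order.parseFrom(bytes) instead.
  def parseOrder(bytes: Array[Byte]): Order = Order(new String(bytes, "UTF-8"))

  val consumerSettings =
    ConsumerSettings(system, new StringDeserializer, new ByteArrayDeserializer)
      .withBootstrapServers("localhost:9092")
      .withGroupId("orders")

  Consumer
    .plainSource(consumerSettings, Subscriptions.topics("orders"))
    // Deserialize in the stream so each failure can be handled per element.
    .map(record => Try(parseOrder(record.value())))
    .runForeach {
      case Success(order)  => println(s"received $order")
      case Failure(reason) => println(s"skipping undecodable record: $reason")
    }
}
```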
diff --git a/docs/src/main/paradox/snapshots.md b/docs/src/main/paradox/snapshots.md
index f7c6ad19..35feacf9 100644
--- a/docs/src/main/paradox/snapshots.md
+++ b/docs/src/main/paradox/snapshots.md
@@ -1,13 +1,19 @@
 ---
-project.description: Snapshot builds of Alpakka Kafka are provided via the Sonatype snapshot repository.
+project.description: Snapshot builds of Apache Pekko Connectors Kafka are provided via the Apache snapshot repository.
 ---
 # Snapshots
 
-[snapshots-badge]:  https://img.shields.io/nexus/s/com.typesafe.akka/akka-stream-kafka_2.13?server=https%3A%2F%2Foss.sonatype.org
-[snapshots]:        https://oss.sonatype.org/content/repositories/snapshots/com/typesafe/akka/akka-stream-kafka_2.13/
+[snapshots]:        https://repository.apache.org/content/groups/snapshots/org/apache/pekko/pekko-connectors-kafka_2.13/
 
-Snapshots are published to the Sonatype Snapshot repository after every successful build on master.
-Add the following to your project build definition to resolve Alpakka Kafka connector snapshots:
+Snapshots are published to the Apache Snapshots repository every night.
+
+@@@ warning
+
+The use of Pekko SNAPSHOTs, nightlies and milestone releases is discouraged unless you know what you are doing.
+
+@@@
+
+Add the following to your project build definition to resolve Apache Pekko Connectors Kafka snapshots:
 
 ## Configure repository
 
@@ -18,8 +24,8 @@ Maven
       <repositories>
         <repository>
             <id>snapshots-repo</id>
-            <name>Sonatype snapshots</name>
-            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+            <name>Apache snapshots</name>
+            <url>https://repository.apache.org/content/groups/snapshots</url>
         </repository>
       </repositories>
     ...
@@ -28,24 +34,22 @@ Maven
 
 sbt
 :   ```scala
-    resolvers += Resolver.sonatypeRepo("snapshots")
+    resolvers += "Apache Snapshots" at "https://repository.apache.org/content/groups/snapshots"
     ```
 
 Gradle
 :   ```gradle
     repositories {
       maven {
-        url  "https://oss.sonatype.org/content/repositories/snapshots"
+        url  "https://repository.apache.org/content/groups/snapshots"
       }
     }
     ```
 
 ## Documentation
 
-The [snapshot documentation](https://doc.akka.io/docs/alpakka-kafka/snapshot/) is updated with every snapshot build.
+The [snapshot documentation](https://pekko.apache.org/docs/pekko-connectors-kafka/snapshot/) is updated with every snapshot build.
 
 ## Versions
 
-Latest published snapshot version is [![snapshots-badge][]][snapshots]
-
-The snapshot repository is cleaned from time to time with no further notice. Check [Sonatype snapshots Alpakka Kafka files](https://oss.sonatype.org/content/repositories/snapshots/com/typesafe/akka/akka-stream-kafka_2.13/) to see what versions are currently available.
+The snapshot repository is cleaned from time to time without further notice. Check [Apache Snapshots files](https://repository.apache.org/content/groups/snapshots/org/apache/pekko/pekko-connectors-kafka_2.13/) to see which versions are currently available.
diff --git a/docs/src/main/paradox/subscription.md b/docs/src/main/paradox/subscription.md
index 56c48f3f..9ce12adb 100644
--- a/docs/src/main/paradox/subscription.md
+++ b/docs/src/main/paradox/subscription.md
@@ -1,5 +1,5 @@
 ---
-project.description: An Alpakka Kafka consumer source can subscribe to Kafka topics within a consumer group, or to specific partitions.
+project.description: An Apache Pekko Connectors Kafka consumer source can subscribe to Kafka topics within a consumer group, or to specific partitions.
 ---
 # Subscription
 
diff --git a/docs/src/main/paradox/testing-testcontainers.md b/docs/src/main/paradox/testing-testcontainers.md
index d09f397e..db0aa149 100644
--- a/docs/src/main/paradox/testing-testcontainers.md
+++ b/docs/src/main/paradox/testing-testcontainers.md
@@ -1,5 +1,5 @@
 ---
-project.description: Alpakka Kafka provides Testcontainers support for running a Kafka cluster locally using Docker containers.
+project.description: Apache Pekko Connectors Kafka provides Testcontainers support for running a Kafka cluster locally using Docker containers.
 ---
 # Testing with a Docker Kafka cluster
 
@@ -17,7 +17,7 @@ The @apidoc[KafkaTestkitTestcontainersSettings] type can be used to perform acti
 * Overriding container settings and environment variables (i.e. to change default Broker config)
 * Apply custom docker configuration to the Kafka and ZooKeeper containers used to create a cluster
 
-To change defaults for all settings update the appropriate configuration in `akka.kafka.testkit.testcontainers`.
+To change defaults for all settings, update the appropriate configuration in `pekko.kafka.testkit.testcontainers`.
 
 @@ snip [snip](/testkit/src/main/resources/reference.conf) { #testkit-testcontainers-settings }
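
Defaults can also be overridden programmatically when constructing the settings; a small sketch, assuming the `withNumBrokers` and `withInternalTopicsReplicationFactor` builders carried over from the Alpakka Kafka testkit:

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.testkit.KafkaTestkitTestcontainersSettings

object TestcontainersSettingsSketch {
  implicit val system: ActorSystem = ActorSystem("testcontainers-sketch")

  // A 3-broker cluster with internal topics replicated across two brokers.
  val testcontainersSettings =
    KafkaTestkitTestcontainersSettings(system)
      .withNumBrokers(3)
      .withInternalTopicsReplicationFactor(2)
}
```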
 
@@ -53,7 +53,7 @@ You can retrieve the Schema Registry URL in your test configuration by calling `
 
 ## Testing with a Docker Kafka cluster from Java code
 
-The Alpakka Kafka testkit contains helper classes to start Kafka via Testcontainers. Alternatively, you may use just Testcontainers, as it is designed to be used with JUnit and you can follow [their documentation](https://www.testcontainers.org/modules/kafka/) to start and stop Kafka. To start a single instance for many tests see [Singleton containers](https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/).
+The Apache Pekko Connectors Kafka testkit contains helper classes to start Kafka via Testcontainers. Alternatively, you may use Testcontainers directly: it is designed to be used with JUnit, and you can follow [their documentation](https://www.testcontainers.org/modules/kafka/) to start and stop Kafka. To start a single instance for many tests see [Singleton containers](https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/).
 
 The Testcontainers dependency must be added to your project explicitly.
 
@@ -84,7 +84,7 @@ The Testcontainers dependency must be added to your project explicitly.
   scope=test
 }
 
-To ensure proper shutdown of all stages in every test, wrap your test code in @apidoc[assertAllStagesStopped]((javadsl|scaladsl).StreamTestKit$). This may interfere with the `stop-timeout` which delays shutdown for Alpakka Kafka consumers. You might need to configure a shorter timeout in your `application.conf` for tests.
+To ensure proper shutdown of all stages in every test, wrap your test code in @apidoc[assertAllStagesStopped]((javadsl|scaladsl).StreamTestKit$). This may interfere with the `stop-timeout`, which delays shutdown for Apache Pekko Connectors Kafka consumers. You might need to configure a shorter timeout in your `application.conf` for tests.
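
Schematically the wrapping looks like the sketch below, where the short in-memory stream stands in for the Kafka stream under test:

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.scaladsl.{ Sink, Source }
import org.apache.pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped

object AssertAllStagesStoppedSketch extends App {
  implicit val system: ActorSystem = ActorSystem("assert-stages-sketch")

  // Fails if any stream stage is still running once the block has completed.
  assertAllStagesStopped {
    Source(1 to 3).runWith(Sink.seq)
  }
}
```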
 
 ### One cluster for all tests
 
diff --git a/docs/src/main/paradox/testing.md b/docs/src/main/paradox/testing.md
index bebff83d..67341aa6 100644
--- a/docs/src/main/paradox/testing.md
+++ b/docs/src/main/paradox/testing.md
@@ -1,31 +1,31 @@
 ---
-project.description: Alpakka Kafka provides a Testkit with support for running local Kafka brokers for integration tests.
+project.description: Apache Pekko Connectors Kafka provides a Testkit with support for running local Kafka brokers for integration tests.
 ---
 # Testing
 
-To simplify testing of streaming integrations with Alpakka Kafka, it provides the **Alpakka Kafka testkit**. It provides help for
+To simplify testing of streaming integrations, Apache Pekko Connectors Kafka provides the **Apache Pekko Connectors Kafka testkit**. It offers help for
 
 * @ref:[Using Docker to launch a local Kafka cluster with testcontainers](testing-testcontainers.md)
-* @ref:[Mocking the Alpakka Kafka Consumers and Producers](#mocking-the-consumer-or-producer)
+* @ref:[Mocking the Apache Pekko Connectors Kafka Consumers and Producers](#mocking-the-consumer-or-producer)
 
 @@project-info{ projectId="testkit" }
 
 @@dependency [Maven,sbt,Gradle] {
-  group=com.typesafe.akka
-  artifact=akka-stream-kafka-testkit_$scala.binary.version$
+  group=org.apache.pekko
+  artifact=pekko-connectors-kafka-testkit_$scala.binary.version$
   version=$project.version$
   scope=test
-  symbol2=AkkaVersion
-  value2="$akka.version$"
-  group2=com.typesafe.akka
-  artifact2=akka-stream-testkit_$scala.binary.version$
-  version2=AkkaVersion
+  symbol2=PekkoVersion
+  value2="$pekko.version$"
+  group2=org.apache.pekko
+  artifact2=pekko-stream-testkit_$scala.binary.version$
+  version2=PekkoVersion
   scope2=test
 }
 
-Note that Akka testkits do not promise binary compatibility. The API might be changed even between patch releases.
+Note that Apache Pekko testkits do not promise binary compatibility. The API might change even between patch releases.
 
-The table below shows Alpakka Kafka testkit's direct dependencies and the second tab shows all libraries it depends on transitively. 
+The table below shows the Apache Pekko Connectors Kafka testkit's direct dependencies, and the second tab shows all libraries it depends on transitively.
 
 @@dependencies { projectId="testkit" }
 
@@ -39,10 +39,10 @@ See the documentation for each for more details.
 
 | Type                                                                                                                                                    | Test Framework     | Cluster     | Lang         | Lifetime                 |
 |---------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|-------------|--------------|--------------------------|
-| @ref:[`akka.kafka.testkit.javadsl.TestcontainersKafkaJunit4Test`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-java-code)         | JUnit 4            | Yes         | Java         | All tests, Per class     |
-| @ref:[`akka.kafka.testkit.javadsl.TestcontainersKafkaTest`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-java-code)               | JUnit 5            | Yes         | Java         | All tests, Per class     |
-| @ref:[`akka.kafka.testkit.scaladsl.TestcontainersKafkaLike`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-scala-code)             | ScalaTest          | Yes         | Scala        | All tests                |
-| @ref:[`akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-scala-code)     | ScalaTest          | Yes         | Scala        | Per class                |
+| @ref:[`org.apache.pekko.kafka.testkit.javadsl.TestcontainersKafkaJunit4Test`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-java-code)         | JUnit 4            | Yes         | Java         | All tests, Per class     |
+| @ref:[`org.apache.pekko.kafka.testkit.javadsl.TestcontainersKafkaTest`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-java-code)               | JUnit 5            | Yes         | Java         | All tests, Per class     |
+| @ref:[`org.apache.pekko.kafka.testkit.scaladsl.TestcontainersKafkaLike`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-scala-code)             | ScalaTest          | Yes         | Scala        | All tests                |
+| @ref:[`org.apache.pekko.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike`](testing-testcontainers.md#testing-with-a-docker-kafka-cluster-from-scala-code)     | ScalaTest          | Yes         | Scala        | Per class                |
 
 ## Alternative testing libraries
 
@@ -50,7 +50,7 @@ If using Maven and Java, an alternative library that provides running Kafka brok
 
 ## Mocking the Consumer or Producer
 
-The testkit contains factories to create the messages emitted by Consumer sources in `akka.kafka.testkit.ConsumerResultFactory` and Producer flows in `akka.kafka.testkit.ProducerResultFactory`.
+The testkit contains factories to create the messages emitted by Consumer sources in `org.apache.pekko.kafka.testkit.ConsumerResultFactory` and Producer flows in `org.apache.pekko.kafka.testkit.ProducerResultFactory`.
 
 To create the materialized value of Consumer sources, @apidoc[ConsumerControlFactory$] offers a wrapped @apidoc[KillSwitch].
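
For instance, a mocked committable source can be assembled like this sketch, assuming the factory signatures carried over from the Alpakka Kafka testkit:

```scala
import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.ConsumerMessage
import org.apache.pekko.kafka.scaladsl.Consumer
import org.apache.pekko.kafka.testkit.ConsumerResultFactory
import org.apache.pekko.kafka.testkit.scaladsl.ConsumerControlFactory
import org.apache.pekko.stream.scaladsl.{ Keep, Source }
import org.apache.kafka.clients.consumer.ConsumerRecord

object MockedConsumerSketch {
  implicit val system: ActorSystem = ActorSystem("mocked-consumer-sketch")

  val topic = "topic1"
  val groupId = "group1"

  // Fabricate the messages a real Consumer.committableSource would emit.
  val elements = (0L to 2L).map { offset =>
    ConsumerResultFactory.committableMessage(
      new ConsumerRecord(topic, 0, offset, "key", s"value $offset"),
      ConsumerResultFactory.committableOffset(groupId, topic, 0, offset, s"metadata $offset"))
  }

  // Attach a no-op Consumer.Control as the materialized value.
  val mockedSource: Source[ConsumerMessage.CommittableMessage[String, String], Consumer.Control] =
    Source(elements).viaMat(ConsumerControlFactory.controlFlow())(Keep.right)
}
```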
 
diff --git a/docs/src/main/paradox/transactions.md b/docs/src/main/paradox/transactions.md
index dd428e19..ab5a6782 100644
--- a/docs/src/main/paradox/transactions.md
+++ b/docs/src/main/paradox/transactions.md
@@ -1,5 +1,5 @@
 ---
-project.description: Alpakka has support for Kafka Transactions which provide guarantees that messages processed in a consume-transform-produce workflow are processed exactly once or not at all.
+project.description: Apache Pekko Connectors has support for Kafka Transactions which provide guarantees that messages processed in a consume-transform-produce workflow are processed exactly once or not at all.
 ---
 # Transactions
 
@@ -35,7 +35,7 @@ This can lead to several performance implications.
 
 1. A single producer per application has the opportunity to collectively batch sends to allow for better throughput.
 If we subdivide the same producing workload with multiple producers then we will lose the efficiency of consecutive batching to Kafka that one producer can manage.
-Since the Kafka Producer is threadsafe we would ideally only have one Producer per Alpakka Kafka application, but this isn't possible if we want to distribute our transactional application across multiple instances.
+Since the Kafka Producer is thread-safe, we would ideally have only one Producer per Apache Pekko Connectors Kafka application, but this isn't possible if we want to distribute our transactional application across multiple instances.
 
 2. The Kafka cluster will receive more connection and request overhead because there are more batches sent from more producers.
 
@@ -62,11 +62,11 @@ A `transactional.id` must be defined and unique for each instance of the applica
 
 Kafka transactions are handled transparently to the user.  The @apidoc[Transactional.source](Transactional$) will enforce that a consumer group id is specified and the @apidoc[Transactional.flow](Transactional$) or @apidoc[Transactional.sink](Transactional$) will enforce that a `transactional.id` is specified.  All other Kafka consumer and producer properties required to enable transactions are overridden.
 
-Transactions are committed on an interval which can be controlled with the producer config `akka.kafka.producer.eos-commit-interval`, similar to how exactly once works with Kafka Streams.  The default value is `100ms`.  The larger commit interval is the more records will need to be reprocessed in the event of failure and the transaction is aborted.
+Transactions are committed on an interval which can be controlled with the producer config `pekko.kafka.producer.eos-commit-interval`, similar to how exactly-once works with Kafka Streams.  The default value is `100ms`.  The larger the commit interval, the more records will need to be reprocessed in the event of a failure that aborts the transaction.
 
 When the stream is materialized the producer will initialize the transaction for the provided `transactional.id` and a transaction will begin.  Every commit interval (`eos-commit-interval`) we check if there are any offsets available to commit.  If offsets exist then we suspend backpressured demand while we drain all outstanding messages that have not yet been successfully acknowledged (if any) and then commit the transaction.  After the commit succeeds a new transaction is begun and we  [...]
 
-Messages are also drained from the stream when the consumer gets a rebalance of partitions. In that case, the consumer will wait in the `onPartitionsRevoked` callback until all of the messages have been drained from the stream and the transaction is committed before allowing the rebalance to continue. The amount of total time the consumer will wait for draining is controlled by the `akka.kafka.consumer.commit-timeout`, and the interval between checks is controlled by the `akka.kafka.cons [...]
+Messages are also drained from the stream when the consumer gets a rebalance of partitions. In that case, the consumer will wait in the `onPartitionsRevoked` callback until all of the messages have been drained from the stream and the transaction is committed before allowing the rebalance to continue. The amount of total time the consumer will wait for draining is controlled by the `pekko.kafka.consumer.commit-timeout`, and the interval between checks is controlled by the `pekko.kafka.co [...]
 
 To gracefully shut down the stream and commit the current transaction, you must call `shutdown()` on the @apidoc[(javadsl|scaladsl).Consumer.Control] materialized value to await all produced message acknowledgements and commit the final transaction.
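
Condensed into code, the consume-transform-produce workflow described above looks roughly like the following sketch (topic names are placeholders; `Transactional.sink` taking the `transactional.id` as a parameter follows the Alpakka Kafka lineage of this API):

```scala
import java.util.UUID

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.kafka.{ ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions }
import org.apache.pekko.kafka.scaladsl.{ Consumer, Transactional }
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ StringDeserializer, StringSerializer }

object TransactionalSketch extends App {
  implicit val system: ActorSystem = ActorSystem("transactional-sketch")

  val consumerSettings =
    ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers("localhost:9092")
      .withGroupId("transactional-group") // Transactional.source enforces a group id

  val producerSettings =
    ProducerSettings(system, new StringSerializer, new StringSerializer)
      .withBootstrapServers("localhost:9092")

  // transactional.id must be unique per application instance.
  val transactionalId = UUID.randomUUID().toString

  val control =
    Transactional
      .source(consumerSettings, Subscriptions.topics("source-topic"))
      .map { msg =>
        // Pass the partition offset along so it is committed with the transaction.
        ProducerMessage.single(
          new ProducerRecord("sink-topic", msg.record.key, msg.record.value),
          msg.partitionOffset)
      }
      .toMat(Transactional.sink(producerSettings, transactionalId))(Consumer.DrainingControl.apply)
      .run()

  // control.drainAndShutdown() awaits outstanding acknowledgements and
  // commits the final transaction before stopping.
}
```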
 
@@ -93,7 +93,7 @@ Java
 
 When any stage in the stream fails the whole stream will be torn down.  In the general case it's desirable to allow transient errors to fail the whole stream because they cannot be recovered from within the application.  Transient errors can be caused by network partitions, Kafka broker failures, @javadoc[ProducerFencedException](org.apache.kafka.common.errors.ProducerFencedException)'s from other application instances, and so on.  When the stream encounters transient errors then the cur [...]
 
-For transient errors we can choose to rely on the Kafka producer's configuration to retry, or we can handle it ourselves at the Akka Streams or Application layer.  Using the @extref[RestartSource](pekko:/stream/stream-error.html#delayed-restarts-with-a-backoff-stage) we can backoff connection attempts so that we don't hammer the Kafka cluster in a tight loop.
+For transient errors we can choose to rely on the Kafka producer's configuration to retry, or we can handle them ourselves at the Apache Pekko Streams or application layer.  Using the @extref[RestartSource](pekko:/stream/stream-error.html#delayed-restarts-with-a-backoff-stage) we can back off connection attempts so that we don't hammer the Kafka cluster in a tight loop.
 
 Scala
 : @@ snip [snip](/tests/src/test/scala/docs/scaladsl/TransactionsExample.scala) { #transactionalFailureRetry }
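
The referenced snippet is not expanded in this plain-text view; the general shape of such a backoff wrapper is sketched below, with a placeholder standing in for the transactional stream:

```scala
import scala.concurrent.duration._

import org.apache.pekko.actor.ActorSystem
import org.apache.pekko.stream.RestartSettings
import org.apache.pekko.stream.scaladsl.{ RestartSource, Sink, Source }

object RestartOnFailureSketch extends App {
  implicit val system: ActorSystem = ActorSystem("restart-sketch")

  // Placeholder for the transactional consume-transform-produce stream.
  def transactionalStream(): Source[String, _] = Source.single("element")

  val restartSettings =
    RestartSettings(minBackoff = 1.second, maxBackoff = 30.seconds, randomFactor = 0.2)

  // Recreate the stream on failure with exponential backoff so the Kafka
  // cluster is not hammered in a tight reconnect loop.
  RestartSource
    .onFailuresWithBackoff(restartSettings)(() => transactionalStream())
    .runWith(Sink.ignore)
}
```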
diff --git a/project/ParadoxSettings.scala b/project/ParadoxSettings.scala
index 27917eb6..982be934 100644
--- a/project/ParadoxSettings.scala
+++ b/project/ParadoxSettings.scala
@@ -1,9 +1,17 @@
 import Versions._
 import com.lightbend.paradox.apidoc.ApidocPlugin.autoImport.apidocRootPackage
 import com.lightbend.paradox.sbt.ParadoxPlugin.autoImport.{ paradoxGroups, paradoxProperties, paradoxRoots }
+import org.apache.pekko.PekkoParadoxPlugin.autoImport._
 import sbt._
+import sbt.Keys._
 
 object ParadoxSettings {
+
+  val themeSettings = Seq(
+    // allow access to snapshots for pekko-sbt-paradox
+    resolvers += "Apache Nexus Snapshots".at("https://repository.apache.org/content/repositories/snapshots/"),
+    pekkoParadoxGithub := "https://github.com/apache/incubator-pekko-connectors-kafka")
+
   val propertiesSettings = Seq(
     apidocRootPackage := "org.apache.pekko",
     paradoxGroups := Map("Language" -> Seq("Java", "Scala")),
@@ -34,4 +42,6 @@ object ParadoxSettings {
       "testcontainers.version" -> testcontainersVersion,
       "javadoc.org.testcontainers.containers.base_url" -> s"https://www.javadoc.io/doc/org.testcontainers/testcontainers/$testcontainersVersion/",
       "javadoc.org.testcontainers.containers.link_style" -> "direct"))
+
+  val settings = propertiesSettings ++ themeSettings
 }
diff --git a/project/ProjectSettings.scala b/project/ProjectSettings.scala
index 20a08996..edc6c60d 100644
--- a/project/ProjectSettings.scala
+++ b/project/ProjectSettings.scala
@@ -89,7 +89,7 @@ object ProjectSettings {
       "-skip-packages",
       "pekko.pattern:scala", // for some reason Scaladoc creates this
       "-doc-source-url", {
-        val branch = if (isSnapshot.value) "master" else s"v${version.value}"
+        val branch = if (isSnapshot.value) "main" else s"v${version.value}"
         s"https://github.com/apache/incubator-pekko-connectors-kafka/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}"
       },
       "-doc-canonical-base-url",
diff --git a/project/plugins.sbt b/project/plugins.sbt
index ea54f380..b1ddc625 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -7,8 +7,6 @@ addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")
 addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.0")
 addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.7.0")
 // docs
-addSbtPlugin("com.lightbend.akka" % "sbt-paradox-akka" % "0.44")
-addSbtPlugin("com.lightbend.paradox" % "sbt-paradox-dependencies" % "0.2.2")
 addSbtPlugin("com.lightbend.sbt" % "sbt-publish-rsync" % "0.2")
 addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0")
 // Java 11 module names are not added https://github.com/ThoughtWorksInc/sbt-api-mappings/issues/58
@@ -16,3 +14,15 @@ addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2")
 addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1")
 
 resolvers += Resolver.jcenterRepo
+// allow access to snapshots for pekko-sbt-paradox
+resolvers += "Apache Nexus Snapshots".at("https://repository.apache.org/content/repositories/snapshots/")
+
+// We deliberately use older versions of sbt-paradox because the current Pekko sbt build
+// only loads on JDK 1.8, so we need to bring in older versions of parboiled that support JDK 1.8.
+addSbtPlugin(("org.apache.pekko" % "pekko-sbt-paradox" % "0.0.0+19-f498f7c0-SNAPSHOT").excludeAll(
+  "com.lightbend.paradox", "sbt-paradox",
+  "com.lightbend.paradox" % "sbt-paradox-apidoc",
+  "com.lightbend.paradox" % "sbt-paradox-project-info"))
+addSbtPlugin(("com.lightbend.paradox" % "sbt-paradox" % "0.9.2").force())
+addSbtPlugin(("com.lightbend.paradox" % "sbt-paradox-apidoc" % "0.10.1").force())
+addSbtPlugin(("com.lightbend.paradox" % "sbt-paradox-project-info" % "2.0.0").force())
diff --git a/project/project-info.conf b/project/project-info.conf
index 5ef8ad9a..98119732 100644
--- a/project/project-info.conf
+++ b/project/project-info.conf
@@ -18,55 +18,31 @@ project-info {
     }
     forums: [
       {
-        text: "Lightbend Discuss"
-        url: "https://discuss.lightbend.com/c/akka/streams-and-alpakka"
+        text: "Apache Pekko Dev mailing list"
+        url: "https://lists.apache.org/list.html?dev@pekko.apache.org"
       }
-    ]
-  }
-  core: ${project-info.shared-info} {
-    title: "Alpakka Kafka"
-    jpms-name: "akka.stream.alpakka.kafka"
-    levels: [
       {
-        readiness: Supported
-        since: "2017-05-02"
-        since-version: "0.16"
-      }
-      {
-        readiness: Incubating
-        since: "2015-01-07"
-        since-version: "0.1.0"
+        text: "apache/pekko-connectors-kafka discussions"
+        url: "https://github.com/apache/pekko-connectors-kafka/discussions"
       }
     ]
+  }
+  core: ${project-info.shared-info} {
+    title: "Apache Pekko Connectors Kafka"
+    jpms-name: "pekko.stream.connectors.kafka"
     api-docs: [
       {
-        url: "https://doc.akka.io/api/alpakka-kafka/"${project-info.version}"/akka/kafka/index.html"
+        url: "https://pekko.apache.org/api/pekko-connectors-kafka/"${project-info.version}"/org/apache/pekko/kafka/index.html"
         text: "API (Scaladoc)"
       }
     ]
   }
   testkit: ${project-info.shared-info} {
-    title: "Alpakka Kafka testkit"
-    jpms-name: "akka.stream.alpakka.kafka.testkit"
-    levels: [
-      {
-        readiness: Incubating
-        since: "2018-11-06"
-        since-version: "1.0-M1"
-        note: "The API of the testkit may change even for minor versions."
-      }
-    ]
+    title: "Apache Pekko Connectors Kafka testkit"
+    jpms-name: "pekko.stream.connectors.kafka.testkit"
   }
   cluster-sharding: ${project-info.shared-info} {
-    title: "Alpakka Kafka Cluster Sharding"
-    jpms-name: "akka.stream.alpakka.kafka.cluster.sharding"
-    levels: [
-      {
-        readiness: Incubating
-        since: "2020-03-05"
-        since-version: "2.0.3"
-        note: "The API of the cluster sharding is experimental and may change even for minor versions."
-      }
-    ]
+    title: "Apache Pekko Connectors Kafka Cluster Sharding"
+    jpms-name: "pekko.stream.connectors.kafka.cluster.sharding"
   }
 }

