Posted to commits@pekko.apache.org by md...@apache.org on 2023/01/15 11:45:49 UTC

[incubator-pekko-connectors-kafka] 03/03: format source with scalafmt, #9

This is an automated email from the ASF dual-hosted git repository.

mdedetrich pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-pekko-connectors-kafka.git

commit cceccdcd0e48db13f5e09ee773353b6fd2cd851d
Author: Auto Format <nobody>
AuthorDate: Sun Jan 15 12:20:01 2023 +0100

    format source with scalafmt, #9
---
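The .scalafmt.conf used for this pass is not included in the diff. As a rough
sketch, a configuration along the following lines would produce the style
visible below (spaces inside import braces, fixed continuation indents instead
of open-paren alignment, closing parentheses kept on the last argument line,
and aligned case arrows). The option names are real scalafmt 3.x settings; the
values are inferred from the diff, not taken from the repository:

    # illustrative sketch, inferred from the reformatting in this commit
    version = 3.6.1
    runner.dialect = scala213
    maxColumn = 120
    spaces.inImportCurlyBraces = true   # import a.{ b, c }
    align.openParenCallSite = false     # do not align args with "("
    align.openParenDefnSite = false
    indent.callSite = 2                 # call-site args: +2 spaces
    indent.defnSite = 4                 # definition params: +4 spaces
    danglingParentheses.preset = false  # ")" stays on the last argument line
    align.tokens = [{ code = "=>", owner = "Case" }]  # align case arrows

A setup like this is typically applied through the sbt-scalafmt plugin, for
example with "sbt scalafmtAll" (assumed here; the build may wire the task
differently).
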
 .../benchmarks/AlpakkaCommittableProducer.scala    |  14 +-
 .../akka/kafka/benchmarks/BatchedConsumer.scala    |  30 +--
 .../scala/akka/kafka/benchmarks/Benchmarks.scala   |  33 ++-
 .../kafka/benchmarks/NoCommitBackpressure.scala    |   8 +-
 .../it/scala/akka/kafka/benchmarks/Producer.scala  |   2 +-
 .../it/scala/akka/kafka/benchmarks/SpecBase.scala  |   2 +-
 .../scala/akka/kafka/benchmarks/Transactions.scala |  16 +-
 .../AlpakkaCommittableSinkFixtures.scala           |  47 ++--
 .../akka/kafka/benchmarks/InflightMetrics.scala    |  65 +++--
 .../kafka/benchmarks/KafkaConsumerBenchmarks.scala |  11 +-
 .../kafka/benchmarks/KafkaConsumerFixtureGen.scala |  10 +-
 .../kafka/benchmarks/KafkaProducerBenchmarks.scala |   5 +-
 .../kafka/benchmarks/KafkaProducerFixtureGen.scala |  14 +-
 .../benchmarks/KafkaTransactionBenchmarks.scala    |  14 +-
 .../benchmarks/KafkaTransactionFixtureGen.scala    |  26 +-
 .../akka/kafka/benchmarks/PerfFixtureHelpers.scala |  18 +-
 .../ReactiveKafkaConsumerBenchmarks.scala          |  29 ++-
 .../benchmarks/ReactiveKafkaConsumerFixtures.scala |  21 +-
 .../ReactiveKafkaProducerBenchmarks.scala          |   4 +-
 .../benchmarks/ReactiveKafkaProducerFixtures.scala |  18 +-
 .../ReactiveKafkaTransactionBenchmarks.scala       |  10 +-
 .../ReactiveKafkaTransactionFixtures.scala         |  35 ++-
 .../main/scala/akka/kafka/benchmarks/Timed.scala   |  15 +-
 build.sbt                                          | 266 ++++++++++-----------
 .../cluster/sharding/KafkaClusterSharding.scala    |  75 +++---
 .../main/scala/akka/kafka/CommitterSettings.scala  |  20 +-
 .../akka/kafka/ConnectionCheckerSettings.scala     |  17 +-
 .../src/main/scala/akka/kafka/ConsumerFailed.scala |   6 +-
 .../main/scala/akka/kafka/ConsumerMessage.scala    |  16 +-
 .../main/scala/akka/kafka/ConsumerSettings.scala   |  80 +++----
 .../main/scala/akka/kafka/KafkaConsumerActor.scala |   4 +-
 core/src/main/scala/akka/kafka/Metadata.scala      |   4 +-
 .../akka/kafka/OffsetResetProtectionSettings.scala |  10 +-
 .../main/scala/akka/kafka/ProducerMessage.scala    |  35 +--
 .../main/scala/akka/kafka/ProducerSettings.scala   | 102 ++++----
 .../main/scala/akka/kafka/RestrictedConsumer.scala |   6 +-
 core/src/main/scala/akka/kafka/Subscriptions.scala |  10 +-
 .../kafka/internal/BaseSingleSourceLogic.scala     |  24 +-
 .../akka/kafka/internal/CommitCollectorStage.scala |  14 +-
 .../kafka/internal/CommitObservationLogic.scala    |  13 +-
 .../akka/kafka/internal/CommittableSources.scala   | 103 ++++----
 .../internal/CommittingProducerSinkStage.scala     |  41 ++--
 .../scala/akka/kafka/internal/ConfigSettings.scala |   8 +-
 .../akka/kafka/internal/ConnectionChecker.scala    |  20 +-
 .../kafka/internal/ConsumerProgressTracking.scala  |  52 ++--
 .../kafka/internal/ConsumerResetProtection.scala   |  37 ++-
 .../kafka/internal/ControlImplementations.scala    |  18 +-
 .../akka/kafka/internal/DefaultProducerStage.scala |  28 +--
 .../akka/kafka/internal/DeferredProducer.scala     |   5 +-
 .../kafka/internal/ExternalSingleSourceLogic.scala |   3 +-
 .../akka/kafka/internal/KafkaConsumerActor.scala   |  80 +++----
 .../akka/kafka/internal/KafkaSourceStage.scala     |   2 +-
 .../scala/akka/kafka/internal/LoggingWithId.scala  |   4 +-
 .../scala/akka/kafka/internal/MessageBuilder.scala |  50 ++--
 .../internal/PartitionAssignmentHelpers.scala      |   8 +-
 .../scala/akka/kafka/internal/PlainSources.scala   |  40 ++--
 .../akka/kafka/internal/SingleSourceLogic.scala    |  27 +--
 .../akka/kafka/internal/SourceLogicBuffer.scala    |   2 +-
 .../kafka/internal/SourceLogicSubscription.scala   |  25 +-
 .../scala/akka/kafka/internal/SubSourceLogic.scala |  84 +++----
 .../internal/TransactionalProducerStage.scala      |  60 +++--
 .../akka/kafka/internal/TransactionalSources.scala | 108 ++++-----
 .../main/scala/akka/kafka/javadsl/Committer.scala  |  14 +-
 .../main/scala/akka/kafka/javadsl/Consumer.scala   |  93 ++++---
 .../akka/kafka/javadsl/DiscoverySupport.scala      |  20 +-
 .../scala/akka/kafka/javadsl/MetadataClient.scala  |  21 +-
 .../main/scala/akka/kafka/javadsl/Producer.scala   |  44 ++--
 .../scala/akka/kafka/javadsl/SendProducer.scala    |   6 +-
 .../scala/akka/kafka/javadsl/Transactional.scala   |  29 +--
 .../main/scala/akka/kafka/scaladsl/Committer.scala |   9 +-
 .../main/scala/akka/kafka/scaladsl/Consumer.scala  |  63 +++--
 .../akka/kafka/scaladsl/DiscoverySupport.scala     |  26 +-
 .../scala/akka/kafka/scaladsl/MetadataClient.scala |  30 ++-
 .../main/scala/akka/kafka/scaladsl/Producer.scala  |  73 +++---
 .../scala/akka/kafka/scaladsl/SendProducer.scala   |  16 +-
 .../scala/akka/kafka/scaladsl/Transactional.scala  |  47 ++--
 project/AutomaticModuleName.scala                  |   5 +-
 project/VersionGenerator.scala                     |  11 +-
 .../akka/kafka/testkit/ConsumerResultFactory.scala |  27 +--
 .../akka/kafka/testkit/KafkaTestkitSettings.scala  |   4 +-
 .../KafkaTestkitTestcontainersSettings.scala       |  73 +++---
 .../akka/kafka/testkit/ProducerResultFactory.scala |  32 ++-
 .../akka/kafka/testkit/internal/KafkaTestKit.scala |  20 +-
 .../testkit/internal/KafkaTestKitChecks.scala      |  45 ++--
 .../testkit/internal/TestFrameworkInterface.scala  |   2 +-
 .../testkit/internal/TestcontainersKafka.scala     |  11 +-
 .../testkit/javadsl/ConsumerControlFactory.scala   |  11 +-
 .../testkit/scaladsl/ConsumerControlFactory.scala  |   8 +-
 .../akka/kafka/testkit/scaladsl/KafkaSpec.scala    |  38 ++-
 .../testkit/scaladsl/ScalatestKafkaSpec.scala      |   3 +-
 .../testkit/scaladsl/TestcontainersKafkaLike.scala |   2 +-
 .../src/it/scala/akka/kafka/IntegrationTests.scala |   3 +-
 .../akka/kafka/PartitionedSourceFailoverSpec.scala |  13 +-
 .../scala/akka/kafka/PlainSourceFailoverSpec.scala |  13 +-
 .../kafka/TransactionsPartitionedSourceSpec.scala  |  46 ++--
 .../scala/akka/kafka/TransactionsSourceSpec.scala  |  51 ++--
 .../test/scala/akka/kafka/ConfigSettingsSpec.scala |   3 +-
 .../scala/akka/kafka/ConsumerSettingsSpec.scala    |  47 ++--
 .../scala/akka/kafka/ProducerSettingsSpec.scala    |  47 ++--
 tests/src/test/scala/akka/kafka/Repeated.scala     |   2 +-
 .../test/scala/akka/kafka/TransactionsOps.scala    |  62 +++--
 .../kafka/internal/CommitCollectorStageSpec.scala  | 103 ++++----
 .../internal/CommittingProducerSinkSpec.scala      | 156 +++++-------
 .../kafka/internal/CommittingWithMockSpec.scala    |  76 +++---
 .../kafka/internal/ConnectionCheckerSpec.scala     |   4 +-
 .../scala/akka/kafka/internal/ConsumerDummy.scala  |  26 +-
 .../scala/akka/kafka/internal/ConsumerMock.scala   |   8 +-
 .../internal/ConsumerProgressTrackingSpec.scala    |  10 +-
 .../internal/ConsumerResetProtectionSpec.scala     |  65 +++--
 .../scala/akka/kafka/internal/ConsumerSpec.scala   |  42 ++--
 .../kafka/internal/OffsetAggregationSpec.scala     |   9 +-
 .../kafka/internal/PartitionedSourceSpec.scala     |  49 ++--
 .../scala/akka/kafka/internal/ProducerSpec.scala   |  92 ++++---
 .../akka/kafka/internal/SubscriptionsSpec.scala    |  22 +-
 .../scala/akka/kafka/javadsl/ControlSpec.scala     |  12 +-
 .../akka/kafka/scaladsl/CommittableSinkSpec.scala  |  10 +-
 .../scala/akka/kafka/scaladsl/CommittingSpec.scala | 101 +++-----
 .../kafka/scaladsl/ConnectionCheckerSpec.scala     |   2 +-
 .../scala/akka/kafka/scaladsl/ControlSpec.scala    |   4 +-
 .../akka/kafka/scaladsl/IntegrationSpec.scala      |  23 +-
 .../akka/kafka/scaladsl/MetadataClientSpec.scala   |   2 +-
 .../kafka/scaladsl/MisconfiguredConsumerSpec.scala |   4 +-
 .../kafka/scaladsl/MisconfiguredProducerSpec.scala |   2 +-
 .../akka/kafka/scaladsl/MultiConsumerSpec.scala    |  14 +-
 .../kafka/scaladsl/PartitionedSourcesSpec.scala    |  51 ++--
 .../akka/kafka/scaladsl/RebalanceExtSpec.scala     | 108 ++++-----
 .../scala/akka/kafka/scaladsl/RebalanceSpec.scala  |  63 ++---
 .../scala/akka/kafka/scaladsl/ReconnectSpec.scala  |  10 +-
 .../akka/kafka/scaladsl/RetentionPeriodSpec.scala  |   8 +-
 .../test/scala/akka/kafka/scaladsl/SpecBase.scala  |   2 +-
 .../akka/kafka/scaladsl/TransactionsSpec.scala     |  77 +++---
 .../scala/akka/kafka/tests/CapturingAppender.scala |   3 +-
 .../test/scala/akka/kafka/tests/LogbackUtil.scala  |   7 +-
 .../tests/javadsl/LogCapturingExtension.scala      |   8 +-
 .../kafka/tests/javadsl/LogCapturingJunit4.scala   |   3 +-
 .../akka/kafka/tests/scaladsl/LogCapturing.scala   |   6 +-
 .../test/scala/docs/scaladsl/AssignmentSpec.scala  |   9 +-
 .../src/test/scala/docs/scaladsl/AtLeastOnce.scala |  53 ++--
 .../docs/scaladsl/ClusterShardingExample.scala     |  18 +-
 .../test/scala/docs/scaladsl/ConsumerExample.scala |  43 ++--
 .../test/scala/docs/scaladsl/DocsSpecBase.scala    |   2 +-
 .../test/scala/docs/scaladsl/FetchMetadata.scala   |  10 +-
 .../scala/docs/scaladsl/PartitionExamples.scala    |  36 ++-
 .../test/scala/docs/scaladsl/ProducerExample.scala |  36 ++-
 .../scaladsl/SchemaRegistrySerializationSpec.scala |  25 +-
 .../scala/docs/scaladsl/SendProducerSpec.scala     |  19 +-
 .../scala/docs/scaladsl/SerializationSpec.scala    |  11 +-
 .../scala/docs/scaladsl/TestkitSamplesSpec.scala   |  29 +--
 .../scala/docs/scaladsl/TransactionsExample.scala  |  16 +-
 .../src/test/scala/docs/scaladsl/proto/Order.scala |  23 +-
 .../scala/docs/scaladsl/proto/OrderProto.scala     |  13 +-
 151 files changed, 1994 insertions(+), 2555 deletions(-)

diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/AlpakkaCommittableProducer.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/AlpakkaCommittableProducer.scala
index f15866f0..553b74f6 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/AlpakkaCommittableProducer.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/AlpakkaCommittableProducer.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.benchmarks
 
-import akka.kafka.benchmarks.BenchmarksBase.{topic_100_100, topic_100_5000}
+import akka.kafka.benchmarks.BenchmarksBase.{ topic_100_100, topic_100_5000 }
 import akka.kafka.benchmarks.Timed.runPerfTest
 import akka.kafka.benchmarks.app.RunTestCommand
 
@@ -18,8 +18,7 @@ class AlpakkaCommittableProducer extends BenchmarksBase() {
     runPerfTest(
       cmd,
       AlpakkaCommittableSinkFixtures.composedSink(cmd),
-      AlpakkaCommittableSinkBenchmarks.run
-    )
+      AlpakkaCommittableSinkBenchmarks.run)
   }
 
   it should "bench composed sink with 5000b messages" in {
@@ -27,8 +26,7 @@ class AlpakkaCommittableProducer extends BenchmarksBase() {
     runPerfTest(
       cmd,
       AlpakkaCommittableSinkFixtures.composedSink(cmd),
-      AlpakkaCommittableSinkBenchmarks.run
-    )
+      AlpakkaCommittableSinkBenchmarks.run)
   }
 
   it should "bench `Producer.committableSink` with 100b messages" in {
@@ -36,8 +34,7 @@ class AlpakkaCommittableProducer extends BenchmarksBase() {
     runPerfTest(
       cmd,
       AlpakkaCommittableSinkFixtures.producerSink(cmd),
-      AlpakkaCommittableSinkBenchmarks.run
-    )
+      AlpakkaCommittableSinkBenchmarks.run)
   }
 
   it should "bench `Producer.committableSink` with 5000b messages" in {
@@ -45,7 +42,6 @@ class AlpakkaCommittableProducer extends BenchmarksBase() {
     runPerfTest(
       cmd,
       AlpakkaCommittableSinkFixtures.producerSink(cmd),
-      AlpakkaCommittableSinkBenchmarks.run
-    )
+      AlpakkaCommittableSinkBenchmarks.run)
   }
 }
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/BatchedConsumer.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/BatchedConsumer.scala
index 00ca5ba0..360ddc16 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/BatchedConsumer.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/BatchedConsumer.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.benchmarks
 
-import akka.kafka.benchmarks.BenchmarksBase.{topic_1000_100, topic_1000_5000, topic_1000_5000_8}
+import akka.kafka.benchmarks.BenchmarksBase.{ topic_1000_100, topic_1000_5000, topic_1000_5000_8 }
 import akka.kafka.benchmarks.Timed.runPerfTest
 import akka.kafka.benchmarks.app.RunTestCommand
 
@@ -13,26 +13,26 @@ class ApacheKafkaBatchedConsumer extends BenchmarksBase() {
   it should "bench with small messages" in {
     val cmd = RunTestCommand("apache-kafka-batched-consumer", bootstrapServers, topic_1000_100.freshTopic)
     runPerfTest(cmd,
-                KafkaConsumerFixtures.filledTopics(cmd),
-                KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      KafkaConsumerFixtures.filledTopics(cmd),
+      KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 
   it should "bench with normal messages" in {
     val cmd =
       RunTestCommand("apache-kafka-batched-consumer-normal-msg", bootstrapServers, topic_1000_5000.freshTopic)
     runPerfTest(cmd,
-                KafkaConsumerFixtures.filledTopics(cmd),
-                KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      KafkaConsumerFixtures.filledTopics(cmd),
+      KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 
   it should "bench with normal messages and eight partitions" in {
     val cmd =
       RunTestCommand("apache-kafka-batched-consumer-normal-msg-8-partitions",
-                     bootstrapServers,
-                     topic_1000_5000_8.freshTopic)
+        bootstrapServers,
+        topic_1000_5000_8.freshTopic)
     runPerfTest(cmd,
-                KafkaConsumerFixtures.filledTopics(cmd),
-                KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      KafkaConsumerFixtures.filledTopics(cmd),
+      KafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 }
 
@@ -41,22 +41,22 @@ class AlpakkaKafkaBatchedConsumer extends BenchmarksBase() {
   it should "bench with small messages" in {
     val cmd = RunTestCommand("alpakka-kafka-batched-consumer", bootstrapServers, topic_1000_100)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.committableSources(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      ReactiveKafkaConsumerFixtures.committableSources(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 
   it should "bench with normal messages" in {
     val cmd = RunTestCommand("alpakka-kafka-batched-consumer-normal-msg", bootstrapServers, topic_1000_5000)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.committableSources(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      ReactiveKafkaConsumerFixtures.committableSources(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 
   it should "bench with normal messages and eight partitions" in {
     val cmd =
       RunTestCommand("alpakka-kafka-batched-consumer-normal-msg-8-partitions", bootstrapServers, topic_1000_5000_8)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.committableSources(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
+      ReactiveKafkaConsumerFixtures.committableSources(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumerAtLeastOnceBatched(batchSize = 1000))
   }
 }
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/Benchmarks.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/Benchmarks.scala
index 6f64592d..048b0722 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/Benchmarks.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/Benchmarks.scala
@@ -8,7 +8,7 @@ package akka.kafka.benchmarks
 import akka.kafka.benchmarks.BenchmarksBase._
 import akka.kafka.benchmarks.InflightMetrics._
 import akka.kafka.benchmarks.PerfFixtureHelpers.FilledTopic
-import akka.kafka.benchmarks.Timed.{runPerfTest, runPerfTestInflightMetrics}
+import akka.kafka.benchmarks.Timed.{ runPerfTest, runPerfTestInflightMetrics }
 import akka.kafka.benchmarks.app.RunTestCommand
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaLike
@@ -65,8 +65,8 @@ class AlpakkaKafkaConsumerNokafka extends BenchmarksBase() {
   it should "bench" in {
     val cmd = RunTestCommand("alpakka-kafka-plain-consumer-nokafka", bootstrapServers, topic_2000_100)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.noopFixtureGen(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumePlainNoKafka)
+      ReactiveKafkaConsumerFixtures.noopFixtureGen(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumePlainNoKafka)
   }
 }
 
@@ -92,25 +92,23 @@ class AlpakkaKafkaPlainConsumer extends BenchmarksBase() {
   it should "bench with normal messages and one hundred partitions with inflight metrics" in {
     val cmd =
       RunTestCommand("alpakka-kafka-plain-consumer-normal-msg-100-partitions-with-inflight-metrics",
-                     bootstrapServers,
-                     topic_1000_5000_100)
+        bootstrapServers,
+        topic_1000_5000_100)
     val consumerMetricNames = List[ConsumerMetricRequest](
       ConsumerMetricRequest("bytes-consumed-total", CounterMetricType),
       ConsumerMetricRequest("fetch-rate", GaugeMetricType),
       ConsumerMetricRequest("fetch-total", CounterMetricType),
       ConsumerMetricRequest("records-per-request-avg", GaugeMetricType),
-      ConsumerMetricRequest("records-consumed-total", CounterMetricType)
-    )
+      ConsumerMetricRequest("records-consumed-total", CounterMetricType))
     val brokerMetricNames: List[BrokerMetricRequest] = List(
       BrokerMetricRequest(s"kafka.server:type=BrokerTopicMetrics,name=TotalFetchRequestsPerSec",
-                          topic_1000_5000_100.topic,
-                          "Count",
-                          CounterMetricType),
+        topic_1000_5000_100.topic,
+        "Count",
+        CounterMetricType),
       BrokerMetricRequest(s"kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec",
-                          topic_1000_5000_100.topic,
-                          "Count",
-                          CounterMetricType)
-    )
+        topic_1000_5000_100.topic,
+        "Count",
+        CounterMetricType))
     val brokerJmxUrls = brokerContainers.map(_.getJmxServiceUrl).toList
     runPerfTestInflightMetrics(
       cmd,
@@ -118,8 +116,7 @@ class AlpakkaKafkaPlainConsumer extends BenchmarksBase() {
       brokerMetricNames,
       brokerJmxUrls,
       ReactiveKafkaConsumerFixtures.plainSources(cmd),
-      ReactiveKafkaConsumerBenchmarks.consumePlainInflightMetrics
-    )
+      ReactiveKafkaConsumerBenchmarks.consumePlainInflightMetrics)
   }
 }
 
@@ -134,7 +131,7 @@ class AlpakkaKafkaAtMostOnceConsumer extends BenchmarksBase() {
   it should "bench" in {
     val cmd = RunTestCommand("alpakka-kafka-at-most-once-consumer", bootstrapServers, topic_50_100)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.committableSources(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumeCommitAtMostOnce)
+      ReactiveKafkaConsumerFixtures.committableSources(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumeCommitAtMostOnce)
   }
 }
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/NoCommitBackpressure.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/NoCommitBackpressure.scala
index 500abf28..786e3cf1 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/NoCommitBackpressure.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/NoCommitBackpressure.scala
@@ -16,8 +16,8 @@ class RawKafkaCommitEveryPollConsumer extends BenchmarksBase() {
   it should "bench with small messages" in {
     val cmd = RunTestCommand(prefix + "consumer", bootstrapServers, topic_1000_100)
     runPerfTest(cmd,
-                KafkaConsumerFixtures.filledTopics(cmd),
-                KafkaConsumerBenchmarks.consumerAtLeastOnceCommitEveryPoll())
+      KafkaConsumerFixtures.filledTopics(cmd),
+      KafkaConsumerBenchmarks.consumerAtLeastOnceCommitEveryPoll())
   }
 
 // These are not plotted anyway
@@ -44,8 +44,8 @@ class AlpakkaCommitAndForgetConsumer extends BenchmarksBase() {
   it should "bench with small messages" in {
     val cmd = RunTestCommand(prefix + "consumer", bootstrapServers, topic_1000_100)
     runPerfTest(cmd,
-                ReactiveKafkaConsumerFixtures.committableSources(cmd),
-                ReactiveKafkaConsumerBenchmarks.consumerCommitAndForget(commitBatchSize = 1000))
+      ReactiveKafkaConsumerFixtures.committableSources(cmd),
+      ReactiveKafkaConsumerBenchmarks.consumerCommitAndForget(commitBatchSize = 1000))
   }
 
 // These are not plotted anyway
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/Producer.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/Producer.scala
index f89091ba..18598934 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/Producer.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/Producer.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.benchmarks
 
-import akka.kafka.benchmarks.BenchmarksBase.{topic_2000_100, topic_2000_500, topic_2000_5000, topic_2000_5000_8}
+import akka.kafka.benchmarks.BenchmarksBase.{ topic_2000_100, topic_2000_500, topic_2000_5000, topic_2000_5000_8 }
 import akka.kafka.benchmarks.Timed.runPerfTest
 import akka.kafka.benchmarks.app.RunTestCommand
 
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/SpecBase.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/SpecBase.scala
index d47f70cb..deab03fe 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/SpecBase.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/SpecBase.scala
@@ -6,7 +6,7 @@
 package akka.kafka.benchmarks
 
 import akka.kafka.testkit.scaladsl.ScalatestKafkaSpec
-import org.scalatest.concurrent.{Eventually, ScalaFutures}
+import org.scalatest.concurrent.{ Eventually, ScalaFutures }
 import org.scalatest.flatspec.AnyFlatSpecLike
 import org.scalatest.matchers.should.Matchers
 
diff --git a/benchmarks/src/it/scala/akka/kafka/benchmarks/Transactions.scala b/benchmarks/src/it/scala/akka/kafka/benchmarks/Transactions.scala
index 438b189a..5c0cf7ca 100644
--- a/benchmarks/src/it/scala/akka/kafka/benchmarks/Transactions.scala
+++ b/benchmarks/src/it/scala/akka/kafka/benchmarks/Transactions.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.benchmarks
 
-import akka.kafka.benchmarks.BenchmarksBase.{topic_100_100, topic_100_5000}
+import akka.kafka.benchmarks.BenchmarksBase.{ topic_100_100, topic_100_5000 }
 import akka.kafka.benchmarks.Timed.runPerfTest
 import akka.kafka.benchmarks.app.RunTestCommand
 import scala.concurrent.duration._
@@ -14,15 +14,15 @@ class ApacheKafkaTransactions extends BenchmarksBase() {
   it should "bench with small messages" in {
     val cmd = RunTestCommand("apache-kafka-transactions", bootstrapServers, topic_100_100)
     runPerfTest(cmd,
-                KafkaTransactionFixtures.initialize(cmd),
-                KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds))
+      KafkaTransactionFixtures.initialize(cmd),
+      KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds))
   }
 
   it should "bench with normal messages" in {
     val cmd = RunTestCommand("apache-kafka-transactions-normal-msg", bootstrapServers, topic_100_5000)
     runPerfTest(cmd,
-                KafkaTransactionFixtures.initialize(cmd),
-                KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds))
+      KafkaTransactionFixtures.initialize(cmd),
+      KafkaTransactionBenchmarks.consumeTransformProduceTransaction(commitInterval = 100.milliseconds))
   }
 }
 
@@ -32,8 +32,7 @@ class AlpakkaKafkaTransactions extends BenchmarksBase() {
     runPerfTest(
       cmd,
       ReactiveKafkaTransactionFixtures.transactionalSourceAndSink(cmd, commitInterval = 100.milliseconds),
-      ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction
-    )
+      ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction)
   }
 
   it should "bench with normal messages" in {
@@ -41,7 +40,6 @@ class AlpakkaKafkaTransactions extends BenchmarksBase() {
     runPerfTest(
       cmd,
       ReactiveKafkaTransactionFixtures.transactionalSourceAndSink(cmd, commitInterval = 100.milliseconds),
-      ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction
-    )
+      ReactiveKafkaTransactionBenchmarks.consumeTransformProduceTransaction)
   }
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/AlpakkaCommittableSinkFixtures.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/AlpakkaCommittableSinkFixtures.scala
index bda4e222..9e7ce55f 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/AlpakkaCommittableSinkFixtures.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/AlpakkaCommittableSinkFixtures.scala
@@ -7,14 +7,14 @@ package akka.kafka.benchmarks
 
 import akka.Done
 import akka.actor.ActorSystem
-import akka.kafka.ConsumerMessage.{Committable, CommittableMessage}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableMessage }
 import akka.kafka.ProducerMessage.Envelope
 import akka.kafka.benchmarks.app.RunTestCommand
-import akka.kafka.scaladsl.Consumer.{Control, DrainingControl}
-import akka.kafka.scaladsl.{Committer, Consumer, Producer}
+import akka.kafka.scaladsl.Consumer.{ Control, DrainingControl }
+import akka.kafka.scaladsl.{ Committer, Consumer, Producer }
 import akka.kafka._
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Keep, Sink, Source}
+import akka.stream.scaladsl.{ Keep, Sink, Source }
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.kafka.clients.consumer.ConsumerConfig
@@ -27,14 +27,14 @@ import org.apache.kafka.common.serialization.{
 }
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Future, Promise}
+import scala.concurrent.{ Await, Future, Promise }
 import scala.util.Success
 
 case class AlpakkaCommittableSinkTestFixture[SOut, FIn](sourceTopic: String,
-                                                        sinkTopic: String,
-                                                        msgCount: Int,
-                                                        source: Source[SOut, Control],
-                                                        sink: Sink[FIn, Future[Done]])
+    sinkTopic: String,
+    msgCount: Int,
+    source: Source[SOut, Control],
+    sink: Sink[FIn, Future[Done]])
 
 object AlpakkaCommittableSinkFixtures extends PerfFixtureHelpers {
   type Key = Array[Byte]
@@ -50,8 +50,7 @@ object AlpakkaCommittableSinkFixtures extends PerfFixtureHelpers {
       .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
 
   private def createProducerSettings(
-      kafkaHost: String
-  )(implicit actorSystem: ActorSystem): ProducerSettings[Array[Byte], String] =
+      kafkaHost: String)(implicit actorSystem: ActorSystem): ProducerSettings[Array[Byte], String] =
     ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
       .withBootstrapServers(kafkaHost)
 
@@ -69,12 +68,11 @@ object AlpakkaCommittableSinkFixtures extends PerfFixtureHelpers {
           Producer.committableSink(createProducerSettings(c.kafkaHost), CommitterSettings(actorSystem))
 
         AlpakkaCommittableSinkTestFixture[Message, ProducerMessage](c.filledTopic.topic,
-                                                                    sinkTopic,
-                                                                    msgCount,
-                                                                    source,
-                                                                    sink)
-      }
-    )
+          sinkTopic,
+          msgCount,
+          source,
+          sink)
+      })
 
   def composedSink(c: RunTestCommand)(implicit actorSystem: ActorSystem) =
     FixtureGen[AlpakkaCommittableSinkTestFixture[Message, ProducerMessage]](
@@ -93,16 +91,15 @@ object AlpakkaCommittableSinkFixtures extends PerfFixtureHelpers {
             .toMat(Committer.sink(CommitterSettings(actorSystem)))(Keep.right)
 
         AlpakkaCommittableSinkTestFixture[Message, ProducerMessage](c.filledTopic.topic,
-                                                                    sinkTopic,
-                                                                    msgCount,
-                                                                    source,
-                                                                    sink)
-      }
-    )
+          sinkTopic,
+          msgCount,
+          source,
+          sink)
+      })
 }
 
 object AlpakkaCommittableSinkBenchmarks extends LazyLogging {
-  import AlpakkaCommittableSinkFixtures.{Message, ProducerMessage}
+  import AlpakkaCommittableSinkFixtures.{ Message, ProducerMessage }
 
   val streamingTimeout: FiniteDuration = 30.minutes
   type Fixture = AlpakkaCommittableSinkTestFixture[Message, ProducerMessage]
@@ -120,7 +117,7 @@ object AlpakkaCommittableSinkBenchmarks extends LazyLogging {
     val control = source
       .map { msg =>
         ProducerMessage.single(new ProducerRecord[Array[Byte], String](sinkTopic, msg.record.value()),
-                               msg.committableOffset)
+          msg.committableOffset)
       }
       .map { msg =>
         meter.mark()
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/InflightMetrics.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/InflightMetrics.scala
index b0c3db0e..e0b880cd 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/InflightMetrics.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/InflightMetrics.scala
@@ -5,19 +5,19 @@
 
 package akka.kafka.benchmarks
 
-import java.lang.management.{BufferPoolMXBean, ManagementFactory, MemoryType}
+import java.lang.management.{ BufferPoolMXBean, ManagementFactory, MemoryType }
 
 import akka.NotUsed
 import akka.actor.Cancellable
 import akka.kafka.scaladsl.Consumer.Control
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Keep, Sink, Source}
-import com.codahale.metrics.{Histogram, MetricRegistry}
-import javax.management.remote.{JMXConnectorFactory, JMXServiceURL}
-import javax.management.{Attribute, MBeanServerConnection, ObjectName}
+import akka.stream.scaladsl.{ Keep, Sink, Source }
+import com.codahale.metrics.{ Histogram, MetricRegistry }
+import javax.management.remote.{ JMXConnectorFactory, JMXServiceURL }
+import javax.management.{ Attribute, MBeanServerConnection, ObjectName }
 
-import scala.concurrent.duration.{FiniteDuration, _}
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.duration.{ FiniteDuration, _ }
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.jdk.CollectionConverters._
 
 private[benchmarks] trait InflightMetrics {
@@ -47,8 +47,7 @@ private[benchmarks] trait InflightMetrics {
       control: Control,
       consumerMetricNames: List[ConsumerMetricRequest],
       brokerMetricNames: List[BrokerMetricRequest],
-      brokerJmxUrls: List[String]
-  )(implicit mat: Materializer): (Cancellable, Future[List[List[String]]]) = {
+      brokerJmxUrls: List[String])(implicit mat: Materializer): (Cancellable, Future[List[List[String]]]) = {
     implicit val ec: ExecutionContext = mat.executionContext
 
     val consumerMetricNamesSorted = consumerMetricNames.sortBy(_.name)
@@ -59,21 +58,21 @@ private[benchmarks] trait InflightMetrics {
 
     val (metricsControl, metricsFuture) = Source
       .tick(0.seconds, interval, NotUsed)
-      .scanAsync(accStart)({
+      .scanAsync(accStart) {
         case ((timeMs, accLastMetrics), _) =>
-          getAllMetrics(control, consumerMetricNamesSorted, brokerMetricNamesSorted, brokersJmx) map {
+          getAllMetrics(control, consumerMetricNamesSorted, brokerMetricNamesSorted, brokersJmx).map {
             case jvmMetrics :: consumerMetrics :: brokerMetrics :: Nil =>
               val timeMsMeasurement = Measurement(timeMsHeader, timeMs.toMillis.toDouble, GaugeMetricType)
               val newMetrics = timeMsMeasurement +: (jvmMetrics ++ consumerMetrics ++ brokerMetrics)
               val nextInterval = interval + timeMs
               val nextAcc = accLastMetrics match {
-                case None => InflightMetrics.reset(newMetrics, registry)
+                case None              => InflightMetrics.reset(newMetrics, registry)
                 case Some(lastMetrics) => InflightMetrics.update(lastMetrics, newMetrics)
               }
               (nextInterval, Some(nextAcc))
             case _ => throw new IllegalStateException("The wrong number of Future results were returned.")
           }
-      })
+      }
       .mapConcat { case (_, results: Option[List[Metric]]) => results.toList }
       .toMat(Sink.seq)(Keep.both)
       .run()
@@ -83,7 +82,7 @@ private[benchmarks] trait InflightMetrics {
       val metricsStrings = metrics.map(_.map(_.value.toString)).toList
       val summaryLine = metrics.last.map {
         case hg: HistogramGauge => hg.summaryValue.toString
-        case metric => metric.value.toString
+        case metric             => metric.value.toString
       }
       header +: metricsStrings :+ summaryLine
     }
@@ -92,7 +91,7 @@ private[benchmarks] trait InflightMetrics {
   }
 
   private def metricHeaders(consumerMetricNamesSorted: List[ConsumerMetricRequest],
-                            brokerMetricNamesSorted: List[BrokerMetricRequest]): List[String] = {
+      brokerMetricNamesSorted: List[BrokerMetricRequest]): List[String] = {
     jvmHeaders ++
     consumerMetricNamesSorted.map(consumerHeaderPrefix + _.name) ++
     brokerMetricNamesSorted.map(brokerHeaderPrefix + _.name.replace(",", ":"))
@@ -102,16 +101,14 @@ private[benchmarks] trait InflightMetrics {
    * Asynchronously retrieve all metrics
    */
   private def getAllMetrics(control: Control,
-                            consumerMetricNamesSorted: List[ConsumerMetricRequest],
-                            brokerMetricNamesSorted: List[BrokerMetricRequest],
-                            brokersJmx: List[MBeanServerConnection])(implicit ec: ExecutionContext) = {
+      consumerMetricNamesSorted: List[ConsumerMetricRequest],
+      brokerMetricNamesSorted: List[BrokerMetricRequest],
+      brokersJmx: List[MBeanServerConnection])(implicit ec: ExecutionContext) = {
     Future.sequence(
       List(
         jvm(),
         consumer(control, consumerMetricNamesSorted),
-        broker(brokersJmx, brokerMetricNamesSorted)
-      )
-    )
+        broker(brokersJmx, brokerMetricNamesSorted)))
   }
 
   /**
@@ -126,8 +123,7 @@ private[benchmarks] trait InflightMetrics {
       Measurement(gcTimeMsHeader, gcTimeMs, CounterMetricType),
       Measurement(heapBytesHeader, heapBytes, GaugeMetricType, getMeanSummary),
       Measurement(nonHeapBytesHeader, nonHeapBytes, GaugeMetricType, getMeanSummary),
-      Measurement(directBytesHeader, directBytes, GaugeMetricType, getMeanSummary)
-    )
+      Measurement(directBytesHeader, directBytes, GaugeMetricType, getMeanSummary))
   }
 
   /**
@@ -139,9 +135,8 @@ private[benchmarks] trait InflightMetrics {
       .map(bean => (bean.getCollectionCount.toDouble, bean.getCollectionTime.toDouble))
       .getOrElse(
         throw new Exception(
-          s"Compatible GC not found. Need one of: ${compatibleGcNames.mkString(",")}. Found ${gcBeans.map(_.getName()).mkString(",")}."
-        )
-      )
+          s"Compatible GC not found. Need one of: ${compatibleGcNames.mkString(",")}. Found ${gcBeans.map(
+              _.getName()).mkString(",")}."))
   }
 
   /**
@@ -159,8 +154,7 @@ private[benchmarks] trait InflightMetrics {
    * Return specified consumer-level metrics using Alpakka Kafka's [[Control]] metrics API.
    */
   private def consumer[T](control: Control, requests: List[ConsumerMetricRequest])(
-      implicit ec: ExecutionContext
-  ): Future[List[Measurement]] = {
+      implicit ec: ExecutionContext): Future[List[Measurement]] = {
     control.metrics.map { consumerMetrics =>
       val metricValues = consumerMetrics
         .filter { case (name, _) => requests.map(_.name).contains(name.name()) }
@@ -171,7 +165,7 @@ private[benchmarks] trait InflightMetrics {
         .map(parseNumeric)
 
       require(metricValues.size == requests.size,
-              "Number of returned metric values DNE number of requested consumer metrics")
+        "Number of returned metric values DNE number of requested consumer metrics")
 
       val results: List[Measurement] = requests
         .zip(metricValues)
@@ -188,8 +182,7 @@ private[benchmarks] trait InflightMetrics {
    */
   private def broker(
       brokersJmx: Seq[MBeanServerConnection],
-      brokerMetricNames: List[BrokerMetricRequest]
-  )(implicit ec: ExecutionContext): Future[List[Measurement]] = Future {
+      brokerMetricNames: List[BrokerMetricRequest])(implicit ec: ExecutionContext): Future[List[Measurement]] = Future {
     brokerMetricNames
       .sortBy(_.name)
       .map {
@@ -236,9 +229,9 @@ private[benchmarks] object InflightMetrics {
    * dropwizard [[Sampling]].
    */
   final case class Measurement(name: String,
-                               value: Double,
-                               metricType: MetricType,
-                               summaryValueF: Option[com.codahale.metrics.Sampling => Long] = None)
+      value: Double,
+      metricType: MetricType,
+      summaryValueF: Option[com.codahale.metrics.Sampling => Long] = None)
 
   sealed trait Metric {
     def measurement: Measurement
@@ -271,7 +264,7 @@ private[benchmarks] object InflightMetrics {
 
   def parseNumeric(n: Any): Double = n match {
     case n: Double => n
-    case n: Long => n.toDouble
-    case o => java.lang.Double.parseDouble(o.toString)
+    case n: Long   => n.toDouble
+    case o         => java.lang.Double.parseDouble(o.toString)
   }
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerBenchmarks.scala
index 176afc58..f80970dd 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerBenchmarks.scala
@@ -10,7 +10,7 @@ import java.util
 
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
-import org.apache.kafka.clients.consumer.{OffsetAndMetadata, OffsetCommitCallback}
+import org.apache.kafka.clients.consumer.{ OffsetAndMetadata, OffsetCommitCallback }
 import org.apache.kafka.common.TopicPartition
 
 import scala.annotation.tailrec
@@ -85,8 +85,7 @@ object KafkaConsumerBenchmarks extends LazyLogging {
         new OffsetCommitCallback {
           override def onComplete(map: util.Map[TopicPartition, OffsetAndMetadata], e: Exception): Unit =
             commitInProgress = false
-        }
-      )
+        })
       lastProcessedOffset = Map.empty[Int, Long]
     }
 
@@ -138,8 +137,7 @@ object KafkaConsumerBenchmarks extends LazyLogging {
         offsetMap.asJava,
         new OffsetCommitCallback {
           override def onComplete(map: util.Map[TopicPartition, OffsetAndMetadata], e: Exception): Unit = ()
-        }
-      )
+        })
       lastProcessedOffset = Map.empty[Int, Long]
     }
 
@@ -189,8 +187,7 @@ object KafkaConsumerBenchmarks extends LazyLogging {
             new OffsetCommitCallback {
               override def onComplete(map: util.Map[TopicPartition, OffsetAndMetadata], e: Exception): Unit =
                 consumer.resume(assignment)
-            }
-          )
+            })
         }
 
         val recordCount = records.count()
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerFixtureGen.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerFixtureGen.scala
index c84cafe6..c0575f16 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerFixtureGen.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaConsumerFixtureGen.scala
@@ -6,8 +6,8 @@
 package akka.kafka.benchmarks
 
 import akka.kafka.benchmarks.app.RunTestCommand
-import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
-import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
+import org.apache.kafka.clients.consumer.{ ConsumerConfig, KafkaConsumer }
+import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }
 
 import scala.jdk.CollectionConverters._
 
@@ -21,8 +21,7 @@ object KafkaConsumerFixtures extends PerfFixtureHelpers {
     c,
     msgCount => {
       KafkaConsumerTestFixture("topic", msgCount, null)
-    }
-  )
+    })
 
   def filledTopics(c: RunTestCommand) = FixtureGen[KafkaConsumerTestFixture](
     c,
@@ -38,6 +37,5 @@ object KafkaConsumerFixtures extends PerfFixtureHelpers {
         new KafkaConsumer[Array[Byte], String](consumerJavaProps, new ByteArrayDeserializer, new StringDeserializer)
       consumer.subscribe(Set(c.filledTopic.topic).asJava)
       KafkaConsumerTestFixture(c.filledTopic.topic, msgCount, consumer)
-    }
-  )
+    })
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerBenchmarks.scala
index aa0d0376..9b6ca58d 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerBenchmarks.scala
@@ -7,7 +7,7 @@ package akka.kafka.benchmarks
 
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
-import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata}
+import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
 
 import scala.concurrent.duration._
 
@@ -30,8 +30,7 @@ object KafkaProducerBenchmarks extends LazyLogging {
         new ProducerRecord[Array[Byte], String](fixture.topic, partition, null, msg),
         new Callback {
           override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = meter.mark()
-        }
-      )
+        })
 
       if (i % logStep == 0) {
         val lastPartEnd = System.nanoTime()
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerFixtureGen.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerFixtureGen.scala
index 92143711..09570f60 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerFixtureGen.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaProducerFixtureGen.scala
@@ -10,10 +10,10 @@ import akka.kafka.benchmarks.app.RunTestCommand
 import org.apache.kafka.clients.producer.KafkaProducer
 
 case class KafkaProducerTestFixture(topic: String,
-                                    msgCount: Int,
-                                    msgSize: Int,
-                                    producer: KafkaProducer[Array[Byte], String],
-                                    numberOfPartitions: Int) {
+    msgCount: Int,
+    msgSize: Int,
+    producer: KafkaProducer[Array[Byte], String],
+    numberOfPartitions: Int) {
   def close(): Unit = producer.close()
 }
 
@@ -23,8 +23,7 @@ object KafkaProducerFixtures extends PerfFixtureHelpers {
     c,
     msgCount => {
       KafkaProducerTestFixture("topic", msgCount, c.msgSize, null, c.numberOfPartitions)
-    }
-  )
+    })
 
   def initializedProducer(c: RunTestCommand) = FixtureGen[KafkaProducerTestFixture](
     c,
@@ -32,6 +31,5 @@ object KafkaProducerFixtures extends PerfFixtureHelpers {
       val ft = FilledTopic(msgCount = 1, msgSize = c.msgSize, numberOfPartitions = c.numberOfPartitions)
       val rawProducer = createTopic(ft, c.kafkaHost)
       KafkaProducerTestFixture(ft.topic, msgCount, c.msgSize, rawProducer, c.numberOfPartitions)
-    }
-  )
+    })
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionBenchmarks.scala
index 9983b6d1..20a925c7 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionBenchmarks.scala
@@ -9,7 +9,7 @@ import akka.kafka.benchmarks.KafkaConsumerBenchmarks.pollTimeoutMs
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.kafka.clients.consumer._
-import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata}
+import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
 import org.apache.kafka.common.TopicPartition
 
 import scala.annotation.tailrec
@@ -22,7 +22,7 @@ object KafkaTransactionBenchmarks extends LazyLogging {
    * Process records in a consume-transform-produce transactional workflow and commit every interval.
    */
   def consumeTransformProduceTransaction(commitInterval: FiniteDuration)(fixture: KafkaTransactionTestFixture,
-                                                                         meter: Meter): Unit = {
+      meter: Meter): Unit = {
     val consumer = fixture.consumer
     val producer = fixture.producer
     val msgCount = fixture.msgCount
@@ -62,13 +62,13 @@ object KafkaTransactionBenchmarks extends LazyLogging {
           lastProcessedOffset = record.offset()
 
           val producerRecord = new ProducerRecord(fixture.sinkTopic, record.partition(), record.key(), record.value())
-          producer.send(producerRecord, new Callback {
-            override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = meter.mark()
-          })
+          producer.send(producerRecord,
+            new Callback {
+              override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = meter.mark()
+            })
           if (lastProcessedOffset % loggedStep == 0)
             logger.info(
-              s"Transformed $lastProcessedOffset elements to Kafka (${100 * lastProcessedOffset / msgCount}%)"
-            )
+              s"Transformed $lastProcessedOffset elements to Kafka (${100 * lastProcessedOffset / msgCount}%)")
 
           if (System.nanoTime() >= lastCommit + commitInterval.toNanos) {
             doCommit()
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionFixtureGen.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionFixtureGen.scala
index 8e831abc..ad5c2187 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionFixtureGen.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/KafkaTransactionFixtureGen.scala
@@ -8,8 +8,8 @@ package akka.kafka.benchmarks
 import java.util.Locale
 
 import akka.kafka.benchmarks.app.RunTestCommand
-import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
-import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
+import org.apache.kafka.clients.consumer.{ ConsumerConfig, KafkaConsumer }
+import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerConfig }
 import org.apache.kafka.common.IsolationLevel
 import org.apache.kafka.common.serialization.{
   ByteArrayDeserializer,
@@ -21,11 +21,11 @@ import org.apache.kafka.common.serialization.{
 import scala.jdk.CollectionConverters._
 
 case class KafkaTransactionTestFixture(sourceTopic: String,
-                                       sinkTopic: String,
-                                       msgCount: Int,
-                                       groupId: String,
-                                       consumer: KafkaConsumer[Array[Byte], String],
-                                       producer: KafkaProducer[Array[Byte], String]) {
+    sinkTopic: String,
+    msgCount: Int,
+    groupId: String,
+    consumer: KafkaConsumer[Array[Byte], String],
+    producer: KafkaProducer[Array[Byte], String]) {
   def close(): Unit = {
     consumer.close()
     producer.close()
@@ -35,9 +35,10 @@ case class KafkaTransactionTestFixture(sourceTopic: String,
 object KafkaTransactionFixtures extends PerfFixtureHelpers {
 
   def noopFixtureGen(c: RunTestCommand): FixtureGen[KafkaTransactionTestFixture] =
-    FixtureGen[KafkaTransactionTestFixture](c, msgCount => {
-      KafkaTransactionTestFixture("sourceTopic", "sinkTopic", msgCount, "groupId", consumer = null, producer = null)
-    })
+    FixtureGen[KafkaTransactionTestFixture](c,
+      msgCount => {
+        KafkaTransactionTestFixture("sourceTopic", "sinkTopic", msgCount, "groupId", consumer = null, producer = null)
+      })
 
   def initialize(c: RunTestCommand) =
     FixtureGen[KafkaTransactionTestFixture](
@@ -55,7 +56,7 @@ object KafkaTransactionFixtures extends PerfFixtureHelpers {
         consumerJavaProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
         consumerJavaProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
         consumerJavaProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,
-                              IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH))
+          IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH))
         val consumer = new KafkaConsumer[Array[Byte], String](consumerJavaProps)
         consumer.subscribe(Set(c.filledTopic.topic).asJava)
 
@@ -68,6 +69,5 @@ object KafkaTransactionFixtures extends PerfFixtureHelpers {
         val producer = new KafkaProducer[Array[Byte], String](producerJavaProps)
 
         KafkaTransactionTestFixture(c.filledTopic.topic, sinkTopic, msgCount, groupId, consumer, producer)
-      }
-    )
+      })
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/PerfFixtureHelpers.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/PerfFixtureHelpers.scala
index d8b4a100..87003e9b 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/PerfFixtureHelpers.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/PerfFixtureHelpers.scala
@@ -8,15 +8,15 @@ package akka.kafka.benchmarks
 import java.time.Duration
 import java.util
 import java.util.concurrent.TimeUnit
-import java.util.{Arrays, UUID}
+import java.util.{ Arrays, UUID }
 
 import com.typesafe.scalalogging.LazyLogging
-import org.apache.kafka.clients.admin.{Admin, NewTopic}
+import org.apache.kafka.clients.admin.{ Admin, NewTopic }
 import org.apache.kafka.clients.producer._
-import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
+import org.apache.kafka.common.serialization.{ ByteArraySerializer, StringSerializer }
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Promise}
+import scala.concurrent.{ Await, Promise }
 import scala.language.postfixOps
 
 object PerfFixtureHelpers {
@@ -29,8 +29,7 @@ object PerfFixtureHelpers {
       msgSize: Int,
       numberOfPartitions: Int = 1,
       replicationFactor: Int = 1,
-      topic: String = randomId()
-  ) {
+      topic: String = randomId()) {
     def freshTopic: FilledTopic = copy(topic = randomId())
   }
 }
@@ -76,9 +75,7 @@ private[benchmarks] trait PerfFixtureHelpers extends LazyLogging {
     val result = admin.createTopics(
       Arrays.asList(
         new NewTopic(ft.topic, ft.numberOfPartitions, ft.replicationFactor.toShort)
-          .configs(new util.HashMap[String, String]())
-      )
-    )
+          .configs(new util.HashMap[String, String]())))
     result.all().get(10, TimeUnit.SECONDS)
     // fill topic with messages
     val producer =
@@ -104,8 +101,7 @@ private[benchmarks] trait PerfFixtureHelpers extends LazyLogging {
                   lastElementStoredPromise.failure(e)
                 }
               }
-          }
-        )
+          })
       }
     }
     val lastElementStoredFuture = lastElementStoredPromise.future
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerBenchmarks.scala
index 62af9ddc..56aa35fc 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerBenchmarks.scala
@@ -10,18 +10,18 @@ import java.util.concurrent.atomic.AtomicInteger
 import akka.actor.ActorSystem
 import akka.dispatch.ExecutionContexts
 import akka.kafka.ConsumerMessage.CommittableMessage
-import akka.kafka.benchmarks.InflightMetrics.{BrokerMetricRequest, ConsumerMetricRequest}
+import akka.kafka.benchmarks.InflightMetrics.{ BrokerMetricRequest, ConsumerMetricRequest }
 import akka.kafka.scaladsl.Committer
 import akka.kafka.scaladsl.Consumer.DrainingControl
-import akka.kafka.{CommitDelivery, CommitterSettings}
+import akka.kafka.{ CommitDelivery, CommitterSettings }
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Keep, Sink, Source}
+import akka.stream.scaladsl.{ Keep, Sink, Source }
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.kafka.clients.consumer.ConsumerRecord
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Promise}
+import scala.concurrent.{ Await, ExecutionContext, Promise }
 import scala.language.postfixOps
 import scala.util.Success
 
@@ -67,12 +67,11 @@ object ReactiveKafkaConsumerBenchmarks extends LazyLogging with InflightMetrics
    * metrics.
    */
   def consumePlainInflightMetrics(fixture: NonCommittableFixture,
-                                  meter: Meter,
-                                  consumerMetricNames: List[ConsumerMetricRequest],
-                                  brokerMetricNames: List[BrokerMetricRequest],
-                                  brokerJmxUrls: List[String])(
-      implicit mat: Materializer
-  ): List[List[String]] = {
+      meter: Meter,
+      consumerMetricNames: List[ConsumerMetricRequest],
+      brokerMetricNames: List[BrokerMetricRequest],
+      brokerJmxUrls: List[String])(
+      implicit mat: Materializer): List[List[String]] = {
     logger.debug("Creating and starting a stream")
     val (control, future) = fixture.source
       .take(fixture.msgCount.toLong)
@@ -101,7 +100,7 @@ object ReactiveKafkaConsumerBenchmarks extends LazyLogging with InflightMetrics
    * Reads elements from Kafka source and commits a batch as soon as it's possible.
    */
   def consumerAtLeastOnceBatched(batchSize: Int)(fixture: CommittableFixture, meter: Meter)(implicit sys: ActorSystem,
-                                                                                            mat: Materializer): Unit = {
+      mat: Materializer): Unit = {
     logger.debug("Creating and starting a stream")
     val committerDefaults = CommitterSettings(sys)
     val promise = Promise[Unit]()
@@ -128,8 +127,8 @@ object ReactiveKafkaConsumerBenchmarks extends LazyLogging with InflightMetrics
    * Reads elements from Kafka source and commits in batches with no backpressure on committing.
    */
   def consumerCommitAndForget(
-      commitBatchSize: Int
-  )(fixture: CommittableFixture, meter: Meter)(implicit sys: ActorSystem, mat: Materializer): Unit = {
+      commitBatchSize: Int)(fixture: CommittableFixture, meter: Meter)(
+      implicit sys: ActorSystem, mat: Materializer): Unit = {
     logger.debug("Creating and starting a stream")
     val committerDefaults = CommitterSettings(sys)
     val promise = Promise[Unit]()
@@ -145,8 +144,8 @@ object ReactiveKafkaConsumerBenchmarks extends LazyLogging with InflightMetrics
       }
       .toMat(
         Committer
-          .sink(committerDefaults.withDelivery(CommitDelivery.SendAndForget).withMaxBatch(commitBatchSize.toLong))
-      )(DrainingControl.apply)
+          .sink(committerDefaults.withDelivery(CommitDelivery.SendAndForget).withMaxBatch(commitBatchSize.toLong)))(
+        DrainingControl.apply)
       .run()
 
     Await.result(promise.future, streamingTimeout)
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerFixtures.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerFixtures.scala
index 036a37c2..12671c59 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerFixtures.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaConsumerFixtures.scala
@@ -10,15 +10,15 @@ import akka.kafka.ConsumerMessage.CommittableMessage
 import akka.kafka.benchmarks.app.RunTestCommand
 import akka.kafka.scaladsl.Consumer
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.{ConsumerSettings, Subscriptions}
+import akka.kafka.{ ConsumerSettings, Subscriptions }
 import akka.stream.scaladsl.Source
-import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
-import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
+import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord }
+import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }
 
 case class ReactiveKafkaConsumerTestFixture[T](topic: String,
-                                               msgCount: Int,
-                                               source: Source[T, Control],
-                                               numberOfPartitions: Int)
+    msgCount: Int,
+    source: Source[T, Control],
+    numberOfPartitions: Int)
 
 object ReactiveKafkaConsumerFixtures extends PerfFixtureHelpers {
 
@@ -37,8 +37,7 @@ object ReactiveKafkaConsumerFixtures extends PerfFixtureHelpers {
         val settings = createConsumerSettings(c.kafkaHost)
         val source = Consumer.plainSource(settings, Subscriptions.topics(c.filledTopic.topic))
         ReactiveKafkaConsumerTestFixture(c.filledTopic.topic, msgCount, source, c.numberOfPartitions)
-      }
-    )
+      })
 
   def committableSources(c: RunTestCommand)(implicit actorSystem: ActorSystem) =
     FixtureGen[ReactiveKafkaConsumerTestFixture[CommittableMessage[Array[Byte], String]]](
@@ -48,15 +47,13 @@ object ReactiveKafkaConsumerFixtures extends PerfFixtureHelpers {
         val settings = createConsumerSettings(c.kafkaHost)
         val source = Consumer.committableSource(settings, Subscriptions.topics(c.filledTopic.topic))
         ReactiveKafkaConsumerTestFixture(c.filledTopic.topic, msgCount, source, c.numberOfPartitions)
-      }
-    )
+      })
 
   def noopFixtureGen(c: RunTestCommand) =
     FixtureGen[ReactiveKafkaConsumerTestFixture[ConsumerRecord[Array[Byte], String]]](
       c,
       msgCount => {
         ReactiveKafkaConsumerTestFixture("topic", msgCount, null, c.numberOfPartitions)
-      }
-    )
+      })
 
 }
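
These fixtures reduce to wiring ConsumerSettings into a plain or committable
source. A sketch of the plain variant with hypothetical values (the actual
createConsumerSettings helper is not part of this diff):

    import akka.actor.ActorSystem
    import akka.kafka.scaladsl.Consumer
    import akka.kafka.{ ConsumerSettings, Subscriptions }
    import org.apache.kafka.clients.consumer.ConsumerConfig
    import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }

    implicit val system: ActorSystem = ActorSystem("fixtures-sketch")

    // A fresh group id per run plus "earliest" reset makes a pre-filled topic
    // readable from the beginning, as benchmark fixtures require.
    val settings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      .withBootstrapServers("localhost:9092") // assumption: local broker
      .withGroupId(java.util.UUID.randomUUID().toString)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val source = Consumer.plainSource(settings, Subscriptions.topics("bench-topic"))
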
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerBenchmarks.scala
index 94530087..8c891c3c 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerBenchmarks.scala
@@ -7,10 +7,10 @@ package akka.kafka.benchmarks
 
 import akka.kafka.ConsumerMessage.CommittableMessage
 import akka.kafka.ProducerMessage
-import akka.kafka.ProducerMessage.{Result, Results}
+import akka.kafka.ProducerMessage.{ Result, Results }
 import akka.kafka.benchmarks.ReactiveKafkaProducerFixtures.ReactiveKafkaProducerTestFixture
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.kafka.clients.producer.ProducerRecord
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerFixtures.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerFixtures.scala
index a2e86ee0..5d5b1506 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerFixtures.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaProducerFixtures.scala
@@ -7,13 +7,13 @@ package akka.kafka.benchmarks
 
 import akka.NotUsed
 import akka.actor.ActorSystem
-import akka.kafka.ProducerMessage.{Envelope, Results}
+import akka.kafka.ProducerMessage.{ Envelope, Results }
 import akka.kafka.ProducerSettings
 import akka.kafka.benchmarks.app.RunTestCommand
 import akka.kafka.scaladsl.Producer
 import akka.stream.scaladsl.Flow
 import org.apache.kafka.clients.consumer.ConsumerRecord
-import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
+import org.apache.kafka.common.serialization.{ ByteArraySerializer, StringSerializer }
 
 object ReactiveKafkaProducerFixtures extends PerfFixtureHelpers {
 
@@ -26,10 +26,10 @@ object ReactiveKafkaProducerFixtures extends PerfFixtureHelpers {
   type FlowType[PassThrough] = Flow[In[PassThrough], Out[PassThrough], NotUsed]
 
   case class ReactiveKafkaProducerTestFixture[PassThrough](topic: String,
-                                                           msgCount: Int,
-                                                           msgSize: Int,
-                                                           flow: FlowType[PassThrough],
-                                                           numberOfPartitions: Int)
+      msgCount: Int,
+      msgSize: Int,
+      flow: FlowType[PassThrough],
+      numberOfPartitions: Int)
 
   private def createProducerSettings(kafkaHost: String)(implicit actorSystem: ActorSystem): ProducerSettings[K, V] =
     ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
@@ -43,15 +43,13 @@ object ReactiveKafkaProducerFixtures extends PerfFixtureHelpers {
         val flow: FlowType[Int] = Producer.flexiFlow(createProducerSettings(c.kafkaHost))
         fillTopic(c.filledTopic.copy(msgCount = 1), c.kafkaHost)
         ReactiveKafkaProducerTestFixture(c.filledTopic.topic, msgCount, c.msgSize, flow, c.numberOfPartitions)
-      }
-    )
+      })
 
   def noopFixtureGen(c: RunTestCommand) =
     FixtureGen[ReactiveKafkaConsumerTestFixture[ConsumerRecord[Array[Byte], String]]](
       c,
       msgCount => {
         ReactiveKafkaConsumerTestFixture("topic", msgCount, null, c.numberOfPartitions)
-      }
-    )
+      })
 
 }
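
Producer.flexiFlow, used by the fixture above, emits one Results element per
Envelope and carries an arbitrary pass-through value. A minimal sketch with
hypothetical broker and topic names:

    import akka.actor.ActorSystem
    import akka.kafka.scaladsl.Producer
    import akka.kafka.{ ProducerMessage, ProducerSettings }
    import akka.stream.scaladsl.{ Sink, Source }
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.{ ByteArraySerializer, StringSerializer }

    implicit val system: ActorSystem = ActorSystem("producer-sketch")

    val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers("localhost:9092") // assumption: local broker

    // The message number rides along as the pass-through and comes back in the Result.
    val done = Source(1 to 100)
      .map(n => ProducerMessage.single(new ProducerRecord[Array[Byte], String]("demo-topic", n.toString), n))
      .via(Producer.flexiFlow(producerSettings))
      .runWith(Sink.foreach {
        case ProducerMessage.Result(metadata, message) =>
          println(s"wrote #${message.passThrough} at offset ${metadata.offset()}")
        case other =>
          println(s"other result: $other")
      })
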
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionBenchmarks.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionBenchmarks.scala
index 71f8ec37..7a5c6de2 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionBenchmarks.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionBenchmarks.scala
@@ -6,16 +6,16 @@
 package akka.kafka.benchmarks
 
 import akka.kafka.ProducerMessage
-import akka.kafka.ProducerMessage.{Result, Results}
+import akka.kafka.ProducerMessage.{ Result, Results }
 import akka.kafka.benchmarks.ReactiveKafkaTransactionFixtures._
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Keep, Sink}
+import akka.stream.scaladsl.{ Keep, Sink }
 import com.codahale.metrics.Meter
 import com.typesafe.scalalogging.LazyLogging
 import org.apache.kafka.clients.producer.ProducerRecord
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Promise}
+import scala.concurrent.{ Await, Promise }
 import scala.language.postfixOps
 import scala.util.Success
 
@@ -27,7 +27,7 @@ object ReactiveKafkaTransactionBenchmarks extends LazyLogging {
    * Process records in a consume-transform-produce transactional workflow and commit every interval.
    */
   def consumeTransformProduceTransaction(fixture: TransactionFixture,
-                                         meter: Meter)(implicit mat: Materializer): Unit = {
+      meter: Meter)(implicit mat: Materializer): Unit = {
     logger.debug("Creating and starting a stream")
     val msgCount = fixture.msgCount
     val sinkTopic = fixture.sinkTopic
@@ -40,7 +40,7 @@ object ReactiveKafkaTransactionBenchmarks extends LazyLogging {
     val control = source
       .map { msg =>
         ProducerMessage.single(new ProducerRecord[Array[Byte], String](sinkTopic, msg.record.value()),
-                               msg.partitionOffset)
+          msg.partitionOffset)
       }
       .via(fixture.flow)
       .toMat(Sink.foreach {
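
The benchmark pairs Transactional.source with Transactional.flow; the same
consume-transform-produce shape is easiest to see with the sink variant. A
hedged sketch, assuming a local broker and hypothetical topic names:

    import akka.actor.ActorSystem
    import akka.kafka.scaladsl.Transactional
    import akka.kafka.{ ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions }
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.{
      ByteArrayDeserializer,
      ByteArraySerializer,
      StringDeserializer,
      StringSerializer
    }

    implicit val system: ActorSystem = ActorSystem("txn-sketch")

    val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      .withBootstrapServers("localhost:9092") // assumption: local broker
      .withGroupId("txn-group")

    val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers("localhost:9092")

    // Each message's partition offset travels with the produced record so that
    // consumption and production commit atomically in one Kafka transaction.
    val control = Transactional
      .source(consumerSettings, Subscriptions.topics("source-topic"))
      .map { msg =>
        ProducerMessage.single(
          new ProducerRecord[Array[Byte], String]("sink-topic", msg.record.value()),
          msg.partitionOffset)
      }
      .to(Transactional.sink(producerSettings, "txn-sketch-id"))
      .run()
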
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionFixtures.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionFixtures.scala
index 7dce07d3..c7989271 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionFixtures.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/ReactiveKafkaTransactionFixtures.scala
@@ -8,12 +8,12 @@ package akka.kafka.benchmarks
 import akka.NotUsed
 import akka.actor.ActorSystem
 import akka.kafka.ConsumerMessage.TransactionalMessage
-import akka.kafka.ProducerMessage.{Envelope, Results}
+import akka.kafka.ProducerMessage.{ Envelope, Results }
 import akka.kafka.benchmarks.app.RunTestCommand
 import akka.kafka.scaladsl.Consumer.Control
 import akka.kafka.scaladsl.Transactional
-import akka.kafka.{ConsumerMessage, ConsumerSettings, ProducerSettings, Subscriptions}
-import akka.stream.scaladsl.{Flow, Source}
+import akka.kafka.{ ConsumerMessage, ConsumerSettings, ProducerSettings, Subscriptions }
+import akka.stream.scaladsl.{ Flow, Source }
 import org.apache.kafka.clients.consumer.ConsumerConfig
 import org.apache.kafka.common.serialization.{
   ByteArrayDeserializer,
@@ -25,10 +25,10 @@ import org.apache.kafka.common.serialization.{
 import scala.concurrent.duration.FiniteDuration
 
 case class ReactiveKafkaTransactionTestFixture[SOut, FIn, FOut](sourceTopic: String,
-                                                                sinkTopic: String,
-                                                                msgCount: Int,
-                                                                source: Source[SOut, Control],
-                                                                flow: Flow[FIn, FOut, NotUsed])
+    sinkTopic: String,
+    msgCount: Int,
+    source: Source[SOut, Control],
+    flow: Flow[FIn, FOut, NotUsed])
 
 object ReactiveKafkaTransactionFixtures extends PerfFixtureHelpers {
   type Key = Array[Byte]
@@ -46,8 +46,7 @@ object ReactiveKafkaTransactionFixtures extends PerfFixtureHelpers {
       .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
 
   private def createProducerSettings(
-      kafkaHost: String
-  )(implicit actorSystem: ActorSystem): ProducerSettings[Array[Byte], String] =
+      kafkaHost: String)(implicit actorSystem: ActorSystem): ProducerSettings[Array[Byte], String] =
     ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
       .withBootstrapServers(kafkaHost)
 
@@ -66,15 +65,15 @@ object ReactiveKafkaTransactionFixtures extends PerfFixtureHelpers {
         val flow: Flow[KProducerMessage, KResult, NotUsed] = Transactional.flow(producerSettings, randomId())
 
         ReactiveKafkaTransactionTestFixture[KTransactionMessage, KProducerMessage, KResult](c.filledTopic.topic,
-                                                                                            sinkTopic,
-                                                                                            msgCount,
-                                                                                            source,
-                                                                                            flow)
-      }
-    )
+          sinkTopic,
+          msgCount,
+          source,
+          flow)
+      })
 
   def noopFixtureGen(c: RunTestCommand) =
-    FixtureGen[ReactiveKafkaTransactionTestFixture[KTransactionMessage, KProducerMessage, KResult]](c, msgCount => {
-      ReactiveKafkaTransactionTestFixture("sourceTopic", "sinkTopic", msgCount, source = null, flow = null)
-    })
+    FixtureGen[ReactiveKafkaTransactionTestFixture[KTransactionMessage, KProducerMessage, KResult]](c,
+      msgCount => {
+        ReactiveKafkaTransactionTestFixture("sourceTopic", "sinkTopic", msgCount, source = null, flow = null)
+      })
 }
diff --git a/benchmarks/src/main/scala/akka/kafka/benchmarks/Timed.scala b/benchmarks/src/main/scala/akka/kafka/benchmarks/Timed.scala
index e160f55a..1aac253e 100644
--- a/benchmarks/src/main/scala/akka/kafka/benchmarks/Timed.scala
+++ b/benchmarks/src/main/scala/akka/kafka/benchmarks/Timed.scala
@@ -6,18 +6,18 @@
 package akka.kafka.benchmarks
 
 import java.nio.file.Paths
-import java.util.concurrent.{ForkJoinPool, TimeUnit}
+import java.util.concurrent.{ ForkJoinPool, TimeUnit }
 
-import akka.kafka.benchmarks.InflightMetrics.{BrokerMetricRequest, ConsumerMetricRequest}
+import akka.kafka.benchmarks.InflightMetrics.{ BrokerMetricRequest, ConsumerMetricRequest }
 import akka.kafka.benchmarks.app.RunTestCommand
 import akka.stream.Materializer
 import akka.stream.alpakka.csv.scaladsl.CsvFormatting
-import akka.stream.scaladsl.{FileIO, Sink, Source}
+import akka.stream.scaladsl.{ FileIO, Sink, Source }
 import com.codahale.metrics._
 import com.typesafe.scalalogging.LazyLogging
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Future}
+import scala.concurrent.{ Await, ExecutionContext, Future }
 
 object Timed extends LazyLogging {
   private val benchmarkReportBasePath = Paths.get("benchmarks", "target")
@@ -39,8 +39,7 @@ object Timed extends LazyLogging {
       .build(benchmarkReportBasePath.toFile)
 
   def inflightMetricsReport(inflight: List[List[String]], testName: String)(
-      implicit mat: Materializer
-  ) = {
+      implicit mat: Materializer) = {
     val metricsReportPath = benchmarkReportBasePath.resolve(Paths.get(s"$testName-inflight-metrics.csv"))
     val metricsReportDetailPath = benchmarkReportBasePath.resolve(Paths.get(s"$testName-inflight-metrics-details.csv"))
     require(inflight.size > 1, "At least 2 records (a header and a data row) are required to make a report.")
@@ -76,8 +75,8 @@ object Timed extends LazyLogging {
       brokerMetricNames: List[BrokerMetricRequest],
       brokerJmxUrls: List[String],
       fixtureGen: FixtureGen[F],
-      testBody: (F, Meter, List[ConsumerMetricRequest], List[BrokerMetricRequest], List[String]) => List[List[String]]
-  )(implicit mat: Materializer): Unit = {
+      testBody: (F, Meter, List[ConsumerMetricRequest], List[BrokerMetricRequest], List[String]) => List[List[String]])(
+      implicit mat: Materializer): Unit = {
     val name = command.testName
     val msgCount = command.msgCount
     logger.info(s"Generating fixture for $name ${command.filledTopic}")
diff --git a/build.sbt b/build.sbt
index 2dd9e2b2..181fdf92 100644
--- a/build.sbt
+++ b/build.sbt
@@ -1,4 +1,4 @@
-import com.typesafe.tools.mima.core.{Problem, ProblemFilters}
+import com.typesafe.tools.mima.core.{ Problem, ProblemFilters }
 
 enablePlugins(AutomateHeaderPlugin)
 
@@ -27,29 +27,24 @@ val confluentLibsExclusionRules = Seq(
   ExclusionRule("log4j", "log4j"),
   ExclusionRule("org.slf4j", "slf4j-log4j12"),
   ExclusionRule("com.typesafe.scala-logging"),
-  ExclusionRule("org.apache.kafka")
-)
+  ExclusionRule("org.apache.kafka"))
 
 ThisBuild / resolvers ++= Seq(
   // for Jupiter interface (JUnit 5)
-  Resolver.jcenterRepo
-)
+  Resolver.jcenterRepo)
 
 TaskKey[Unit]("verifyCodeFmt") := {
   javafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
     throw new MessageOnlyException(
-      "Unformatted Java code found. Please run 'javafmtAll' and commit the reformatted code"
-    )
+      "Unformatted Java code found. Please run 'javafmtAll' and commit the reformatted code")
   }
   scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
     throw new MessageOnlyException(
-      "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code"
-    )
+      "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code")
   }
   (Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ =>
     throw new MessageOnlyException(
-      "Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code"
-    )
+      "Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code")
   }
 }
 
@@ -63,9 +58,9 @@ val commonSettings = Def.settings(
   homepage := Some(url("https://doc.akka.io/docs/alpakka-kafka/current")),
   scmInfo := Some(ScmInfo(url("https://github.com/akka/alpakka-kafka"), "git@github.com:akka/alpakka-kafka.git")),
   developers += Developer("contributors",
-                          "Contributors",
-                          "",
-                          url("https://github.com/akka/alpakka-kafka/graphs/contributors")),
+    "Contributors",
+    "",
+    url("https://github.com/akka/alpakka-kafka/graphs/contributors")),
   startYear := Some(2014),
   licenses := Seq("Apache-2.0" -> url("https://opensource.org/licenses/Apache-2.0")),
   description := "Alpakka is a Reactive Enterprise Integration library for Java and Scala, based on Reactive Streams and Akka.",
@@ -73,34 +68,31 @@ val commonSettings = Def.settings(
   scalaVersion := Scala213,
   crossVersion := CrossVersion.binary,
   javacOptions ++= Seq(
-      "-Xlint:deprecation",
-      "-Xlint:unchecked"
-    ),
+    "-Xlint:deprecation",
+    "-Xlint:unchecked"),
   scalacOptions ++= Seq(
-      "-encoding",
-      "UTF-8", // yes, this is 2 args
-      "-Wconf:cat=feature:w,cat=deprecation:w,cat=unchecked:w,cat=lint:w,cat=unused:w,cat=w-flag:w"
-    ) ++ {
-      if (insideCI.value && !Nightly) Seq("-Werror")
-      else Seq.empty
-    },
+    "-encoding",
+    "UTF-8", // yes, this is 2 args
+    "-Wconf:cat=feature:w,cat=deprecation:w,cat=unchecked:w,cat=lint:w,cat=unused:w,cat=w-flag:w") ++ {
+    if (insideCI.value && !Nightly) Seq("-Werror")
+    else Seq.empty
+  },
   Compile / doc / scalacOptions := scalacOptions.value ++ Seq(
-      "-Wconf:cat=scaladoc:i",
-      "-doc-title",
-      "Alpakka Kafka",
-      "-doc-version",
-      version.value,
-      "-sourcepath",
-      (ThisBuild / baseDirectory).value.toString,
-      "-skip-packages",
-      "akka.pattern:scala", // for some reason Scaladoc creates this
-      "-doc-source-url", {
-        val branch = if (isSnapshot.value) "master" else s"v${version.value}"
-        s"https://github.com/akka/alpakka-kafka/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}"
-      },
-      "-doc-canonical-base-url",
-      "https://doc.akka.io/api/alpakka-kafka/current/"
-    ),
+    "-Wconf:cat=scaladoc:i",
+    "-doc-title",
+    "Alpakka Kafka",
+    "-doc-version",
+    version.value,
+    "-sourcepath",
+    (ThisBuild / baseDirectory).value.toString,
+    "-skip-packages",
+    "akka.pattern:scala", // for some reason Scaladoc creates this
+    "-doc-source-url", {
+      val branch = if (isSnapshot.value) "master" else s"v${version.value}"
+      s"https://github.com/akka/alpakka-kafka/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}"
+    },
+    "-doc-canonical-base-url",
+    "https://doc.akka.io/api/alpakka-kafka/current/"),
   Compile / doc / scalacOptions -= "-Xfatal-warnings",
   // show full stack traces and test case durations
   testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-oDF"),
@@ -114,15 +106,12 @@ val commonSettings = Def.settings(
   javafmtOnCompile := false,
   ThisBuild / mimaReportSignatureProblems := true,
   headerLicense := Some(
-      HeaderLicense.Custom(
-        """|Copyright (C) 2014 - 2016 Softwaremill <https://softwaremill.com>
+    HeaderLicense.Custom(
+      """|Copyright (C) 2014 - 2016 Softwaremill <https://softwaremill.com>
            |Copyright (C) 2016 - 2020 Lightbend Inc. <https://www.lightbend.com>
-           |""".stripMargin
-      )
-    ),
+           |""".stripMargin)),
   projectInfoVersion := (if (isSnapshot.value) "snapshot" else version.value),
-  sonatypeProfileName := "com.typesafe"
-)
+  sonatypeProfileName := "com.typesafe")
 
 lazy val `pekko-connectors-kafka` =
   project
@@ -171,8 +160,7 @@ lazy val `pekko-connectors-kafka` =
             |
             |  benchmarks/IntegrationTest/testOnly *.AlpakkaKafkaPlainConsumer
             |    run a single benchmark backed by Docker containers
-          """.stripMargin
-    )
+          """.stripMargin)
     .aggregate(core, testkit, `cluster-sharding`, tests, benchmarks, docs)
 
 lazy val core = project
@@ -184,13 +172,11 @@ lazy val core = project
     name := "pekko-connectors-kafka",
     AutomaticModuleName.settings("akka.stream.alpakka.kafka"),
     libraryDependencies ++= Seq(
-        "com.typesafe.akka" %% "akka-stream" % akkaVersion,
-        "com.typesafe.akka" %% "akka-discovery" % akkaVersion % Provided,
-        "org.apache.kafka" % "kafka-clients" % kafkaVersion
-      ),
-    mimaPreviousArtifacts := Set.empty, //temporarily disable mima checks
-    mimaBinaryIssueFilters += ProblemFilters.exclude[Problem]("akka.kafka.internal.*")
-  )
+      "com.typesafe.akka" %% "akka-stream" % akkaVersion,
+      "com.typesafe.akka" %% "akka-discovery" % akkaVersion % Provided,
+      "org.apache.kafka" % "kafka-clients" % kafkaVersion),
+    mimaPreviousArtifacts := Set.empty, // temporarily disable mima checks
+    mimaBinaryIssueFilters += ProblemFilters.exclude[Problem]("akka.kafka.internal.*"))
 
 lazy val testkit = project
   .dependsOn(core)
@@ -202,15 +188,13 @@ lazy val testkit = project
     AutomaticModuleName.settings("akka.stream.alpakka.kafka.testkit"),
     JupiterKeys.junitJupiterVersion := "5.8.2",
     libraryDependencies ++= Seq(
-        "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion,
-        "org.testcontainers" % "kafka" % testcontainersVersion % Provided,
-        "org.scalatest" %% "scalatest" % scalatestVersion % Provided,
-        "junit" % "junit" % "4.13.2" % Provided,
-        "org.junit.jupiter" % "junit-jupiter-api" % JupiterKeys.junitJupiterVersion.value % Provided
-      ),
-    mimaPreviousArtifacts := Set.empty, //temporarily disable mima checks
-    mimaBinaryIssueFilters += ProblemFilters.exclude[Problem]("akka.kafka.testkit.internal.*")
-  )
+      "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion,
+      "org.testcontainers" % "kafka" % testcontainersVersion % Provided,
+      "org.scalatest" %% "scalatest" % scalatestVersion % Provided,
+      "junit" % "junit" % "4.13.2" % Provided,
+      "org.junit.jupiter" % "junit-jupiter-api" % JupiterKeys.junitJupiterVersion.value % Provided),
+    mimaPreviousArtifacts := Set.empty, // temporarily disable mima checks
+    mimaBinaryIssueFilters += ProblemFilters.exclude[Problem]("akka.kafka.testkit.internal.*"))
 
 lazy val `cluster-sharding` = project
   .in(file("./cluster-sharding"))
@@ -222,9 +206,8 @@ lazy val `cluster-sharding` = project
     name := "pekko-connectors-kafka-cluster-sharding",
     AutomaticModuleName.settings("akka.stream.alpakka.kafka.cluster.sharding"),
     libraryDependencies ++= Seq(
-        "com.typesafe.akka" %% "akka-cluster-sharding-typed" % akkaVersion
-      ),
-    mimaPreviousArtifacts := Set.empty //temporarily disable mima checks
+      "com.typesafe.akka" %% "akka-cluster-sharding-typed" % akkaVersion),
+    mimaPreviousArtifacts := Set.empty // temporarily disable mima checks
   )
 
 lazy val tests = project
@@ -238,36 +221,34 @@ lazy val tests = project
   .settings(
     name := "pekko-connectors-kafka-tests",
     libraryDependencies ++= Seq(
-        "com.typesafe.akka" %% "akka-discovery" % akkaVersion,
-        "com.google.protobuf" % "protobuf-java" % "3.19.1", // use the same version as in scalapb
-        "io.confluent" % "kafka-avro-serializer" % confluentAvroSerializerVersion % Test excludeAll (confluentLibsExclusionRules: _*),
-        // See https://github.com/sbt/sbt/issues/3618#issuecomment-448951808
-        "javax.ws.rs" % "javax.ws.rs-api" % "2.1.1" artifacts Artifact("javax.ws.rs-api", "jar", "jar"),
-        "org.testcontainers" % "kafka" % testcontainersVersion % Test,
-        "org.scalatest" %% "scalatest" % scalatestVersion % Test,
-        "io.spray" %% "spray-json" % "1.3.6" % Test,
-        "com.fasterxml.jackson.core" % "jackson-databind" % "2.13.3" % Test, // ApacheV2
-        "org.junit.vintage" % "junit-vintage-engine" % JupiterKeys.junitVintageVersion.value % Test,
-        // See http://hamcrest.org/JavaHamcrest/distributables#upgrading-from-hamcrest-1x
-        "org.hamcrest" % "hamcrest-library" % "2.2" % Test,
-        "org.hamcrest" % "hamcrest" % "2.2" % Test,
-        "net.aichler" % "jupiter-interface" % JupiterKeys.jupiterVersion.value % Test,
-        "com.typesafe.akka" %% "akka-slf4j" % akkaVersion % Test,
-        "ch.qos.logback" % "logback-classic" % "1.2.11" % Test,
-        "org.slf4j" % "log4j-over-slf4j" % slf4jVersion % Test,
-        // Schema registry uses Glassfish which uses java.util.logging
-        "org.slf4j" % "jul-to-slf4j" % slf4jVersion % Test,
-        "org.mockito" % "mockito-core" % "4.6.1" % Test,
-        "com.thesamet.scalapb" %% "scalapb-runtime" % "0.10.11" % Test
-      ),
+      "com.typesafe.akka" %% "akka-discovery" % akkaVersion,
+      "com.google.protobuf" % "protobuf-java" % "3.19.1", // use the same version as in scalapb
+      ("io.confluent" % "kafka-avro-serializer" % confluentAvroSerializerVersion % Test).excludeAll(
+        confluentLibsExclusionRules: _*),
+      // See https://github.com/sbt/sbt/issues/3618#issuecomment-448951808
+      ("javax.ws.rs" % "javax.ws.rs-api" % "2.1.1").artifacts(Artifact("javax.ws.rs-api", "jar", "jar")),
+      "org.testcontainers" % "kafka" % testcontainersVersion % Test,
+      "org.scalatest" %% "scalatest" % scalatestVersion % Test,
+      "io.spray" %% "spray-json" % "1.3.6" % Test,
+      "com.fasterxml.jackson.core" % "jackson-databind" % "2.13.3" % Test, // ApacheV2
+      "org.junit.vintage" % "junit-vintage-engine" % JupiterKeys.junitVintageVersion.value % Test,
+      // See http://hamcrest.org/JavaHamcrest/distributables#upgrading-from-hamcrest-1x
+      "org.hamcrest" % "hamcrest-library" % "2.2" % Test,
+      "org.hamcrest" % "hamcrest" % "2.2" % Test,
+      "net.aichler" % "jupiter-interface" % JupiterKeys.jupiterVersion.value % Test,
+      "com.typesafe.akka" %% "akka-slf4j" % akkaVersion % Test,
+      "ch.qos.logback" % "logback-classic" % "1.2.11" % Test,
+      "org.slf4j" % "log4j-over-slf4j" % slf4jVersion % Test,
+      // Schema registry uses Glassfish which uses java.util.logging
+      "org.slf4j" % "jul-to-slf4j" % slf4jVersion % Test,
+      "org.mockito" % "mockito-core" % "4.6.1" % Test,
+      "com.thesamet.scalapb" %% "scalapb-runtime" % "0.10.11" % Test),
     resolvers ++= Seq(
-        "Confluent Maven Repo" at "https://packages.confluent.io/maven/"
-      ),
+      "Confluent Maven Repo".at("https://packages.confluent.io/maven/")),
     publish / skip := true,
     Test / fork := true,
     Test / parallelExecution := false,
-    IntegrationTest / parallelExecution := false
-  )
+    IntegrationTest / parallelExecution := false)
 
 lazy val docs = project
   .enablePlugins(AkkaParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin)
@@ -281,53 +262,50 @@ lazy val docs = project
     Preprocess / siteSubdirName := s"api/alpakka-kafka/${projectInfoVersion.value}",
     Preprocess / sourceDirectory := (LocalRootProject / ScalaUnidoc / unidoc / target).value,
     Preprocess / preprocessRules := Seq(
-        ("\\.java\\.scala".r, _ => ".java"),
-        ("https://javadoc\\.io/page/".r, _ => "https://javadoc\\.io/static/"),
-        // bug in Scaladoc
-        ("https://docs\\.oracle\\.com/en/java/javase/11/docs/api/java.base/java/time/Duration\\$.html".r,
-         _ => "https://docs\\.oracle\\.com/en/java/javase/11/docs/api/java.base/java/time/Duration.html"),
-        // Add Java module name https://github.com/ThoughtWorksInc/sbt-api-mappings/issues/58
-        ("https://docs\\.oracle\\.com/en/java/javase/11/docs/api/".r,
-         _ => "https://docs\\.oracle\\.com/en/java/javase/11/docs/api/")
-      ),
+      ("\\.java\\.scala".r, _ => ".java"),
+      ("https://javadoc\\.io/page/".r, _ => "https://javadoc\\.io/static/"),
+      // bug in Scaladoc
+      ("https://docs\\.oracle\\.com/en/java/javase/11/docs/api/java.base/java/time/Duration\\$.html".r,
+        _ => "https://docs\\.oracle\\.com/en/java/javase/11/docs/api/java.base/java/time/Duration.html"),
+      // Add Java module name https://github.com/ThoughtWorksInc/sbt-api-mappings/issues/58
+      ("https://docs\\.oracle\\.com/en/java/javase/11/docs/api/".r,
+        _ => "https://docs\\.oracle\\.com/en/java/javase/11/docs/api/")),
     Paradox / siteSubdirName := s"docs/alpakka-kafka/${projectInfoVersion.value}",
     paradoxGroups := Map("Language" -> Seq("Java", "Scala")),
     paradoxProperties ++= Map(
-        "image.base_url" -> "images/",
-        "confluent.version" -> confluentAvroSerializerVersion,
-        "scalatest.version" -> scalatestVersion,
-        "scaladoc.akka.kafka.base_url" -> s"/${(Preprocess / siteSubdirName).value}/",
-        "javadoc.akka.kafka.base_url" -> "",
-        // Akka
-        "akka.version" -> akkaVersion,
-        "extref.akka.base_url" -> s"https://doc.akka.io/docs/akka/$AkkaBinaryVersionForDocs/%s",
-        "scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka/$AkkaBinaryVersionForDocs/",
-        "javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka/$AkkaBinaryVersionForDocs/",
-        "javadoc.akka.link_style" -> "direct",
-        "extref.akka-management.base_url" -> s"https://doc.akka.io/docs/akka-management/current/%s",
-        // Kafka
-        "kafka.version" -> kafkaVersion,
-        "extref.kafka.base_url" -> s"https://kafka.apache.org/$KafkaVersionForDocs/%s",
-        "javadoc.org.apache.kafka.base_url" -> s"https://kafka.apache.org/$KafkaVersionForDocs/javadoc/",
-        "javadoc.org.apache.kafka.link_style" -> "direct",
-        // Java
-        "extref.java-docs.base_url" -> "https://docs.oracle.com/en/java/javase/11/%s",
-        "javadoc.base_url" -> "https://docs.oracle.com/en/java/javase/11/docs/api/java.base/",
-        "javadoc.link_style" -> "direct",
-        // Scala
-        "scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/current/",
-        "scaladoc.com.typesafe.config.base_url" -> s"https://lightbend.github.io/config/latest/api/",
-        // Testcontainers
-        "testcontainers.version" -> testcontainersVersion,
-        "javadoc.org.testcontainers.containers.base_url" -> s"https://www.javadoc.io/doc/org.testcontainers/testcontainers/$testcontainersVersion/",
-        "javadoc.org.testcontainers.containers.link_style" -> "direct"
-      ),
+      "image.base_url" -> "images/",
+      "confluent.version" -> confluentAvroSerializerVersion,
+      "scalatest.version" -> scalatestVersion,
+      "scaladoc.akka.kafka.base_url" -> s"/${(Preprocess / siteSubdirName).value}/",
+      "javadoc.akka.kafka.base_url" -> "",
+      // Akka
+      "akka.version" -> akkaVersion,
+      "extref.akka.base_url" -> s"https://doc.akka.io/docs/akka/$AkkaBinaryVersionForDocs/%s",
+      "scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka/$AkkaBinaryVersionForDocs/",
+      "javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka/$AkkaBinaryVersionForDocs/",
+      "javadoc.akka.link_style" -> "direct",
+      "extref.akka-management.base_url" -> s"https://doc.akka.io/docs/akka-management/current/%s",
+      // Kafka
+      "kafka.version" -> kafkaVersion,
+      "extref.kafka.base_url" -> s"https://kafka.apache.org/$KafkaVersionForDocs/%s",
+      "javadoc.org.apache.kafka.base_url" -> s"https://kafka.apache.org/$KafkaVersionForDocs/javadoc/",
+      "javadoc.org.apache.kafka.link_style" -> "direct",
+      // Java
+      "extref.java-docs.base_url" -> "https://docs.oracle.com/en/java/javase/11/%s",
+      "javadoc.base_url" -> "https://docs.oracle.com/en/java/javase/11/docs/api/java.base/",
+      "javadoc.link_style" -> "direct",
+      // Scala
+      "scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/current/",
+      "scaladoc.com.typesafe.config.base_url" -> s"https://lightbend.github.io/config/latest/api/",
+      // Testcontainers
+      "testcontainers.version" -> testcontainersVersion,
+      "javadoc.org.testcontainers.containers.base_url" -> s"https://www.javadoc.io/doc/org.testcontainers/testcontainers/$testcontainersVersion/",
+      "javadoc.org.testcontainers.containers.link_style" -> "direct"),
     apidocRootPackage := "akka",
     paradoxRoots := List("index.html"),
     resolvers += Resolver.jcenterRepo,
     publishRsyncArtifacts += makeSite.value -> "www/",
-    publishRsyncHost := "akkarepo@gustav.akka.io"
-  )
+    publishRsyncHost := "akkarepo@gustav.akka.io")
 
 lazy val benchmarks = project
   .dependsOn(core, testkit)
@@ -342,14 +320,12 @@ lazy val benchmarks = project
     publish / skip := true,
     IntegrationTest / parallelExecution := false,
     libraryDependencies ++= Seq(
-        "com.typesafe.scala-logging" %% "scala-logging" % "3.9.5",
-        "io.dropwizard.metrics" % "metrics-core" % "4.2.11",
-        "ch.qos.logback" % "logback-classic" % "1.2.11",
-        "org.slf4j" % "log4j-over-slf4j" % slf4jVersion,
-        "com.lightbend.akka" %% "akka-stream-alpakka-csv" % "3.0.4",
-        "org.testcontainers" % "kafka" % testcontainersVersion % IntegrationTest,
-        "com.typesafe.akka" %% "akka-slf4j" % akkaVersion % IntegrationTest,
-        "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion % IntegrationTest,
-        "org.scalatest" %% "scalatest" % scalatestVersion % IntegrationTest
-      )
-  )
+      "com.typesafe.scala-logging" %% "scala-logging" % "3.9.5",
+      "io.dropwizard.metrics" % "metrics-core" % "4.2.11",
+      "ch.qos.logback" % "logback-classic" % "1.2.11",
+      "org.slf4j" % "log4j-over-slf4j" % slf4jVersion,
+      "com.lightbend.akka" %% "akka-stream-alpakka-csv" % "3.0.4",
+      "org.testcontainers" % "kafka" % testcontainersVersion % IntegrationTest,
+      "com.typesafe.akka" %% "akka-slf4j" % akkaVersion % IntegrationTest,
+      "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion % IntegrationTest,
+      "org.scalatest" %% "scalatest" % scalatestVersion % IntegrationTest))
diff --git a/cluster-sharding/src/main/scala/akka/kafka/cluster/sharding/KafkaClusterSharding.scala b/cluster-sharding/src/main/scala/akka/kafka/cluster/sharding/KafkaClusterSharding.scala
index 52f368c6..8aba23e9 100644
--- a/cluster-sharding/src/main/scala/akka/kafka/cluster/sharding/KafkaClusterSharding.scala
+++ b/cluster-sharding/src/main/scala/akka/kafka/cluster/sharding/KafkaClusterSharding.scala
@@ -5,17 +5,17 @@
 
 package akka.kafka.cluster.sharding
 
-import java.util.concurrent.{CompletionStage, ConcurrentHashMap}
+import java.util.concurrent.{ CompletionStage, ConcurrentHashMap }
 import java.util.concurrent.atomic.AtomicInteger
 
 import akka.actor.typed.Behavior
 import akka.actor.typed.scaladsl.Behaviors
 import akka.actor.typed.scaladsl.adapter._
-import akka.actor.{ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId}
-import akka.annotation.{ApiMayChange, InternalApi}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId }
+import akka.annotation.{ ApiMayChange, InternalApi }
 import akka.cluster.sharding.external.ExternalShardAllocation
 import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
-import akka.cluster.sharding.typed.{ShardingEnvelope, ShardingMessageExtractor}
+import akka.cluster.sharding.typed.{ ShardingEnvelope, ShardingMessageExtractor }
 import akka.cluster.typed.Cluster
 import akka.kafka.scaladsl.MetadataClient
 import akka.kafka._
@@ -23,8 +23,8 @@ import akka.util.Timeout._
 import org.apache.kafka.common.utils.Utils
 
 import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContextExecutor, Future}
-import scala.util.{Failure, Success}
+import scala.concurrent.{ ExecutionContextExecutor, Future }
+import scala.util.{ Failure, Success }
 import akka.util.JavaDurationConverters._
 import org.slf4j.LoggerFactory
 
@@ -55,12 +55,11 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def messageExtractor[M](topic: String,
-                          timeout: FiniteDuration,
-                          settings: ConsumerSettings[_, _]): Future[KafkaShardingMessageExtractor[M]] =
+      timeout: FiniteDuration,
+      settings: ConsumerSettings[_, _]): Future[KafkaShardingMessageExtractor[M]] =
     getPartitionCount(topic, timeout, settings).map(new KafkaShardingMessageExtractor[M](_))(system.dispatcher)
 
   /**
-   *
    * Java API
    *
    * API MAY CHANGE
@@ -75,12 +74,11 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    *
    * All topics used in a Consumer [[akka.kafka.Subscription]] must contain the same number of partitions to ensure
    * that entities are routed to the same Entity type.
-   *
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def messageExtractor[M](topic: String,
-                          timeout: java.time.Duration,
-                          settings: ConsumerSettings[_, _]): CompletionStage[KafkaShardingMessageExtractor[M]] =
+      timeout: java.time.Duration,
+      settings: ConsumerSettings[_, _]): CompletionStage[KafkaShardingMessageExtractor[M]] =
     getPartitionCount(topic, timeout.asScala, settings)
       .map(new KafkaShardingMessageExtractor[M](_))(system.dispatcher)
       .toJava
@@ -117,9 +115,9 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def messageExtractorNoEnvelope[M](topic: String,
-                                    timeout: FiniteDuration,
-                                    entityIdExtractor: M => String,
-                                    settings: ConsumerSettings[_, _]): Future[KafkaShardingNoEnvelopeExtractor[M]] =
+      timeout: FiniteDuration,
+      entityIdExtractor: M => String,
+      settings: ConsumerSettings[_, _]): Future[KafkaShardingNoEnvelopeExtractor[M]] =
     getPartitionCount(topic, timeout, settings)
       .map(partitions => new KafkaShardingNoEnvelopeExtractor[M](partitions, entityIdExtractor))(system.dispatcher)
 
@@ -145,12 +143,10 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
       topic: String,
       timeout: java.time.Duration,
       entityIdExtractor: java.util.function.Function[M, String],
-      settings: ConsumerSettings[_, _]
-  ): CompletionStage[KafkaShardingNoEnvelopeExtractor[M]] =
+      settings: ConsumerSettings[_, _]): CompletionStage[KafkaShardingNoEnvelopeExtractor[M]] =
     getPartitionCount(topic, timeout.asScala, settings)
       .map(partitions => new KafkaShardingNoEnvelopeExtractor[M](partitions, e => entityIdExtractor.apply(e)))(
-        system.dispatcher
-      )
+        system.dispatcher)
       .toJava
 
   /**
@@ -166,7 +162,7 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def messageExtractorNoEnvelope[M](kafkaPartitions: Int,
-                                    entityIdExtractor: M => String): KafkaShardingNoEnvelopeExtractor[M] =
+      entityIdExtractor: M => String): KafkaShardingNoEnvelopeExtractor[M] =
     new KafkaShardingNoEnvelopeExtractor[M](kafkaPartitions, entityIdExtractor)
 
   /**
@@ -183,14 +179,13 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def messageExtractorNoEnvelope[M](
       kafkaPartitions: Int,
-      entityIdExtractor: java.util.function.Function[M, String]
-  ): KafkaShardingNoEnvelopeExtractor[M] =
+      entityIdExtractor: java.util.function.Function[M, String]): KafkaShardingNoEnvelopeExtractor[M] =
     new KafkaShardingNoEnvelopeExtractor[M](kafkaPartitions, e => entityIdExtractor.apply(e))
 
   private val metadataConsumerActorNum = new AtomicInteger
   private def getPartitionCount[M](topic: String,
-                                   timeout: FiniteDuration,
-                                   settings: ConsumerSettings[_, _]): Future[Int] = {
+      timeout: FiniteDuration,
+      settings: ConsumerSettings[_, _]): Future[Int] = {
     implicit val ec: ExecutionContextExecutor = system.dispatcher
     val num = metadataConsumerActorNum.getAndIncrement()
     val consumerActor = system
@@ -225,10 +220,11 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def rebalanceListener(typeKey: EntityTypeKey[_]): akka.actor.typed.ActorRef[ConsumerRebalanceEvent] = {
-    rebalanceListeners.computeIfAbsent(typeKey, _ => {
-      system.toTyped
-        .systemActorOf(RebalanceListener(typeKey), s"kafka-cluster-sharding-rebalance-listener-${typeKey.name}")
-    })
+    rebalanceListeners.computeIfAbsent(typeKey,
+      _ => {
+        system.toTyped
+          .systemActorOf(RebalanceListener(typeKey), s"kafka-cluster-sharding-rebalance-listener-${typeKey.name}")
+      })
   }
 
   /**
@@ -251,8 +247,8 @@ final class KafkaClusterSharding(system: ExtendedActorSystem) extends Extension
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/1074")
   def rebalanceListener(
-      typeKey: akka.cluster.sharding.typed.javadsl.EntityTypeKey[_]
-  ): akka.actor.typed.ActorRef[ConsumerRebalanceEvent] = {
+      typeKey: akka.cluster.sharding.typed.javadsl.EntityTypeKey[_])
+      : akka.actor.typed.ActorRef[ConsumerRebalanceEvent] = {
     rebalanceListener(typeKey.asScala)
   }
 }
@@ -264,7 +260,7 @@ object KafkaClusterSharding extends ExtensionId[KafkaClusterSharding] {
     def shardId(entityId: String): String = {
       // simplified version of Kafka's `DefaultPartitioner` implementation
       val partition = org.apache.kafka.common.utils.Utils
-          .toPositive(Utils.murmur2(entityId.getBytes())) % kafkaPartitions
+        .toPositive(Utils.murmur2(entityId.getBytes())) % kafkaPartitions
       partition.toString
     }
   }
@@ -279,7 +275,7 @@ object KafkaClusterSharding extends ExtensionId[KafkaClusterSharding] {
 
   @InternalApi
   final class KafkaShardingNoEnvelopeExtractor[M] private[kafka] (val kafkaPartitions: Int,
-                                                                  entityIdExtractor: M => String)
+      entityIdExtractor: M => String)
       extends ShardingMessageExtractor[M, M]
       with KafkaClusterShardingContract {
     override def entityId(message: M): String = entityIdExtractor(message)
@@ -301,9 +297,9 @@ object KafkaClusterSharding extends ExtensionId[KafkaClusterSharding] {
           case TopicPartitionsAssigned(_, partitions) =>
             if (log.isInfoEnabled) {
               log.info("Consumer group '{}' assigned topic partitions to cluster member '{}': [{}]",
-                       typeKey.name,
-                       address,
-                       partitions.mkString(","))
+                typeKey.name,
+                address,
+                partitions.mkString(","))
             }
 
             val updates = shardAllocationClient.updateShardLocations(partitions.map { tp =>
@@ -322,8 +318,7 @@ object KafkaClusterSharding extends ExtensionId[KafkaClusterSharding] {
                       "Completed consumer group '{}' assignment of topic partitions to cluster member '{}': [{}]",
                       typeKey.name,
                       address,
-                      partitions.mkString(",")
-                    )
+                      partitions.mkString(","))
                   }
 
                 case Failure(ex) =>
@@ -333,9 +328,9 @@ object KafkaClusterSharding extends ExtensionId[KafkaClusterSharding] {
           case TopicPartitionsRevoked(_, partitions) =>
             val partitionsList = partitions.mkString(",")
             log.info("Consumer group '{}' revoked topic partitions from cluster member '{}': [{}]",
-                     typeKey.name,
-                     address,
-                     partitionsList)
+              typeKey.name,
+              address,
+              partitionsList)
             Behaviors.same
         }
       }
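
The scaladoc in this file describes building a sharding message extractor from
a topic's partition count. A compact sketch of the no-envelope variant, with
hypothetical topic, group, and broker values:

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import akka.kafka.cluster.sharding.KafkaClusterSharding
    import org.apache.kafka.common.serialization.StringDeserializer

    import scala.concurrent.Future
    import scala.concurrent.duration._

    implicit val system: ActorSystem = ActorSystem("sharding-sketch")

    val settings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers("localhost:9092") // assumption: local broker
      .withGroupId("user-events-group")

    // Queries the broker for the topic's partition count and routes each entity
    // to the shard that matches its Kafka partition.
    val extractor: Future[KafkaClusterSharding.KafkaShardingNoEnvelopeExtractor[String]] =
      KafkaClusterSharding(system).messageExtractorNoEnvelope(
        topic = "user-events",
        timeout = 10.seconds,
        entityIdExtractor = (msg: String) => msg, // hypothetical: message is its own id
        settings = settings)
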
diff --git a/core/src/main/scala/akka/kafka/CommitterSettings.scala b/core/src/main/scala/akka/kafka/CommitterSettings.scala
index 1897b6d5..0bcc1932 100644
--- a/core/src/main/scala/akka/kafka/CommitterSettings.scala
+++ b/core/src/main/scala/akka/kafka/CommitterSettings.scala
@@ -48,9 +48,9 @@ object CommitDelivery {
   val sendAndForget: CommitDelivery = SendAndForget
 
   def valueOf(s: String): CommitDelivery = s match {
-    case "WaitForAck" => WaitForAck
+    case "WaitForAck"    => WaitForAck
     case "SendAndForget" => SendAndForget
-    case other => throw new IllegalArgumentException(s"allowed values are: WaitForAck, SendAndForget. Received: $other")
+    case other           => throw new IllegalArgumentException(s"allowed values are: WaitForAck, SendAndForget. Received: $other")
   }
 }
 
@@ -89,11 +89,10 @@ object CommitWhen {
 
   def valueOf(s: String): CommitWhen = s match {
     case "OffsetFirstObserved" => OffsetFirstObserved
-    case "NextOffsetObserved" => NextOffsetObserved
+    case "NextOffsetObserved"  => NextOffsetObserved
     case other =>
       throw new IllegalArgumentException(
-        s"allowed values are: OffsetFirstObserved, NextOffsetObserved. Received: $other"
-      )
+        s"allowed values are: OffsetFirstObserved, NextOffsetObserved. Received: $other")
   }
 }
 
@@ -166,8 +165,7 @@ class CommitterSettings private (
     val maxInterval: FiniteDuration,
     val parallelism: Int,
     val delivery: CommitDelivery,
-    val when: CommitWhen
-) {
+    val when: CommitWhen) {
 
   def withMaxBatch(maxBatch: Long): CommitterSettings =
     copy(maxBatch = maxBatch)
@@ -190,10 +188,10 @@ class CommitterSettings private (
     copy(when = value)
 
   private def copy(maxBatch: Long = maxBatch,
-                   maxInterval: FiniteDuration = maxInterval,
-                   parallelism: Int = parallelism,
-                   delivery: CommitDelivery = delivery,
-                   when: CommitWhen = when): CommitterSettings =
+      maxInterval: FiniteDuration = maxInterval,
+      parallelism: Int = parallelism,
+      delivery: CommitDelivery = delivery,
+      when: CommitWhen = when): CommitterSettings =
     new CommitterSettings(maxBatch, maxInterval, parallelism, delivery, when)
 
   override def toString: String =
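
CommitDelivery.valueOf and CommitWhen.valueOf back the string-typed
configuration keys; unknown values fail fast, listing the allowed options:

    import akka.kafka.{ CommitDelivery, CommitWhen }

    val delivery = CommitDelivery.valueOf("SendAndForget") // CommitDelivery.SendAndForget
    val when = CommitWhen.valueOf("NextOffsetObserved") // CommitWhen.NextOffsetObserved
    // CommitDelivery.valueOf("nope") throws IllegalArgumentException
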
diff --git a/core/src/main/scala/akka/kafka/ConnectionCheckerSettings.scala b/core/src/main/scala/akka/kafka/ConnectionCheckerSettings.scala
index 6ff1af60..830af9b2 100644
--- a/core/src/main/scala/akka/kafka/ConnectionCheckerSettings.scala
+++ b/core/src/main/scala/akka/kafka/ConnectionCheckerSettings.scala
@@ -10,26 +10,25 @@ import com.typesafe.config.Config
 
 import scala.concurrent.duration._
 
-import java.time.{Duration => JDuration}
+import java.time.{ Duration => JDuration }
 
 class ConnectionCheckerSettings private[kafka] (val enable: Boolean,
-                                                val maxRetries: Int,
-                                                val checkInterval: FiniteDuration,
-                                                val factor: Double) {
+    val maxRetries: Int,
+    val checkInterval: FiniteDuration,
+    val factor: Double) {
 
   require(factor > 0, "Backoff factor for connection checker must be finite positive number")
   require(maxRetries >= 0, "retries for connection checker must be not negative number")
 
   private def copy(enable: Boolean = enable,
-                   maxRetries: Int = maxRetries,
-                   checkInterval: FiniteDuration = checkInterval,
-                   factor: Double = factor): ConnectionCheckerSettings =
+      maxRetries: Int = maxRetries,
+      checkInterval: FiniteDuration = checkInterval,
+      factor: Double = factor): ConnectionCheckerSettings =
     new ConnectionCheckerSettings(
       enable,
       maxRetries,
       checkInterval,
-      factor
-    )
+      factor)
 
   def withEnable(enable: Boolean): ConnectionCheckerSettings = copy(enable = enable)
   def withMaxRetries(maxRetries: Int): ConnectionCheckerSettings = copy(maxRetries = maxRetries)
diff --git a/core/src/main/scala/akka/kafka/ConsumerFailed.scala b/core/src/main/scala/akka/kafka/ConsumerFailed.scala
index e76d7d84..5be16b15 100644
--- a/core/src/main/scala/akka/kafka/ConsumerFailed.scala
+++ b/core/src/main/scala/akka/kafka/ConsumerFailed.scala
@@ -25,10 +25,8 @@ class ConsumerFailed(msg: String) extends RuntimeException(msg) {
 class InitialPollFailed(val timeout: Long, val bootstrapServers: String)
     extends ConsumerFailed(
       s"Initial consumer poll($timeout) with bootstrap servers " +
-      s"$bootstrapServers did not succeed, correct address?"
-    )
+      s"$bootstrapServers did not succeed, correct address?")
 
 class WakeupsExceeded(val timeout: Long, val maxWakeups: Int, val wakeupTimeout: FiniteDuration)
     extends ConsumerFailed(
-      s"WakeupException limit exceeded during poll($timeout), stopping (max-wakeups = $maxWakeups, wakeup-timeout = ${wakeupTimeout.toCoarsest})."
-    )
+      s"WakeupException limit exceeded during poll($timeout), stopping (max-wakeups = $maxWakeups, wakeup-timeout = ${wakeupTimeout.toCoarsest}).")
diff --git a/core/src/main/scala/akka/kafka/ConsumerMessage.scala b/core/src/main/scala/akka/kafka/ConsumerMessage.scala
index bb8f1ad5..6a80ccbe 100644
--- a/core/src/main/scala/akka/kafka/ConsumerMessage.scala
+++ b/core/src/main/scala/akka/kafka/ConsumerMessage.scala
@@ -9,8 +9,8 @@ import java.util.Objects
 import java.util.concurrent.CompletionStage
 
 import akka.Done
-import akka.annotation.{DoNotInherit, InternalApi}
-import akka.kafka.internal.{CommittableOffsetBatchImpl, CommittedMarker}
+import akka.annotation.{ DoNotInherit, InternalApi }
+import akka.kafka.internal.{ CommittableOffsetBatchImpl, CommittedMarker }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 import org.apache.kafka.common.TopicPartition
 
@@ -29,8 +29,7 @@ object ConsumerMessage {
    */
   final case class CommittableMessage[K, V](
       record: ConsumerRecord[K, V],
-      committableOffset: CommittableOffset
-  )
+      committableOffset: CommittableOffset)
 
   /**
    * Output element of `transactionalSource`.
@@ -38,8 +37,7 @@ object ConsumerMessage {
    */
   final case class TransactionalMessage[K, V](
       record: ConsumerRecord[K, V],
-      partitionOffset: PartitionOffset
-  )
+      partitionOffset: PartitionOffset)
 
   /**
    * Carries offsets from Kafka for aggregation and committing by the [[scaladsl.Committer]]
@@ -136,8 +134,7 @@ object ConsumerMessage {
       override val key: GroupTopicPartition,
       override val offset: Long,
       private[kafka] val committedMarker: CommittedMarker,
-      private[kafka] val fromPartitionedSource: Boolean
-  ) extends PartitionOffset(key, offset)
+      private[kafka] val fromPartitionedSource: Boolean) extends PartitionOffset(key, offset)
 
   /**
    * groupId, topic, partition key for an offset position.
@@ -145,8 +142,7 @@ object ConsumerMessage {
   final case class GroupTopicPartition(
       groupId: String,
       topic: String,
-      partition: Int
-  ) {
+      partition: Int) {
     def topicPartition: TopicPartition = new TopicPartition(topic, partition)
   }
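
GroupTopicPartition is the aggregation key used for offset batches; its
topicPartition helper recovers the Kafka client type:

    import akka.kafka.ConsumerMessage.GroupTopicPartition

    val key = GroupTopicPartition(groupId = "group-1", topic = "events", partition = 0)
    val tp = key.topicPartition // org.apache.kafka.common.TopicPartition for events-0
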
 
diff --git a/core/src/main/scala/akka/kafka/ConsumerSettings.scala b/core/src/main/scala/akka/kafka/ConsumerSettings.scala
index fc3c6985..e6cc8c4e 100644
--- a/core/src/main/scala/akka/kafka/ConsumerSettings.scala
+++ b/core/src/main/scala/akka/kafka/ConsumerSettings.scala
@@ -6,19 +6,19 @@
 package akka.kafka
 
 import java.util.Optional
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
 
 import akka.annotation.InternalApi
 import akka.kafka.internal._
 import akka.util.JavaDurationConverters._
 import com.typesafe.config.Config
-import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer}
+import org.apache.kafka.clients.consumer.{ Consumer, ConsumerConfig, KafkaConsumer }
 import org.apache.kafka.common.serialization.Deserializer
 
 import scala.jdk.CollectionConverters._
 import scala.compat.java8.OptionConverters._
 import scala.compat.java8.FutureConverters._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.concurrent.duration._
 
 object ConsumerSettings {
@@ -33,8 +33,7 @@ object ConsumerSettings {
   def apply[K, V](
       system: akka.actor.ActorSystem,
       keyDeserializer: Option[Deserializer[K]],
-      valueDeserializer: Option[Deserializer[V]]
-  ): ConsumerSettings[K, V] = {
+      valueDeserializer: Option[Deserializer[V]]): ConsumerSettings[K, V] = {
     val config = system.settings.config.getConfig(configPath)
     apply(config, keyDeserializer, valueDeserializer)
   }
@@ -49,8 +48,7 @@ object ConsumerSettings {
   def apply[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keyDeserializer: Option[Deserializer[K]],
-      valueDeserializer: Option[Deserializer[V]]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Option[Deserializer[V]]): ConsumerSettings[K, V] =
     apply(system.classicSystem, keyDeserializer, valueDeserializer)
 
   /**
@@ -61,19 +59,16 @@ object ConsumerSettings {
   def apply[K, V](
       config: Config,
       keyDeserializer: Option[Deserializer[K]],
-      valueDeserializer: Option[Deserializer[V]]
-  ): ConsumerSettings[K, V] = {
+      valueDeserializer: Option[Deserializer[V]]): ConsumerSettings[K, V] = {
     val properties = ConfigSettings.parseKafkaClientsProperties(config.getConfig("kafka-clients"))
     require(
       keyDeserializer != null &&
       (keyDeserializer.isDefined || properties.contains(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)),
-      "Key deserializer should be defined or declared in configuration"
-    )
+      "Key deserializer should be defined or declared in configuration")
     require(
       valueDeserializer != null &&
       (valueDeserializer.isDefined || properties.contains(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)),
-      "Value deserializer should be defined or declared in configuration"
-    )
+      "Value deserializer should be defined or declared in configuration")
     val pollInterval = config.getDuration("poll-interval").asScala
     val pollTimeout = config.getDuration("poll-timeout").asScala
     val stopTimeout = config.getDuration("stop-timeout").asScala
@@ -90,8 +85,7 @@ object ConsumerSettings {
     val connectionCheckerSettings = ConnectionCheckerSettings(config.getConfig(ConnectionCheckerSettings.configPath))
     val partitionHandlerWarning = config.getDuration("partition-handler-warning").asScala
     val resetProtectionThreshold = OffsetResetProtectionSettings(
-      config.getConfig(OffsetResetProtectionSettings.configPath)
-    )
+      config.getConfig(OffsetResetProtectionSettings.configPath))
 
     new ConsumerSettings[K, V](
       properties,
@@ -114,8 +108,7 @@ object ConsumerSettings {
       ConsumerSettings.createKafkaConsumer,
       connectionCheckerSettings,
       partitionHandlerWarning,
-      resetProtectionThreshold
-    )
+      resetProtectionThreshold)
   }
 
   /**
@@ -126,8 +119,7 @@ object ConsumerSettings {
   def apply[K, V](
       system: akka.actor.ActorSystem,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(system, Option(keyDeserializer), Option(valueDeserializer))
 
   /**
@@ -140,8 +132,7 @@ object ConsumerSettings {
   def apply[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(system, Option(keyDeserializer), Option(valueDeserializer))
 
   /**
@@ -152,8 +143,7 @@ object ConsumerSettings {
   def apply[K, V](
       config: Config,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(config, Option(keyDeserializer), Option(valueDeserializer))
 
   /**
@@ -164,8 +154,7 @@ object ConsumerSettings {
   def create[K, V](
       system: akka.actor.ActorSystem,
       keyDeserializer: Optional[Deserializer[K]],
-      valueDeserializer: Optional[Deserializer[V]]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Optional[Deserializer[V]]): ConsumerSettings[K, V] =
     apply(system, keyDeserializer.asScala, valueDeserializer.asScala)
 
   /**
@@ -178,8 +167,7 @@ object ConsumerSettings {
   def create[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keyDeserializer: Optional[Deserializer[K]],
-      valueDeserializer: Optional[Deserializer[V]]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Optional[Deserializer[V]]): ConsumerSettings[K, V] =
     apply(system, keyDeserializer.asScala, valueDeserializer.asScala)
 
   /**
@@ -190,8 +178,7 @@ object ConsumerSettings {
   def create[K, V](
       config: Config,
       keyDeserializer: Optional[Deserializer[K]],
-      valueDeserializer: Optional[Deserializer[V]]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Optional[Deserializer[V]]): ConsumerSettings[K, V] =
     apply(config, keyDeserializer.asScala, valueDeserializer.asScala)
 
   /**
@@ -202,8 +189,7 @@ object ConsumerSettings {
   def create[K, V](
       system: akka.actor.ActorSystem,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(system, keyDeserializer, valueDeserializer)
 
   /**
@@ -216,8 +202,7 @@ object ConsumerSettings {
   def create[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(system, keyDeserializer, valueDeserializer)
 
   /**
@@ -228,8 +213,7 @@ object ConsumerSettings {
   def create[K, V](
       config: Config,
       keyDeserializer: Deserializer[K],
-      valueDeserializer: Deserializer[V]
-  ): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     apply(config, keyDeserializer, valueDeserializer)
 
   /**
@@ -237,8 +221,8 @@ object ConsumerSettings {
    */
   def createKafkaConsumer[K, V](settings: ConsumerSettings[K, V]): Consumer[K, V] =
     new KafkaConsumer[K, V](settings.getProperties,
-                            settings.keyDeserializerOpt.orNull,
-                            settings.valueDeserializerOpt.orNull)
+      settings.keyDeserializerOpt.orNull,
+      settings.valueDeserializerOpt.orNull)
 
 }
 
@@ -271,8 +255,7 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
     val consumerFactory: ConsumerSettings[K, V] => Consumer[K, V],
     val connectionCheckerSettings: ConnectionCheckerSettings,
     val partitionHandlerWarning: FiniteDuration,
-    val resetProtectionSettings: OffsetResetProtectionSettings
-) {
+    val resetProtectionSettings: OffsetResetProtectionSettings) {
 
   /**
    * A comma-separated list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
@@ -457,8 +440,7 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
    * Enable kafka connection checker with provided settings
    */
   def withConnectionChecker(
-      kafkaConnectionCheckerConfig: ConnectionCheckerSettings
-  ): ConsumerSettings[K, V] =
+      kafkaConnectionCheckerConfig: ConnectionCheckerSettings): ConsumerSettings[K, V] =
     copy(connectionCheckerConfig = kafkaConnectionCheckerConfig)
 
   /**
@@ -522,16 +504,15 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
    * @since 2.0.0
    */
   def withEnrichCompletionStage(
-      value: java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]]
-  ): ConsumerSettings[K, V] =
+      value: java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]])
+      : ConsumerSettings[K, V] =
     copy(enrichAsync = Some((s: ConsumerSettings[K, V]) => value.apply(s).toScala))
 
   /**
    * Replaces the default Kafka consumer creation logic.
    */
   def withConsumerFactory(
-      factory: ConsumerSettings[K, V] => Consumer[K, V]
-  ): ConsumerSettings[K, V] = copy(consumerFactory = factory)
+      factory: ConsumerSettings[K, V] => Consumer[K, V]): ConsumerSettings[K, V] = copy(consumerFactory = factory)
 
   /**
    * Set the protection for unintentional offset reset.
@@ -570,8 +551,7 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
       consumerFactory: ConsumerSettings[K, V] => Consumer[K, V] = consumerFactory,
       connectionCheckerConfig: ConnectionCheckerSettings = connectionCheckerSettings,
       partitionHandlerWarning: FiniteDuration = partitionHandlerWarning,
-      resetProtectionSettings: OffsetResetProtectionSettings = resetProtectionSettings
-  ): ConsumerSettings[K, V] =
+      resetProtectionSettings: OffsetResetProtectionSettings = resetProtectionSettings): ConsumerSettings[K, V] =
     new ConsumerSettings[K, V](
       properties,
       keyDeserializer,
@@ -593,8 +573,7 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
       consumerFactory,
       connectionCheckerConfig,
       partitionHandlerWarning,
-      resetProtectionSettings
-    )
+      resetProtectionSettings)
 
   /**
    * Applies `enrichAsync` to complement these settings from asynchronous sources.
@@ -612,8 +591,7 @@ class ConsumerSettings[K, V] @InternalApi private[kafka] (
   def createKafkaConsumer(): Consumer[K, V] =
     if (enrichAsync.isDefined) {
       throw new IllegalStateException(
-        "Asynchronous settings enrichment is set via `withEnrichAsync` or `withEnrichCompletionStage`, you must use `createKafkaConsumerAsync` or `createKafkaConsumerCompletionStage` to apply it"
-      )
+        "Asynchronous settings enrichment is set via `withEnrichAsync` or `withEnrichCompletionStage`, you must use `createKafkaConsumerAsync` or `createKafkaConsumerCompletionStage` to apply it")
     } else {
       consumerFactory.apply(this)
     }
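
For reference, a minimal usage sketch of the ConsumerSettings factories reformatted above (broker address, group id and property values are illustrative, not part of this commit):

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import org.apache.kafka.clients.consumer.ConsumerConfig
    import org.apache.kafka.common.serialization.StringDeserializer

    object ConsumerSettingsSketch {
      // Reads the "akka.kafka.consumer" section of the actor system's config
      // and overrides the connection details programmatically.
      def settings(system: ActorSystem): ConsumerSettings[String, String] =
        ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
          .withBootstrapServers("localhost:9092") // assumed local broker
          .withGroupId("example-group")           // illustrative group id
          .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    }
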
diff --git a/core/src/main/scala/akka/kafka/KafkaConsumerActor.scala b/core/src/main/scala/akka/kafka/KafkaConsumerActor.scala
index 1fa5cfe9..43e8074f 100644
--- a/core/src/main/scala/akka/kafka/KafkaConsumerActor.scala
+++ b/core/src/main/scala/akka/kafka/KafkaConsumerActor.scala
@@ -5,9 +5,9 @@
 
 package akka.kafka
 
-import akka.actor.{ActorRef, NoSerializationVerificationNeeded, Props}
+import akka.actor.{ ActorRef, NoSerializationVerificationNeeded, Props }
 import akka.annotation.InternalApi
-import akka.kafka.internal.{KafkaConsumerActor => InternalKafkaConsumerActor}
+import akka.kafka.internal.{ KafkaConsumerActor => InternalKafkaConsumerActor }
 
 object KafkaConsumerActor {
 
diff --git a/core/src/main/scala/akka/kafka/Metadata.scala b/core/src/main/scala/akka/kafka/Metadata.scala
index b71da572..2f5ba0ce 100644
--- a/core/src/main/scala/akka/kafka/Metadata.scala
+++ b/core/src/main/scala/akka/kafka/Metadata.scala
@@ -8,8 +8,8 @@ package akka.kafka
 import java.util.Optional
 
 import akka.actor.NoSerializationVerificationNeeded
-import org.apache.kafka.clients.consumer.{OffsetAndMetadata, OffsetAndTimestamp}
-import org.apache.kafka.common.{PartitionInfo, TopicPartition}
+import org.apache.kafka.clients.consumer.{ OffsetAndMetadata, OffsetAndTimestamp }
+import org.apache.kafka.common.{ PartitionInfo, TopicPartition }
 
 import scala.jdk.CollectionConverters._
 import scala.util.Try
diff --git a/core/src/main/scala/akka/kafka/OffsetResetProtectionSettings.scala b/core/src/main/scala/akka/kafka/OffsetResetProtectionSettings.scala
index 5135c255..d0590164 100644
--- a/core/src/main/scala/akka/kafka/OffsetResetProtectionSettings.scala
+++ b/core/src/main/scala/akka/kafka/OffsetResetProtectionSettings.scala
@@ -4,7 +4,7 @@
  */
 
 package akka.kafka
-import java.time.{Duration => JDuration}
+import java.time.{ Duration => JDuration }
 
 import akka.annotation.InternalApi
 import akka.util.JavaDurationConverters._
@@ -13,14 +13,14 @@ import com.typesafe.config.Config
 import scala.concurrent.duration._
 
 class OffsetResetProtectionSettings @InternalApi private[kafka] (val enable: Boolean,
-                                                                 val offsetThreshold: Long,
-                                                                 val timeThreshold: FiniteDuration) {
+    val offsetThreshold: Long,
+    val timeThreshold: FiniteDuration) {
   require(offsetThreshold > 0, "An offset threshold must be greater than 0")
   require(timeThreshold.toMillis > 0, "A time threshold must be greater than 0")
 
   private def copy(enable: Boolean = enable,
-                   offsetThreshold: Long = offsetThreshold,
-                   timeThreshold: FiniteDuration = timeThreshold): OffsetResetProtectionSettings = {
+      offsetThreshold: Long = offsetThreshold,
+      timeThreshold: FiniteDuration = timeThreshold): OffsetResetProtectionSettings = {
     new OffsetResetProtectionSettings(enable, offsetThreshold, timeThreshold)
   }
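
A hedged sketch of how these settings plug into a consumer, assuming the companion factory OffsetResetProtectionSettings(offsetThreshold, timeThreshold) and a withResetProtectionSettings combinator on ConsumerSettings; the thresholds are illustrative:

    import akka.kafka.{ ConsumerSettings, OffsetResetProtectionSettings }
    import scala.concurrent.duration._

    object ResetProtectionSketch {
      // Treat a jump of more than 10000 offsets back, or more than one
      // minute back in record timestamps, as an unintended offset reset.
      def withProtection[K, V](base: ConsumerSettings[K, V]): ConsumerSettings[K, V] =
        base.withResetProtectionSettings(OffsetResetProtectionSettings(10000L, 1.minute))
    }
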
 
diff --git a/core/src/main/scala/akka/kafka/ProducerMessage.scala b/core/src/main/scala/akka/kafka/ProducerMessage.scala
index f521257e..294f474c 100644
--- a/core/src/main/scala/akka/kafka/ProducerMessage.scala
+++ b/core/src/main/scala/akka/kafka/ProducerMessage.scala
@@ -6,7 +6,7 @@
 package akka.kafka
 
 import akka.NotUsed
-import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
+import org.apache.kafka.clients.producer.{ ProducerRecord, RecordMetadata }
 
 import scala.collection.immutable
 import scala.jdk.CollectionConverters._
@@ -52,8 +52,7 @@ object ProducerMessage {
    */
   final case class Message[K, V, +PassThrough](
       record: ProducerRecord[K, V],
-      passThrough: PassThrough
-  ) extends Envelope[K, V, PassThrough] {
+      passThrough: PassThrough) extends Envelope[K, V, PassThrough] {
     override def withPassThrough[PassThrough2](value: PassThrough2): Message[K, V, PassThrough2] =
       copy(passThrough = value)
   }
@@ -67,8 +66,7 @@ object ProducerMessage {
    */
   def single[K, V, PassThrough](
       record: ProducerRecord[K, V],
-      passThrough: PassThrough
-  ): Envelope[K, V, PassThrough] = Message(record, passThrough)
+      passThrough: PassThrough): Envelope[K, V, PassThrough] = Message(record, passThrough)
 
   /**
    * Create a message containing the `record`.
@@ -93,8 +91,7 @@ object ProducerMessage {
    */
   final case class MultiMessage[K, V, +PassThrough](
       records: immutable.Seq[ProducerRecord[K, V]],
-      passThrough: PassThrough
-  ) extends Envelope[K, V, PassThrough] {
+      passThrough: PassThrough) extends Envelope[K, V, PassThrough] {
 
     /**
      * Java API:
@@ -117,8 +114,7 @@ object ProducerMessage {
    */
   def multi[K, V, PassThrough](
       records: immutable.Seq[ProducerRecord[K, V]],
-      passThrough: PassThrough
-  ): Envelope[K, V, PassThrough] = MultiMessage(records, passThrough)
+      passThrough: PassThrough): Envelope[K, V, PassThrough] = MultiMessage(records, passThrough)
 
   /**
    * Create a multi-message containing several `records`.
@@ -127,8 +123,7 @@ object ProducerMessage {
    * @tparam V the type of values
    */
   def multi[K, V](
-      records: immutable.Seq[ProducerRecord[K, V]]
-  ): Envelope[K, V, NotUsed] = MultiMessage(records, NotUsed)
+      records: immutable.Seq[ProducerRecord[K, V]]): Envelope[K, V, NotUsed] = MultiMessage(records, NotUsed)
 
   /**
    * Java API:
@@ -140,8 +135,7 @@ object ProducerMessage {
    */
   def multi[K, V, PassThrough](
       records: java.util.Collection[ProducerRecord[K, V]],
-      passThrough: PassThrough
-  ): Envelope[K, V, PassThrough] = new MultiMessage(records, passThrough)
+      passThrough: PassThrough): Envelope[K, V, PassThrough] = new MultiMessage(records, passThrough)
 
   /**
    * Java API:
@@ -151,8 +145,7 @@ object ProducerMessage {
    * @tparam V the type of values
    */
   def multi[K, V](
-      records: java.util.Collection[ProducerRecord[K, V]]
-  ): Envelope[K, V, NotUsed] = new MultiMessage(records, NotUsed)
+      records: java.util.Collection[ProducerRecord[K, V]]): Envelope[K, V, NotUsed] = new MultiMessage(records, NotUsed)
 
   /**
    * [[Envelope]] implementation that does not produce anything to Kafka, flows emit
@@ -165,8 +158,7 @@ object ProducerMessage {
    * that can be committed later in the flow.
    */
   final case class PassThroughMessage[K, V, +PassThrough](
-      passThrough: PassThrough
-  ) extends Envelope[K, V, PassThrough] {
+      passThrough: PassThrough) extends Envelope[K, V, PassThrough] {
     override def withPassThrough[PassThrough2](value: PassThrough2): Envelope[K, V, PassThrough2] =
       copy(passThrough = value)
   }
@@ -206,16 +198,14 @@ object ProducerMessage {
    */
   final case class Result[K, V, PassThrough] private (
       metadata: RecordMetadata,
-      message: Message[K, V, PassThrough]
-  ) extends Results[K, V, PassThrough] {
+      message: Message[K, V, PassThrough]) extends Results[K, V, PassThrough] {
     def offset: Long = metadata.offset()
     def passThrough: PassThrough = message.passThrough
   }
 
   final case class MultiResultPart[K, V] private (
       metadata: RecordMetadata,
-      record: ProducerRecord[K, V]
-  )
+      record: ProducerRecord[K, V])
 
   /**
    * [[Results]] implementation emitted when all messages in a [[MultiMessage]] have been
@@ -223,8 +213,7 @@ object ProducerMessage {
    */
   final case class MultiResult[K, V, PassThrough] private (
       parts: immutable.Seq[MultiResultPart[K, V]],
-      passThrough: PassThrough
-  ) extends Results[K, V, PassThrough] {
+      passThrough: PassThrough) extends Results[K, V, PassThrough] {
 
     /**
      * Java API:
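
The envelope factories above are used as in this brief sketch (topic names and pass-through values are illustrative):

    import akka.kafka.ProducerMessage
    import org.apache.kafka.clients.producer.ProducerRecord

    object EnvelopeSketch {
      // One record, with a pass-through value emitted alongside the result:
      val single = ProducerMessage.single(new ProducerRecord("topic", "key", "value"), passThrough = 1)
      // Several records sharing a single pass-through:
      val multi = ProducerMessage.multi(
        List(new ProducerRecord[String, String]("topic", "a"),
          new ProducerRecord[String, String]("topic", "b")),
        passThrough = 2)
      // Produce nothing; just carry the pass-through downstream:
      val skip = ProducerMessage.passThrough[String, String, Int](3)
    }
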
diff --git a/core/src/main/scala/akka/kafka/ProducerSettings.scala b/core/src/main/scala/akka/kafka/ProducerSettings.scala
index 3fe54c2b..793c8aa9 100644
--- a/core/src/main/scala/akka/kafka/ProducerSettings.scala
+++ b/core/src/main/scala/akka/kafka/ProducerSettings.scala
@@ -6,12 +6,12 @@
 package akka.kafka
 
 import java.util.Optional
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
 
 import akka.annotation.InternalApi
 import akka.kafka.internal.ConfigSettings
 import com.typesafe.config.Config
-import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerConfig}
+import org.apache.kafka.clients.producer.{ KafkaProducer, Producer, ProducerConfig }
 import org.apache.kafka.common.serialization.Serializer
 
 import scala.jdk.CollectionConverters._
@@ -19,7 +19,7 @@ import scala.compat.java8.OptionConverters._
 import scala.concurrent.duration._
 import akka.util.JavaDurationConverters._
 
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.compat.java8.FutureConverters._
 
 object ProducerSettings {
@@ -34,8 +34,7 @@ object ProducerSettings {
   def apply[K, V](
       system: akka.actor.ActorSystem,
       keySerializer: Option[Serializer[K]],
-      valueSerializer: Option[Serializer[V]]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Option[Serializer[V]]): ProducerSettings[K, V] =
     apply(system.settings.config.getConfig(configPath), keySerializer, valueSerializer)
 
   /**
@@ -48,8 +47,7 @@ object ProducerSettings {
   def apply[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keySerializer: Option[Serializer[K]],
-      valueSerializer: Option[Serializer[V]]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Option[Serializer[V]]): ProducerSettings[K, V] =
     apply(system.classicSystem, keySerializer, valueSerializer)
 
   /**
@@ -60,19 +58,16 @@ object ProducerSettings {
   def apply[K, V](
       config: Config,
       keySerializer: Option[Serializer[K]],
-      valueSerializer: Option[Serializer[V]]
-  ): ProducerSettings[K, V] = {
+      valueSerializer: Option[Serializer[V]]): ProducerSettings[K, V] = {
     val properties = ConfigSettings.parseKafkaClientsProperties(config.getConfig("kafka-clients"))
     require(
       keySerializer != null &&
       (keySerializer.isDefined || properties.contains(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)),
-      "Key serializer should be defined or declared in configuration"
-    )
+      "Key serializer should be defined or declared in configuration")
     require(
       valueSerializer != null &&
       (valueSerializer.isDefined || properties.contains(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)),
-      "Value serializer should be defined or declared in configuration"
-    )
+      "Value serializer should be defined or declared in configuration")
     val closeTimeout = config.getDuration("close-timeout").asScala
     val closeOnProducerStop = config.getBoolean("close-on-producer-stop")
     val parallelism = config.getInt("parallelism")
@@ -88,8 +83,7 @@ object ProducerSettings {
       dispatcher,
       eosCommitInterval,
       enrichAsync = None,
-      producerFactorySync = None
-    )
+      producerFactorySync = None)
   }
 
   /**
@@ -100,8 +94,7 @@ object ProducerSettings {
   def apply[K, V](
       system: akka.actor.ActorSystem,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(system, Option(keySerializer), Option(valueSerializer))
 
   /**
@@ -114,8 +107,7 @@ object ProducerSettings {
   def apply[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(system, Option(keySerializer), Option(valueSerializer))
 
   /**
@@ -126,8 +118,7 @@ object ProducerSettings {
   def apply[K, V](
       config: Config,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(config, Option(keySerializer), Option(valueSerializer))
 
   /**
@@ -138,8 +129,7 @@ object ProducerSettings {
   def create[K, V](
       system: akka.actor.ActorSystem,
       keySerializer: Optional[Serializer[K]],
-      valueSerializer: Optional[Serializer[V]]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Optional[Serializer[V]]): ProducerSettings[K, V] =
     apply(system, keySerializer.asScala, valueSerializer.asScala)
 
   /**
@@ -152,8 +142,7 @@ object ProducerSettings {
   def create[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keySerializer: Optional[Serializer[K]],
-      valueSerializer: Optional[Serializer[V]]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Optional[Serializer[V]]): ProducerSettings[K, V] =
     apply(system, keySerializer.asScala, valueSerializer.asScala)
 
   /**
@@ -164,8 +153,7 @@ object ProducerSettings {
   def create[K, V](
       config: Config,
       keySerializer: Optional[Serializer[K]],
-      valueSerializer: Optional[Serializer[V]]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Optional[Serializer[V]]): ProducerSettings[K, V] =
     apply(config, keySerializer.asScala, valueSerializer.asScala)
 
   /**
@@ -176,8 +164,7 @@ object ProducerSettings {
   def create[K, V](
       system: akka.actor.ActorSystem,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(system, keySerializer, valueSerializer)
 
   /**
@@ -190,8 +177,7 @@ object ProducerSettings {
   def create[K, V](
       system: akka.actor.ClassicActorSystemProvider,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(system, keySerializer, valueSerializer)
 
   /**
@@ -202,8 +188,7 @@ object ProducerSettings {
   def create[K, V](
       config: Config,
       keySerializer: Serializer[K],
-      valueSerializer: Serializer[V]
-  ): ProducerSettings[K, V] =
+      valueSerializer: Serializer[V]): ProducerSettings[K, V] =
     apply(config, keySerializer, valueSerializer)
 
   /**
@@ -211,8 +196,8 @@ object ProducerSettings {
    */
   def createKafkaProducer[K, V](settings: ProducerSettings[K, V]): KafkaProducer[K, V] =
     new KafkaProducer[K, V](settings.getProperties,
-                            settings.keySerializerOpt.orNull,
-                            settings.valueSerializerOpt.orNull)
+      settings.keySerializerOpt.orNull,
+      settings.valueSerializerOpt.orNull)
 }
 
 /**
@@ -233,13 +218,11 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
     val dispatcher: String,
     val eosCommitInterval: FiniteDuration,
     val enrichAsync: Option[ProducerSettings[K, V] => Future[ProducerSettings[K, V]]],
-    val producerFactorySync: Option[ProducerSettings[K, V] => Producer[K, V]]
-) {
+    val producerFactorySync: Option[ProducerSettings[K, V] => Producer[K, V]]) {
 
   @deprecated(
     "Use createKafkaProducer(), createKafkaProducerAsync(), or createKafkaProducerCompletionStage() to get a new KafkaProducer",
-    "2.0.0"
-  )
+    "2.0.0")
   def producerFactory: ProducerSettings[K, V] => Producer[K, V] = _ => createKafkaProducer()
 
   /**
@@ -353,8 +336,8 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
    * @since 2.0.0
    */
   def withEnrichCompletionStage(
-      value: java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]]
-  ): ProducerSettings[K, V] =
+      value: java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]])
+      : ProducerSettings[K, V] =
     copy(enrichAsync = Some((s: ProducerSettings[K, V]) => value.apply(s).toScala))
 
   /**
@@ -362,15 +345,15 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
    * `closeProducerOnStop = false` by default.
    */
   def withProducer(
-      producer: Producer[K, V]
-  ): ProducerSettings[K, V] = copy(producerFactorySync = Some(_ => producer), closeProducerOnStop = false)
+      producer: Producer[K, V]): ProducerSettings[K, V] =
+    copy(producerFactorySync = Some(_ => producer), closeProducerOnStop = false)
 
   /**
    * Replaces the default Kafka producer creation logic.
    */
   def withProducerFactory(
-      factory: ProducerSettings[K, V] => Producer[K, V]
-  ): ProducerSettings[K, V] = copy(producerFactorySync = Some(factory))
+      factory: ProducerSettings[K, V] => Producer[K, V]): ProducerSettings[K, V] =
+    copy(producerFactorySync = Some(factory))
 
   /**
    * Get the Kafka producer settings as map.
@@ -387,18 +370,18 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
       dispatcher: String = dispatcher,
       eosCommitInterval: FiniteDuration = eosCommitInterval,
       enrichAsync: Option[ProducerSettings[K, V] => Future[ProducerSettings[K, V]]] = enrichAsync,
-      producerFactorySync: Option[ProducerSettings[K, V] => Producer[K, V]] = producerFactorySync
-  ): ProducerSettings[K, V] =
+      producerFactorySync: Option[ProducerSettings[K, V] => Producer[K, V]] = producerFactorySync)
+      : ProducerSettings[K, V] =
     new ProducerSettings[K, V](properties,
-                               keySerializer,
-                               valueSerializer,
-                               closeTimeout,
-                               closeProducerOnStop,
-                               parallelism,
-                               dispatcher,
-                               eosCommitInterval,
-                               enrichAsync,
-                               producerFactorySync)
+      keySerializer,
+      valueSerializer,
+      closeTimeout,
+      closeProducerOnStop,
+      parallelism,
+      dispatcher,
+      eosCommitInterval,
+      enrichAsync,
+      producerFactorySync)
 
   override def toString: String = {
     val kafkaClients = properties.toSeq
@@ -438,12 +421,11 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
   def createKafkaProducer(): Producer[K, V] =
     if (enrichAsync.isDefined) {
       throw new IllegalStateException(
-        "Asynchronous settings enrichment is set via `withEnrichAsync` or `withEnrichCompletionStage`, you must use `createKafkaProducerAsync` or `createKafkaProducerCompletionStage` to apply it"
-      )
+        "Asynchronous settings enrichment is set via `withEnrichAsync` or `withEnrichCompletionStage`, you must use `createKafkaProducerAsync` or `createKafkaProducerCompletionStage` to apply it")
     } else {
       producerFactorySync match {
         case Some(factory) => factory.apply(this)
-        case _ => ProducerSettings.createKafkaProducer(this)
+        case _             => ProducerSettings.createKafkaProducer(this)
       }
     }
 
@@ -456,7 +438,7 @@ class ProducerSettings[K, V] @InternalApi private[kafka] (
   def createKafkaProducerAsync()(implicit executionContext: ExecutionContext): Future[Producer[K, V]] =
     producerFactorySync match {
       case Some(factory) => enriched.map(factory)
-      case _ => enriched.map(ProducerSettings.createKafkaProducer)
+      case _             => enriched.map(ProducerSettings.createKafkaProducer)
     }
 
   /**
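
And the matching producer-side sketch (broker address illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.ProducerSettings
    import org.apache.kafka.common.serialization.StringSerializer

    object ProducerSettingsSketch {
      // Reads the "akka.kafka.producer" config section and sets the broker list.
      def settings(system: ActorSystem): ProducerSettings[String, String] =
        ProducerSettings(system, new StringSerializer, new StringSerializer)
          .withBootstrapServers("localhost:9092") // assumed local broker
    }
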
diff --git a/core/src/main/scala/akka/kafka/RestrictedConsumer.scala b/core/src/main/scala/akka/kafka/RestrictedConsumer.scala
index 4c229985..90570049 100644
--- a/core/src/main/scala/akka/kafka/RestrictedConsumer.scala
+++ b/core/src/main/scala/akka/kafka/RestrictedConsumer.scala
@@ -6,7 +6,7 @@
 package akka.kafka
 
 import akka.annotation.ApiMayChange
-import org.apache.kafka.clients.consumer.{Consumer, OffsetAndMetadata, OffsetAndTimestamp}
+import org.apache.kafka.clients.consumer.{ Consumer, OffsetAndMetadata, OffsetAndTimestamp }
 import org.apache.kafka.common.TopicPartition
 
 /**
@@ -55,8 +55,8 @@ final class RestrictedConsumer(consumer: Consumer[_, _], duration: java.time.Dur
    * See [[org.apache.kafka.clients.consumer.KafkaConsumer#offsetsForTimes(java.util.Map[TopicPartition,Long],java.time.Duration)]]
    */
   def offsetsForTimes(
-      timestampsToSearch: java.util.Map[TopicPartition, java.lang.Long]
-  ): java.util.Map[TopicPartition, OffsetAndTimestamp] =
+      timestampsToSearch: java.util.Map[TopicPartition, java.lang.Long])
+      : java.util.Map[TopicPartition, OffsetAndTimestamp] =
     consumer.offsetsForTimes(timestampsToSearch, duration)
 
   /**
diff --git a/core/src/main/scala/akka/kafka/Subscriptions.scala b/core/src/main/scala/akka/kafka/Subscriptions.scala
index 655318e3..82079796 100644
--- a/core/src/main/scala/akka/kafka/Subscriptions.scala
+++ b/core/src/main/scala/akka/kafka/Subscriptions.scala
@@ -6,7 +6,7 @@
 package akka.kafka
 
 import akka.actor.ActorRef
-import akka.annotation.{ApiMayChange, InternalApi}
+import akka.annotation.{ ApiMayChange, InternalApi }
 import akka.kafka.internal.PartitionAssignmentHelpers
 import akka.kafka.internal.PartitionAssignmentHelpers.EmptyPartitionAssignmentHandler
 import org.apache.kafka.common.TopicPartition
@@ -64,7 +64,7 @@ sealed trait AutoSubscription extends Subscription {
   override protected def renderListener: String =
     rebalanceListener match {
       case Some(ref) => s" rebalanceListener $ref"
-      case None => ""
+      case None      => ""
     }
 }
 
@@ -84,8 +84,7 @@ object Subscriptions {
   private[kafka] final case class TopicSubscription(
       tps: Set[String],
       rebalanceListener: Option[ActorRef],
-      override val partitionAssignmentHandler: scaladsl.PartitionAssignmentHandler
-  ) extends AutoSubscription {
+      override val partitionAssignmentHandler: scaladsl.PartitionAssignmentHandler) extends AutoSubscription {
     def withRebalanceListener(ref: ActorRef): TopicSubscription =
       copy(rebalanceListener = Some(ref))
 
@@ -106,8 +105,7 @@ object Subscriptions {
   private[kafka] final case class TopicSubscriptionPattern(
       pattern: String,
       rebalanceListener: Option[ActorRef],
-      override val partitionAssignmentHandler: scaladsl.PartitionAssignmentHandler
-  ) extends AutoSubscription {
+      override val partitionAssignmentHandler: scaladsl.PartitionAssignmentHandler) extends AutoSubscription {
     def withRebalanceListener(ref: ActorRef): TopicSubscriptionPattern =
       copy(rebalanceListener = Some(ref))
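
A short sketch of the subscription variants these case classes back (topic and partition are illustrative):

    import akka.kafka.Subscriptions
    import org.apache.kafka.common.TopicPartition

    object SubscriptionSketch {
      // Group-managed subscription; partitions are assigned by the broker:
      val auto = Subscriptions.topics("events")
      // Regex-based subscription backed by TopicSubscriptionPattern above:
      val pattern = Subscriptions.topicPattern("events-.*")
      // Manual assignment of one partition starting at a fixed offset:
      val manual = Subscriptions.assignmentWithOffset(new TopicPartition("events", 0), 100L)
    }
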
 
diff --git a/core/src/main/scala/akka/kafka/internal/BaseSingleSourceLogic.scala b/core/src/main/scala/akka/kafka/internal/BaseSingleSourceLogic.scala
index 8885fd6b..fbc5c7c8 100644
--- a/core/src/main/scala/akka/kafka/internal/BaseSingleSourceLogic.scala
+++ b/core/src/main/scala/akka/kafka/internal/BaseSingleSourceLogic.scala
@@ -5,17 +5,17 @@
 
 package akka.kafka.internal
 
-import akka.actor.{ActorRef, Status, Terminated}
+import akka.actor.{ ActorRef, Status, Terminated }
 import akka.annotation.InternalApi
-import akka.kafka.Subscriptions.{Assignment, AssignmentOffsetsForTimes, AssignmentWithOffset}
-import akka.kafka.{ConsumerFailed, ManualSubscription}
+import akka.kafka.Subscriptions.{ Assignment, AssignmentOffsetsForTimes, AssignmentWithOffset }
+import akka.kafka.{ ConsumerFailed, ManualSubscription }
 import akka.stream.SourceShape
 import akka.stream.stage.GraphStageLogic.StageActor
-import akka.stream.stage.{AsyncCallback, GraphStageLogic, OutHandler}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic, OutHandler }
 import org.apache.kafka.common.TopicPartition
 
 import scala.annotation.tailrec
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 
 /**
  * Internal API.
@@ -23,8 +23,7 @@ import scala.concurrent.{ExecutionContext, Future}
  * Shared GraphStageLogic for [[SingleSourceLogic]] and [[ExternalSingleSourceLogic]].
  */
 @InternalApi private abstract class BaseSingleSourceLogic[K, V, Msg](
-    val shape: SourceShape[Msg]
-) extends GraphStageLogic(shape)
+    val shape: SourceShape[Msg]) extends GraphStageLogic(shape)
     with PromiseControl
     with MetricsControl
     with StageIdLogging
@@ -108,11 +107,12 @@ import scala.concurrent.{ExecutionContext, Future}
     consumerActor.tell(KafkaConsumerActor.Internal.RequestMessages(requestId, tps), sourceActor.ref)
   }
 
-  setHandler(shape.out, new OutHandler {
-    override def onPull(): Unit = pump()
-    override def onDownstreamFinish(cause: Throwable): Unit =
-      performShutdown()
-  })
+  setHandler(shape.out,
+    new OutHandler {
+      override def onPull(): Unit = pump()
+      override def onDownstreamFinish(cause: Throwable): Unit =
+        performShutdown()
+    })
 
   override def postStop(): Unit = {
     onShutdown()
diff --git a/core/src/main/scala/akka/kafka/internal/CommitCollectorStage.scala b/core/src/main/scala/akka/kafka/internal/CommitCollectorStage.scala
index afabf89e..64e75a60 100644
--- a/core/src/main/scala/akka/kafka/internal/CommitCollectorStage.scala
+++ b/core/src/main/scala/akka/kafka/internal/CommitCollectorStage.scala
@@ -7,7 +7,7 @@ package akka.kafka.internal
 
 import akka.annotation.InternalApi
 import akka.kafka.CommitterSettings
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffsetBatch }
 import akka.stream._
 import akka.stream.stage._
 
@@ -26,16 +26,14 @@ private[kafka] final class CommitCollectorStage(val committerSettings: Committer
   val shape: FlowShape[Committable, CommittableOffsetBatch] = FlowShape(in, out)
 
   override def createLogic(
-      inheritedAttributes: Attributes
-  ): GraphStageLogic = {
+      inheritedAttributes: Attributes): GraphStageLogic = {
     new CommitCollectorStageLogic(this, inheritedAttributes)
   }
 }
 
 private final class CommitCollectorStageLogic(
     stage: CommitCollectorStage,
-    inheritedAttributes: Attributes
-) extends TimerGraphStageLogic(stage.shape)
+    inheritedAttributes: Attributes) extends TimerGraphStageLogic(stage.shape)
     with CommitObservationLogic
     with StageIdLogging {
 
@@ -123,8 +121,7 @@ private final class CommitCollectorStageLogic(
         }
         failStage(ex)
       }
-    }
-  )
+    })
 
   setHandler(
     stage.out,
@@ -136,8 +133,7 @@ private final class CommitCollectorStageLogic(
         } else if (!hasBeenPulled(stage.in)) {
           tryPull(stage.in)
         }
-    }
-  )
+    })
 
   override def postStop(): Unit = {
     log.debug("CommitCollectorStage stopped")
diff --git a/core/src/main/scala/akka/kafka/internal/CommitObservationLogic.scala b/core/src/main/scala/akka/kafka/internal/CommitObservationLogic.scala
index 7a598429..2dd6a1a0 100644
--- a/core/src/main/scala/akka/kafka/internal/CommitObservationLogic.scala
+++ b/core/src/main/scala/akka/kafka/internal/CommitObservationLogic.scala
@@ -7,7 +7,7 @@ package akka.kafka.internal
 
 import akka.kafka.CommitWhen.OffsetFirstObserved
 import akka.kafka.CommitterSettings
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffset, CommittableOffsetBatch, GroupTopicPartition}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffset, CommittableOffsetBatch, GroupTopicPartition }
 import akka.stream.stage.GraphStageLogic
 
 /**
@@ -21,7 +21,7 @@ private[internal] trait CommitObservationLogic { self: GraphStageLogic =>
   /** Batches offsets until a commit is triggered. */
   protected var offsetBatch: CommittableOffsetBatch = CommittableOffsetBatch.empty
 
-  /** Deferred offsets when `CommitterSetting.when == CommitWhen.NextOffsetObserved` **/
+  /** Deferred offsets when `CommitterSetting.when == CommitWhen.NextOffsetObserved` * */
   private var deferredOffsets: Map[GroupTopicPartition, Committable] = Map.empty
 
   /**
@@ -40,16 +40,13 @@ private[internal] trait CommitObservationLogic { self: GraphStageLogic =>
           for { (gtp, offsetAndMetadata) <- batch.offsetsAndMetadata } updateBatchForPartition(
             gtp,
             batch.filter(_.equals(gtp)),
-            offsetAndMetadata.offset()
-          )
+            offsetAndMetadata.offset())
         case null =>
           throw new IllegalArgumentException(
-            s"Unknown Committable implementation, got [null]"
-          )
+            s"Unknown Committable implementation, got [null]")
         case unknownImpl =>
           throw new IllegalArgumentException(
-            s"Unknown Committable implementation, got [${unknownImpl.getClass.getName}]"
-          )
+            s"Unknown Committable implementation, got [${unknownImpl.getClass.getName}]")
 
       }
     }
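
This logic implements the NextOffsetObserved commit mode; a hedged sketch of selecting it, assuming a withCommitWhen combinator on CommitterSettings:

    import akka.actor.ActorSystem
    import akka.kafka.{ CommitterSettings, CommitWhen }

    object CommitWhenSketch {
      // Defer each offset until the next offset for the same group/topic/
      // partition is observed, instead of committing it as soon as it is seen.
      def settings(system: ActorSystem): CommitterSettings =
        CommitterSettings(system).withCommitWhen(CommitWhen.NextOffsetObserved)
    }
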
diff --git a/core/src/main/scala/akka/kafka/internal/CommittableSources.scala b/core/src/main/scala/akka/kafka/internal/CommittableSources.scala
index 6cfcf594..c05dc420 100644
--- a/core/src/main/scala/akka/kafka/internal/CommittableSources.scala
+++ b/core/src/main/scala/akka/kafka/internal/CommittableSources.scala
@@ -8,33 +8,32 @@ package akka.kafka.internal
 import akka.actor.ActorRef
 import akka.annotation.InternalApi
 import akka.dispatch.ExecutionContexts
-import akka.kafka.ConsumerMessage.{CommittableMessage, CommittableOffset}
+import akka.kafka.ConsumerMessage.{ CommittableMessage, CommittableOffset }
 import akka.kafka._
-import akka.kafka.internal.KafkaConsumerActor.Internal.{Commit, CommitSingle, CommitWithoutReply}
+import akka.kafka.internal.KafkaConsumerActor.Internal.{ Commit, CommitSingle, CommitWithoutReply }
 import akka.kafka.internal.SubSourceLogic._
 import akka.kafka.scaladsl.Consumer.Control
 import akka.pattern.AskTimeoutException
 import akka.stream.SourceShape
 import akka.stream.scaladsl.Source
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
 import akka.util.Timeout
-import akka.{Done, NotUsed}
-import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, OffsetAndMetadata}
+import akka.{ Done, NotUsed }
+import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.requests.OffsetFetchResponse
 
 import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 
 /** Internal API */
 @InternalApi
 private[kafka] final class CommittableSource[K, V](settings: ConsumerSettings[K, V],
-                                                   subscription: Subscription,
-                                                   _metadataFromRecord: ConsumerRecord[K, V] => String =
-                                                     CommittableMessageBuilder.NoMetadataFromRecord)
+    subscription: Subscription,
+    _metadataFromRecord: ConsumerRecord[K, V] => String =
+      CommittableMessageBuilder.NoMetadataFromRecord)
     extends KafkaSourceStage[K, V, CommittableMessage[K, V]](
-      s"CommittableSource ${subscription.renderStageAttribute}"
-    ) {
+      s"CommittableSource ${subscription.renderStageAttribute}") {
   override protected def logic(shape: SourceShape[CommittableMessage[K, V]]): GraphStageLogic with Control =
     new SingleSourceLogic[K, V, CommittableMessage[K, V]](shape, settings, subscription)
       with CommittableMessageBuilder[K, V] {
@@ -52,13 +51,11 @@ private[kafka] final class CommittableSource[K, V](settings: ConsumerSettings[K,
 private[kafka] final class SourceWithOffsetContext[K, V](
     settings: ConsumerSettings[K, V],
     subscription: Subscription,
-    _metadataFromRecord: ConsumerRecord[K, V] => String = CommittableMessageBuilder.NoMetadataFromRecord
-) extends KafkaSourceStage[K, V, (ConsumerRecord[K, V], CommittableOffset)](
-      s"SourceWithOffsetContext ${subscription.renderStageAttribute}"
-    ) {
+    _metadataFromRecord: ConsumerRecord[K, V] => String = CommittableMessageBuilder.NoMetadataFromRecord)
+    extends KafkaSourceStage[K, V, (ConsumerRecord[K, V], CommittableOffset)](
+      s"SourceWithOffsetContext ${subscription.renderStageAttribute}") {
   override protected def logic(
-      shape: SourceShape[(ConsumerRecord[K, V], CommittableOffset)]
-  ): GraphStageLogic with Control =
+      shape: SourceShape[(ConsumerRecord[K, V], CommittableOffset)]): GraphStageLogic with Control =
     new SingleSourceLogic[K, V, (ConsumerRecord[K, V], CommittableOffset)](shape, settings, subscription)
       with OffsetContextBuilder[K, V] {
       override def metadataFromRecord(record: ConsumerRecord[K, V]): String = _metadataFromRecord(record)
@@ -73,12 +70,11 @@ private[kafka] final class SourceWithOffsetContext[K, V](
 /** Internal API */
 @InternalApi
 private[kafka] final class ExternalCommittableSource[K, V](consumer: ActorRef,
-                                                           _groupId: String,
-                                                           commitTimeout: FiniteDuration,
-                                                           subscription: ManualSubscription)
+    _groupId: String,
+    commitTimeout: FiniteDuration,
+    subscription: ManualSubscription)
     extends KafkaSourceStage[K, V, CommittableMessage[K, V]](
-      s"ExternalCommittableSource ${subscription.renderStageAttribute}"
-    ) {
+      s"ExternalCommittableSource ${subscription.renderStageAttribute}") {
   override protected def logic(shape: SourceShape[CommittableMessage[K, V]]): GraphStageLogic with Control =
     new ExternalSingleSourceLogic[K, V, CommittableMessage[K, V]](shape, consumer, subscription)
       with CommittableMessageBuilder[K, V] {
@@ -98,13 +94,11 @@ private[kafka] final class CommittableSubSource[K, V](
     subscription: AutoSubscription,
     _metadataFromRecord: ConsumerRecord[K, V] => String = CommittableMessageBuilder.NoMetadataFromRecord,
     getOffsetsOnAssign: Option[Set[TopicPartition] => Future[Map[TopicPartition, Long]]] = None,
-    onRevoke: Set[TopicPartition] => Unit = _ => ()
-) extends KafkaSourceStage[K, V, (TopicPartition, Source[CommittableMessage[K, V], NotUsed])](
-      s"CommittableSubSource ${subscription.renderStageAttribute}"
-    ) {
+    onRevoke: Set[TopicPartition] => Unit = _ => ())
+    extends KafkaSourceStage[K, V, (TopicPartition, Source[CommittableMessage[K, V], NotUsed])](
+      s"CommittableSubSource ${subscription.renderStageAttribute}") {
   override protected def logic(
-      shape: SourceShape[(TopicPartition, Source[CommittableMessage[K, V], NotUsed])]
-  ): GraphStageLogic with Control = {
+      shape: SourceShape[(TopicPartition, Source[CommittableMessage[K, V], NotUsed])]): GraphStageLogic with Control = {
 
     val factory = new SubSourceStageLogicFactory[K, V, CommittableMessage[K, V]] {
       def create(
@@ -113,24 +107,23 @@ private[kafka] final class CommittableSubSource[K, V](
           consumerActor: ActorRef,
           subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
           subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
-          actorNumber: Int
-      ): SubSourceStageLogic[K, V, CommittableMessage[K, V]] =
+          actorNumber: Int): SubSourceStageLogic[K, V, CommittableMessage[K, V]] =
         new CommittableSubSourceStageLogic(shape,
-                                           tp,
-                                           consumerActor,
-                                           subSourceStartedCb,
-                                           subSourceCancelledCb,
-                                           actorNumber,
-                                           settings,
-                                           _metadataFromRecord)
+          tp,
+          consumerActor,
+          subSourceStartedCb,
+          subSourceCancelledCb,
+          actorNumber,
+          settings,
+          _metadataFromRecord)
 
     }
     new SubSourceLogic[K, V, CommittableMessage[K, V]](shape,
-                                                       settings,
-                                                       subscription,
-                                                       getOffsetsOnAssign,
-                                                       onRevoke,
-                                                       subSourceStageLogicFactory = factory)
+      settings,
+      subscription,
+      getOffsetsOnAssign,
+      onRevoke,
+      subSourceStageLogicFactory = factory)
   }
 }
 
@@ -140,8 +133,7 @@ private[kafka] object KafkaAsyncConsumerCommitterRef {
   def commit(offset: CommittableOffsetImpl): Future[Done] = {
     offset.committer.commitSingle(
       new TopicPartition(offset.partitionOffset.key.topic, offset.partitionOffset.key.partition),
-      new OffsetAndMetadata(offset.partitionOffset.offset + 1, offset.metadata)
-    )
+      new OffsetAndMetadata(offset.partitionOffset.offset + 1, offset.metadata))
   }
 
   def commit(batch: CommittableOffsetBatchImpl): Future[Done] = {
@@ -170,8 +162,8 @@ private[kafka] object KafkaAsyncConsumerCommitterRef {
   }
 
   private def forBatch[T](
-      batch: CommittableOffsetBatchImpl
-  )(sendMsg: (KafkaAsyncConsumerCommitterRef, TopicPartition, OffsetAndMetadata) => T) = {
+      batch: CommittableOffsetBatchImpl)(
+      sendMsg: (KafkaAsyncConsumerCommitterRef, TopicPartition, OffsetAndMetadata) => T) = {
     val results = batch.offsetsAndMetadata.map {
       case (groupTopicPartition, offset) =>
         // sends one message per partition, they are aggregated in the KafkaConsumerActor
@@ -191,9 +183,8 @@ private[kafka] object KafkaAsyncConsumerCommitterRef {
  */
 @InternalApi
 private[kafka] class KafkaAsyncConsumerCommitterRef(private val consumerActor: ActorRef,
-                                                    private val commitTimeout: FiniteDuration)(
-    private val ec: ExecutionContext
-) {
+    private val commitTimeout: FiniteDuration)(
+    private val ec: ExecutionContext) {
   def commitSingle(topicPartition: TopicPartition, offset: OffsetAndMetadata): Future[Done] = {
     sendWithReply(CommitSingle(topicPartition, offset))
   }
@@ -239,13 +230,13 @@ private final class CommittableSubSourceStageLogic[K, V](
     subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
     actorNumber: Int,
     consumerSettings: ConsumerSettings[K, V],
-    _metadataFromRecord: ConsumerRecord[K, V] => String = CommittableMessageBuilder.NoMetadataFromRecord
-) extends SubSourceStageLogic[K, V, CommittableMessage[K, V]](shape,
-                                                                tp,
-                                                                consumerActor,
-                                                                subSourceStartedCb,
-                                                                subSourceCancelledCb,
-                                                                actorNumber)
+    _metadataFromRecord: ConsumerRecord[K, V] => String = CommittableMessageBuilder.NoMetadataFromRecord)
+    extends SubSourceStageLogic[K, V, CommittableMessage[K, V]](shape,
+      tp,
+      consumerActor,
+      subSourceStartedCb,
+      subSourceCancelledCb,
+      actorNumber)
     with CommittableMessageBuilder[K, V] {
 
   override def metadataFromRecord(record: ConsumerRecord[K, V]): String = _metadataFromRecord(record)
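
These sources are exposed through the scaladsl; a minimal at-least-once pipeline sketch (broker, group and topic are illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.{ CommitterSettings, ConsumerSettings, Subscriptions }
    import akka.kafka.scaladsl.{ Committer, Consumer }
    import org.apache.kafka.common.serialization.StringDeserializer
    import scala.concurrent.Future

    object CommittableSourceSketch {
      def run()(implicit system: ActorSystem): Unit = {
        import system.dispatcher
        val consumerSettings =
          ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
            .withBootstrapServers("localhost:9092") // assumed local broker
            .withGroupId("example-group")
        Consumer
          .committableSource(consumerSettings, Subscriptions.topics("events"))
          .mapAsync(1) { msg =>
            // stand-in processing step; emit the offset once the work is done
            Future(msg.record.value()).map(_ => msg.committableOffset)
          }
          .runWith(Committer.sink(CommitterSettings(system)))
      }
    }
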
diff --git a/core/src/main/scala/akka/kafka/internal/CommittingProducerSinkStage.scala b/core/src/main/scala/akka/kafka/internal/CommittingProducerSinkStage.scala
index 138fd497..617c2c92 100644
--- a/core/src/main/scala/akka/kafka/internal/CommittingProducerSinkStage.scala
+++ b/core/src/main/scala/akka/kafka/internal/CommittingProducerSinkStage.scala
@@ -9,17 +9,17 @@ import java.util.concurrent.atomic.AtomicInteger
 
 import akka.Done
 import akka.annotation.InternalApi
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffsetBatch }
 import akka.kafka.ProducerMessage._
-import akka.kafka.{CommitDelivery, CommitterSettings, ProducerSettings}
+import akka.kafka.{ CommitDelivery, CommitterSettings, ProducerSettings }
 import akka.stream.ActorAttributes.SupervisionStrategy
 import akka.stream.Supervision.Decider
 import akka.stream.stage._
-import akka.stream.{Attributes, Inlet, SinkShape, Supervision}
-import org.apache.kafka.clients.producer.{Callback, RecordMetadata}
+import akka.stream.{ Attributes, Inlet, SinkShape, Supervision }
+import org.apache.kafka.clients.producer.{ Callback, RecordMetadata }
 
-import scala.concurrent.{Future, Promise}
-import scala.util.{Failure, Success, Try}
+import scala.concurrent.{ Future, Promise }
+import scala.util.{ Failure, Success, Try }
 
 /**
  * INTERNAL API.
@@ -29,8 +29,7 @@ import scala.util.{Failure, Success, Try}
 @InternalApi
 private[kafka] final class CommittingProducerSinkStage[K, V, IN <: Envelope[K, V, Committable]](
     val producerSettings: ProducerSettings[K, V],
-    val committerSettings: CommitterSettings
-) extends GraphStageWithMaterializedValue[SinkShape[IN], Future[Done]] {
+    val committerSettings: CommitterSettings) extends GraphStageWithMaterializedValue[SinkShape[IN], Future[Done]] {
 
   require(committerSettings.delivery == CommitDelivery.WaitForAck, "only CommitDelivery.WaitForAck may be used")
 
@@ -45,8 +44,7 @@ private[kafka] final class CommittingProducerSinkStage[K, V, IN <: Envelope[K, V
 
 private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V, Committable]](
     stage: CommittingProducerSinkStage[K, V, IN],
-    inheritedAttributes: Attributes
-) extends TimerGraphStageLogic(stage.shape)
+    inheritedAttributes: Attributes) extends TimerGraphStageLogic(stage.shape)
     with CommitObservationLogic
     with StageIdLogging
     with DeferredProducer[K, V] {
@@ -122,7 +120,7 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
     case (count, exception) =>
       decider(exception) match {
         case Supervision.Stop => closeAndFailStage(exception)
-        case _ => collectOffsetIgnore(count, exception)
+        case _                => collectOffsetIgnore(count, exception)
       }
   }
 
@@ -167,7 +165,7 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
 
   override protected def onTimer(timerKey: Any): Unit = timerKey match {
     case CommittingProducerSinkStage.CommitNow => commit(Interval)
-    case _ => log.warning("unexpected timer [{}]", timerKey)
+    case _                                     => log.warning("unexpected timer [{}]", timerKey)
   }
 
   private def collectOffset(offset: Committable): Unit =
@@ -177,9 +175,9 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
   private def commit(triggeredBy: TriggerdBy): Unit = {
     if (offsetBatch.batchSize != 0) {
       log.debug("commit triggered by {} (awaitingProduceResult={} awaitingCommitResult={})",
-                triggeredBy,
-                awaitingProduceResult,
-                awaitingCommitResult)
+        triggeredBy,
+        awaitingProduceResult,
+        awaitingCommitResult)
       val batchSize = offsetBatch.batchSize
       offsetBatch
         .commitInternal()
@@ -207,9 +205,9 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
 
   private def emergencyShutdown(ex: Throwable): Unit = {
     log.debug("Emergency shutdown triggered by {} (awaitingProduceResult={} awaitingCommitResult={})",
-              ex,
-              awaitingProduceResult,
-              awaitingCommitResult)
+      ex,
+      awaitingProduceResult,
+      awaitingCommitResult)
 
     offsetBatch.tellCommitEmergency()
     upstreamCompletionState = Some(Failure(ex))
@@ -245,8 +243,7 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
         } else {
           emergencyShutdown(ex)
         }
-    }
-  )
+    })
 
   private def awaitingCommitsBeforeShutdown(): Boolean = {
     awaitingCommitResult -= clearDeferredOffsets()
@@ -267,8 +264,8 @@ private final class CommittingProducerSinkStageLogic[K, V, IN <: Envelope[K, V,
         }
       } else
         log.debug("checkForCompletion awaitingProduceResult={} awaitingCommitResult={}",
-                  awaitingProduceResult,
-                  awaitingCommitResult)
+          awaitingProduceResult,
+          awaitingCommitResult)
 
   override def postStop(): Unit = {
     log.debug("CommittingProducerSink stopped")
diff --git a/core/src/main/scala/akka/kafka/internal/ConfigSettings.scala b/core/src/main/scala/akka/kafka/internal/ConfigSettings.scala
index 0e561829..b406985a 100644
--- a/core/src/main/scala/akka/kafka/internal/ConfigSettings.scala
+++ b/core/src/main/scala/akka/kafka/internal/ConfigSettings.scala
@@ -8,7 +8,7 @@ package akka.kafka.internal
 import java.util
 
 import akka.annotation.InternalApi
-import com.typesafe.config.{Config, ConfigObject}
+import com.typesafe.config.{ Config, ConfigObject }
 
 import scala.annotation.tailrec
 import scala.jdk.CollectionConverters._
@@ -30,8 +30,8 @@ import akka.util.JavaDurationConverters._
         c.toConfig.getAnyRef(unprocessedKeys.head) match {
           case o: util.Map[_, _] =>
             collectKeys(c,
-                        processedKeys,
-                        unprocessedKeys.tail ::: o.keySet().asScala.toList.map(unprocessedKeys.head + "." + _))
+              processedKeys,
+              unprocessedKeys.tail ::: o.keySet().asScala.toList.map(unprocessedKeys.head + "." + _))
           case _ =>
             collectKeys(c, processedKeys + unprocessedKeys.head, unprocessedKeys.tail)
         }
@@ -43,7 +43,7 @@ import akka.util.JavaDurationConverters._
 
   def getPotentiallyInfiniteDuration(underlying: Config, path: String): Duration = underlying.getString(path) match {
     case "infinite" => Duration.Inf
-    case _ => underlying.getDuration(path).asScala
+    case _          => underlying.getDuration(path).asScala
   }
 
 }
diff --git a/core/src/main/scala/akka/kafka/internal/ConnectionChecker.scala b/core/src/main/scala/akka/kafka/internal/ConnectionChecker.scala
index d273d6fc..cb1fbe19 100644
--- a/core/src/main/scala/akka/kafka/internal/ConnectionChecker.scala
+++ b/core/src/main/scala/akka/kafka/internal/ConnectionChecker.scala
@@ -5,14 +5,14 @@
 
 package akka.kafka.internal
 
-import akka.actor.{Actor, ActorLogging, Props, Timers}
+import akka.actor.{ Actor, ActorLogging, Props, Timers }
 import akka.annotation.InternalApi
 import akka.event.LoggingReceive
-import akka.kafka.{ConnectionCheckerSettings, KafkaConnectionFailed, Metadata}
+import akka.kafka.{ ConnectionCheckerSettings, KafkaConnectionFailed, Metadata }
 import org.apache.kafka.common.errors.TimeoutException
 
 import scala.concurrent.duration.FiniteDuration
-import scala.util.{Failure, Success}
+import scala.util.{ Failure, Success }
 
 @InternalApi private class ConnectionChecker(config: ConnectionCheckerSettings)
     extends Actor
@@ -20,7 +20,7 @@ import scala.util.{Failure, Success}
     with Timers {
 
   import ConnectionChecker.Internal._
-  import config.{enable => _, _}
+  import config.{ enable => _, _ }
 
   override def preStart(): Unit = {
     super.preStart()
@@ -34,15 +34,14 @@ import scala.util.{Failure, Success}
 
   def backoff(failedAttempts: Int = 1, backoffCheckInterval: FiniteDuration): Receive =
     LoggingReceive.withLabel(s"backoff($failedAttempts, $backoffCheckInterval)")(
-      behaviour(failedAttempts, backoffCheckInterval)
-    )
+      behaviour(failedAttempts, backoffCheckInterval))
 
   def behaviour(failedAttempts: Int, interval: FiniteDuration): Receive = {
     case CheckConnection =>
       context.parent ! Metadata.ListTopics
 
     case Metadata.Topics(Failure(te: TimeoutException)) =>
-      //failedAttempts is a sum of first triggered failure and retries (retries + 1)
+      // failedAttempts is a sum of first triggered failure and retries (retries + 1)
       if (failedAttempts == maxRetries) {
         context.parent ! KafkaConnectionFailed(te, maxRetries)
         context.stop(self)
@@ -55,7 +54,8 @@ import scala.util.{Failure, Success}
 
   def startTimer(): Unit = timers.startSingleTimer(RegularCheck, CheckConnection, checkInterval)
 
-  /** start single timer and return its interval
+  /**
+   * start single timer and return its interval
    *
    * @param previousInterval previous CheckConnection interval
    * @return new backoff interval (previousInterval * factor)
@@ -73,11 +73,11 @@ import scala.util.{Failure, Success}
   def props(config: ConnectionCheckerSettings): Props = Props(new ConnectionChecker(config))
 
   private object Internal {
-    //Timer labels
+    // Timer labels
     case object RegularCheck
     case object BackoffCheck
 
-    //Commands
+    // Commands
     case object CheckConnection
   }
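
A hedged sketch of turning this checker on, assuming the ConnectionCheckerSettings(retries, checkInterval, factor) factory together with the withConnectionChecker combinator shown earlier in this diff:

    import akka.kafka.{ ConnectionCheckerSettings, ConsumerSettings }
    import scala.concurrent.duration._

    object ConnectionCheckerSketch {
      // Fail the consumer with KafkaConnectionFailed after 3 failed metadata
      // checks, starting from a 15-second interval and backing off by factor 2.
      def withChecker[K, V](base: ConsumerSettings[K, V]): ConsumerSettings[K, V] =
        base.withConnectionChecker(ConnectionCheckerSettings(3, 15.seconds, 2d))
    }
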
 
diff --git a/core/src/main/scala/akka/kafka/internal/ConsumerProgressTracking.scala b/core/src/main/scala/akka/kafka/internal/ConsumerProgressTracking.scala
index 9e017947..64f63ad3 100644
--- a/core/src/main/scala/akka/kafka/internal/ConsumerProgressTracking.scala
+++ b/core/src/main/scala/akka/kafka/internal/ConsumerProgressTracking.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.internal
 import akka.annotation.InternalApi
-import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecords, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRecords, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 
 import scala.jdk.CollectionConverters._
@@ -41,8 +41,8 @@ trait ConsumerProgressTracking extends ConsumerAssignmentTrackingListener {
   def received[K, V](records: ConsumerRecords[K, V]): Unit = {}
   def committed(offsets: java.util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}
   def assignedPositionsAndSeek(assignedTps: Set[TopicPartition],
-                               consumer: Consumer[_, _],
-                               positionTimeout: java.time.Duration): Unit = {}
+      consumer: Consumer[_, _],
+      positionTimeout: java.time.Duration): Unit = {}
   def addProgressTrackingCallback(callback: ConsumerAssignmentTrackingListener): Unit = {}
 }
 
@@ -79,21 +79,21 @@ final class ConsumerProgressTrackerImpl extends ConsumerProgressTracking {
 
   override def received[K, V](received: ConsumerRecords[K, V]): Unit = {
     receivedMessagesImpl = receivedMessagesImpl ++ received
-        .partitions()
-        .asScala
-        // only tracks the partitions that are currently assigned, as assignment is a synchronous interaction and polls
-        // for an old consumer group epoch will not return (we get to make polls for the current generation). Supposing a
-        // revoke completes and then the poll() is received for a previous epoch, we drop the records here (partitions
-        // are no longer assigned to the consumer). If instead we get a poll() and then a revoke, we only track the
-        // offsets for that short period of time and then they are revoked, so that is also safe.
-        .intersect(assignedPartitions)
-        .map(tp => (tp, received.records(tp)))
-        // get the last record, its the largest offset/most recent timestamp
-        .map { case (partition, records) => (partition, records.get(records.size() - 1)) }
-        .map {
-          case (partition, record) =>
-            partition -> SafeOffsetAndTimestamp(record.offset(), record.timestamp())
-        }
+      .partitions()
+      .asScala
+      // only tracks the partitions that are currently assigned, as assignment is a synchronous interaction and polls
+      // for an old consumer group epoch will not return (we get to make polls for the current generation). Supposing a
+      // revoke completes and then the poll() is received for a previous epoch, we drop the records here (partitions
+      // are no longer assigned to the consumer). If instead we get a poll() and then a revoke, we only track the
+      // offsets for that short period of time and then they are revoked, so that is also safe.
+      .intersect(assignedPartitions)
+      .map(tp => (tp, received.records(tp)))
+      // get the last record; it has the largest offset/most recent timestamp
+      .map { case (partition, records) => (partition, records.get(records.size() - 1)) }
+      .map {
+        case (partition, record) =>
+          partition -> SafeOffsetAndTimestamp(record.offset(), record.timestamp())
+      }
   }
 
   override def commitRequested(offsets: Map[TopicPartition, OffsetAndMetadata]): Unit = {
@@ -118,19 +118,19 @@ final class ConsumerProgressTrackerImpl extends ConsumerProgressTracking {
     // progress, so we update them when consumer is assigned. Consumer can always add more partitions - we only lose
     // them on revoke(), which is why this operation is only additive.
     commitRequestedOffsetsImpl = commitRequestedOffsetsImpl ++ assignedOffsets.map {
-        case (partition, offset) =>
-          partition -> commitRequested.getOrElse(partition, new OffsetAndMetadata(offset))
-      }
+      case (partition, offset) =>
+        partition -> commitRequested.getOrElse(partition, new OffsetAndMetadata(offset))
+    }
     committedOffsetsImpl = committedOffsets ++ assignedOffsets.map {
-        case (partition, offset) =>
-          partition -> committedOffsets.getOrElse(partition, new OffsetAndMetadata(offset))
-      }
+      case (partition, offset) =>
+        partition -> committedOffsets.getOrElse(partition, new OffsetAndMetadata(offset))
+    }
     assignedOffsetsCallbacks.foreach(_.assignedPositions(assignedTps, assignedOffsets))
   }
 
   override def assignedPositionsAndSeek(assignedTps: Set[TopicPartition],
-                                        consumer: Consumer[_, _],
-                                        positionTimeout: java.time.Duration): Unit = {
+      consumer: Consumer[_, _],
+      positionTimeout: java.time.Duration): Unit = {
     val assignedOffsets = assignedTps.map(tp => tp -> consumer.position(tp, positionTimeout)).toMap
     assignedPositions(assignedTps, assignedOffsets)
   }
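
A detail that is easy to lose in the re-indented assignedPositions hunks above: the merge is purely additive. An offset the tracker already holds for a partition wins, and the freshly assigned position only fills in partitions seen for the first time. A hedged sketch of just that merge, with plain immutable Maps standing in for the tracker's mutable state (TopicPartition and OffsetAndMetadata are the real Kafka client types):

    import org.apache.kafka.clients.consumer.OffsetAndMetadata
    import org.apache.kafka.common.TopicPartition

    // Illustrative merge: offsets already tracked win, assigned positions fill the gaps.
    def mergeAssigned(
        tracked: Map[TopicPartition, OffsetAndMetadata],
        assignedOffsets: Map[TopicPartition, Long]): Map[TopicPartition, OffsetAndMetadata] =
      tracked ++ assignedOffsets.map {
        case (partition, offset) =>
          partition -> tracked.getOrElse(partition, new OffsetAndMetadata(offset))
      }
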
diff --git a/core/src/main/scala/akka/kafka/internal/ConsumerResetProtection.scala b/core/src/main/scala/akka/kafka/internal/ConsumerResetProtection.scala
index f3a26e01..99f291cc 100644
--- a/core/src/main/scala/akka/kafka/internal/ConsumerResetProtection.scala
+++ b/core/src/main/scala/akka/kafka/internal/ConsumerResetProtection.scala
@@ -12,7 +12,7 @@ import akka.annotation.InternalApi
 import akka.event.LoggingAdapter
 import akka.kafka.OffsetResetProtectionSettings
 import akka.kafka.internal.KafkaConsumerActor.Internal.Seek
-import org.apache.kafka.clients.consumer.{ConsumerRecord, ConsumerRecords, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{ ConsumerRecord, ConsumerRecords, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 
 import scala.jdk.CollectionConverters._
@@ -37,8 +37,8 @@ sealed trait ConsumerResetProtection {
 @InternalApi
 object ConsumerResetProtection {
   def apply[K, V](log: LoggingAdapter,
-                  setttings: OffsetResetProtectionSettings,
-                  progress: () => ConsumerProgressTracking): ConsumerResetProtection = {
+      setttings: OffsetResetProtectionSettings,
+      progress: () => ConsumerProgressTracking): ConsumerResetProtection = {
     if (setttings.enable) new Impl(log, setttings, progress()) else ConsumerResetProtection.Noop
   }
 
@@ -47,8 +47,8 @@ object ConsumerResetProtection {
   }
 
   private final class Impl(log: LoggingAdapter,
-                           resetProtection: OffsetResetProtectionSettings,
-                           progress: ConsumerProgressTracking)
+      resetProtection: OffsetResetProtectionSettings,
+      progress: ConsumerProgressTracking)
       extends ConsumerResetProtection {
     override def protect[K, V](consumer: ActorRef, records: ConsumerRecords[K, V]): ConsumerRecords[K, V] = {
       val safe: java.util.Map[TopicPartition, java.util.List[ConsumerRecord[K, V]]] =
@@ -72,12 +72,11 @@ object ConsumerResetProtection {
     private def maybeProtectRecords[K, V](
         consumer: ActorRef,
         tp: TopicPartition,
-        records: ConsumerRecords[K, V]
-    ): Option[(TopicPartition, util.List[ConsumerRecord[K, V]])] = {
+        records: ConsumerRecords[K, V]): Option[(TopicPartition, util.List[ConsumerRecord[K, V]])] = {
       val partitionRecords: util.List[ConsumerRecord[K, V]] = records.records(tp)
       progress.commitRequested.get(tp) match {
         case Some(requested) => protectPartition(consumer, tp, requested, partitionRecords)
-        case None =>
+        case None            =>
           // it's a partition that we have no information on, so assume it's safe and continue because it's likely
           // due to a rebalance, in which we have already reset to the committed offset, which is safe
           Some((tp, partitionRecords))
@@ -97,8 +96,8 @@ object ConsumerResetProtection {
         consumer: ActorRef,
         tp: TopicPartition,
         previouslyCommitted: OffsetAndMetadata,
-        partitionRecords: util.List[ConsumerRecord[K, V]]
-    ): Option[(TopicPartition, util.List[ConsumerRecord[K, V]])] = {
+        partitionRecords: util.List[ConsumerRecord[K, V]])
+        : Option[(TopicPartition, util.List[ConsumerRecord[K, V]])] = {
       val threshold = new RecordThreshold(previouslyCommitted.offset(), progress.receivedMessages.get(tp))
       if (threshold.recordsExceedThreshold(threshold, partitionRecords)) {
         // requested and committed are assumed to be kept in-sync, so this _should_ be safe. Fails
@@ -106,20 +105,18 @@ object ConsumerResetProtection {
         val committed = progress.committedOffsets(tp)
         val requestVersusCommitted = previouslyCommitted.offset() - committed.offset()
         if (resetProtection.offsetThreshold < Long.MaxValue &&
-            requestVersusCommitted > resetProtection.offsetThreshold) {
+          requestVersusCommitted > resetProtection.offsetThreshold) {
           log.warning(
             s"Your last commit request $previouslyCommitted is more than the configured threshold from the last" +
             s"committed offset ($committed) for $tp. See " +
-            "https://doc.akka.io/docs/alpakka-kafka/current/errorhandling.html#setting-offset-threshold-appropriately for more info."
-          )
+            "https://doc.akka.io/docs/alpakka-kafka/current/errorhandling.html#setting-offset-threshold-appropriately for more info.")
         }
         log.warning(
           s"Dropping offsets for partition $tp - received an offset which is less than allowed $threshold " +
           s"from the  last requested offset (threshold: $threshold). Seeking to the latest known safe (committed " +
           s"or assigned) offset: $committed. See  " +
           "https://doc.akka.io/docs/alpakka-kafka/current/errorhandling.html#unexpected-consumer-offset-reset" +
-          "for more information."
-        )
+          "for more information.")
         consumer ! Seek(Map(tp -> committed.offset()))
         None
       } else {
@@ -150,7 +147,7 @@ object ConsumerResetProtection {
        * @return `true` if the records in the batch have gone outside the threshold, `false` otherwise.
        */
       def recordsExceedThreshold[K, V](threshold: RecordThreshold,
-                                       partitionRecords: util.List[ConsumerRecord[K, V]]): Boolean = {
+          partitionRecords: util.List[ConsumerRecord[K, V]]): Boolean = {
         var exceedThreshold = false
         // rather than check all the records in the batch, trust that Kafka has given them to us in order, and just
         // check the first and last offsets in the batch.
@@ -166,11 +163,9 @@ object ConsumerResetProtection {
       def checkExceedsThreshold[K, V](record: ConsumerRecord[K, V]): Boolean = {
         record.offset() < offsetThreshold ||
         // timestamp can be set to -1 for some older client versions, ensure we don't penalize that
-        timeThreshold.exists(
-          threshold =>
-            record.timestamp() != ConsumerRecord.NO_TIMESTAMP &&
-            record.timestamp() < threshold
-        )
+        timeThreshold.exists(threshold =>
+          record.timestamp() != ConsumerRecord.NO_TIMESTAMP &&
+          record.timestamp() < threshold)
       }
 
       override def toString: String = s"max-offset: $offsetThreshold, max-timestamp: $timeThreshold"
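
The predicate at the heart of the RecordThreshold hunks above reads well in isolation: a polled record trips the reset protection if its offset fell below the allowed offset floor, or if it carries a genuine timestamp older than the allowed time floor. A standalone sketch under those assumptions, where only ConsumerRecord.NO_TIMESTAMP is the real Kafka constant:

    import org.apache.kafka.clients.consumer.ConsumerRecord

    // Illustrative version of checkExceedsThreshold: offset below the floor, or a
    // real timestamp (not NO_TIMESTAMP, i.e. not -1) below the time floor.
    def exceedsThreshold(offset: Long, timestamp: Long, offsetFloor: Long, timeFloor: Option[Long]): Boolean =
      offset < offsetFloor ||
      timeFloor.exists(floor => timestamp != ConsumerRecord.NO_TIMESTAMP && timestamp < floor)
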
diff --git a/core/src/main/scala/akka/kafka/internal/ControlImplementations.scala b/core/src/main/scala/akka/kafka/internal/ControlImplementations.scala
index a55ed995..a77470a2 100644
--- a/core/src/main/scala/akka/kafka/internal/ControlImplementations.scala
+++ b/core/src/main/scala/akka/kafka/internal/ControlImplementations.scala
@@ -4,22 +4,22 @@
  */
 
 package akka.kafka.internal
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
 
 import akka.Done
 import akka.actor.ActorRef
 import akka.annotation.InternalApi
 import akka.dispatch.ExecutionContexts
-import akka.kafka.internal.KafkaConsumerActor.Internal.{ConsumerMetrics, RequestMetrics}
-import akka.kafka.{javadsl, scaladsl}
+import akka.kafka.internal.KafkaConsumerActor.Internal.{ ConsumerMetrics, RequestMetrics }
+import akka.kafka.{ javadsl, scaladsl }
 import akka.stream.SourceShape
 import akka.stream.stage.GraphStageLogic
 import akka.util.Timeout
-import org.apache.kafka.common.{Metric, MetricName}
+import org.apache.kafka.common.{ Metric, MetricName }
 
 import scala.jdk.CollectionConverters._
-import scala.compat.java8.FutureConverters.{CompletionStageOps, FutureOps}
-import scala.concurrent.{ExecutionContext, Future, Promise}
+import scala.compat.java8.FutureConverters.{ CompletionStageOps, FutureOps }
+import scala.concurrent.{ ExecutionContext, Future, Promise }
 
 private object PromiseControl {
   sealed trait ControlOperation
@@ -43,10 +43,10 @@ private trait PromiseControl extends GraphStageLogic with scaladsl.Consumer.Cont
   private val shutdownPromise: Promise[Done] = Promise()
   private val stopPromise: Promise[Done] = Promise()
 
-  private val controlCallback = getAsyncCallback[ControlOperation]({
-    case ControlStop => performStop()
+  private val controlCallback = getAsyncCallback[ControlOperation] {
+    case ControlStop     => performStop()
     case ControlShutdown => performShutdown()
-  })
+  }
 
   def onStop() =
     stopPromise.trySuccess(Done)
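
The controlCallback hunk above leans on two idempotent promises: stop and shutdown signals complete them at most once, and callers observe the signals as futures. A minimal sketch of that pattern (the class name here is invented for the example):

    import akka.Done
    import scala.concurrent.{ Future, Promise }

    // Illustrative: trySuccess makes repeated stop signals a harmless no-op.
    final class StopSignal {
      private val stopPromise = Promise[Done]()
      def onStop(): Unit = stopPromise.trySuccess(Done)
      def stopped: Future[Done] = stopPromise.future
    }
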
diff --git a/core/src/main/scala/akka/kafka/internal/DefaultProducerStage.scala b/core/src/main/scala/akka/kafka/internal/DefaultProducerStage.scala
index f6ee80c0..97f9e10a 100644
--- a/core/src/main/scala/akka/kafka/internal/DefaultProducerStage.scala
+++ b/core/src/main/scala/akka/kafka/internal/DefaultProducerStage.scala
@@ -13,19 +13,18 @@ import akka.kafka.internal.ProducerStage.ProducerCompletionState
 import akka.stream.ActorAttributes.SupervisionStrategy
 import akka.stream.Supervision.Decider
 import akka.stream.stage._
-import akka.stream.{Attributes, FlowShape, Supervision}
-import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata}
+import akka.stream.{ Attributes, FlowShape, Supervision }
+import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
 
-import scala.concurrent.{ExecutionContext, Future, Promise}
-import scala.util.{Failure, Success, Try}
+import scala.concurrent.{ ExecutionContext, Future, Promise }
+import scala.util.{ Failure, Success, Try }
 
 /**
  * INTERNAL API
  */
 @InternalApi
 private[kafka] class DefaultProducerStage[K, V, P, IN <: Envelope[K, V, P], OUT <: Results[K, V, P]](
-    val settings: ProducerSettings[K, V]
-) extends GraphStage[FlowShape[IN, Future[OUT]]]
+    val settings: ProducerSettings[K, V]) extends GraphStage[FlowShape[IN, Future[OUT]]]
     with ProducerStage[K, V, P, IN, OUT] {
 
   override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
@@ -39,8 +38,7 @@ private[kafka] class DefaultProducerStage[K, V, P, IN <: Envelope[K, V, P], OUT
  */
 private class DefaultProducerStageLogic[K, V, P, IN <: Envelope[K, V, P], OUT <: Results[K, V, P]](
     stage: ProducerStage[K, V, P, IN, OUT],
-    inheritedAttributes: Attributes
-) extends TimerGraphStageLogic(stage.shape)
+    inheritedAttributes: Attributes) extends TimerGraphStageLogic(stage.shape)
     with StageIdLogging
     with DeferredProducer[K, V]
     with ProducerCompletionState {
@@ -78,9 +76,9 @@ private class DefaultProducerStageLogic[K, V, P, IN <: Envelope[K, V, P], OUT <:
   private def checkForCompletion(): Unit =
     if (isClosed(stage.in) && awaitingConfirmation == 0) {
       completionState match {
-        case Some(Success(_)) => onCompletionSuccess()
+        case Some(Success(_))  => onCompletionSuccess()
         case Some(Failure(ex)) => onCompletionFailure(ex)
-        case None => failStage(new IllegalStateException("Stage completed, but there is no info about status"))
+        case None              => failStage(new IllegalStateException("Stage completed, but there is no info about status"))
       }
     }
 
@@ -104,9 +102,10 @@ private class DefaultProducerStageLogic[K, V, P, IN <: Envelope[K, V, P], OUT <:
 
   protected def resumeDemand(tryToPull: Boolean = true): Unit = {
     log.debug("Resume demand")
-    setHandler(stage.out, new OutHandler {
-      override def onPull(): Unit = tryPull(stage.in)
-    })
+    setHandler(stage.out,
+      new OutHandler {
+        override def onPull(): Unit = tryPull(stage.in)
+      })
     // kick off demand for more messages if we're resuming demand
     if (tryToPull && isAvailable(stage.out) && !hasBeenPulled(stage.in)) {
       tryPull(stage.in)
@@ -124,8 +123,7 @@ private class DefaultProducerStageLogic[K, V, P, IN <: Envelope[K, V, P], OUT <:
       stage.out,
       new OutHandler {
         override def onPull(): Unit = ()
-      }
-    )
+      })
   }
 
   protected def initialInHandler(): Unit = producingInHandler()
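
A reading aid for the checkForCompletion hunk above: the producer stage may only finish once its inlet is closed and the in-flight confirmation counter has drained to zero, and it then branches on the recorded completion state. Sketched as a pure decision, with all names local to this example:

    import scala.util.{ Failure, Success, Try }

    sealed trait StageAction
    case object KeepRunning extends StageAction
    case object CompleteStage extends StageAction
    final case class FailStage(cause: Throwable) extends StageAction

    // Illustrative completion decision for a producer-like stage.
    def completionAction(inClosed: Boolean, awaitingConfirmation: Long, state: Option[Try[Unit]]): StageAction =
      if (!inClosed || awaitingConfirmation > 0) KeepRunning
      else state match {
        case Some(Success(_))  => CompleteStage
        case Some(Failure(ex)) => FailStage(ex)
        case None              => FailStage(new IllegalStateException("no completion status recorded"))
      }
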
diff --git a/core/src/main/scala/akka/kafka/internal/DeferredProducer.scala b/core/src/main/scala/akka/kafka/internal/DeferredProducer.scala
index a3cd6f7e..3960a677 100644
--- a/core/src/main/scala/akka/kafka/internal/DeferredProducer.scala
+++ b/core/src/main/scala/akka/kafka/internal/DeferredProducer.scala
@@ -13,7 +13,7 @@ import akka.util.JavaDurationConverters._
 import org.apache.kafka.clients.producer.Producer
 
 import scala.util.control.NonFatal
-import scala.util.{Failure, Success}
+import scala.util.{ Failure, Success }
 
 /**
  * INTERNAL API
@@ -70,8 +70,7 @@ private[kafka] trait DeferredProducer[K, V] {
               log.error(e, "producer creation failed")
               closeAndFailStageCb.invoke(e)
               e
-            }
-          )(ExecutionContexts.parasitic)
+            })(ExecutionContexts.parasitic)
         changeProducerAssignmentLifecycle(AsyncCreateRequestSent)
     }
   }
diff --git a/core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala b/core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala
index ced8ebf0..d821c803 100644
--- a/core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala
+++ b/core/src/main/scala/akka/kafka/internal/ExternalSingleSourceLogic.scala
@@ -20,8 +20,7 @@ import scala.concurrent.Future
 @InternalApi private abstract class ExternalSingleSourceLogic[K, V, Msg](
     shape: SourceShape[Msg],
     _consumerActor: ActorRef,
-    val subscription: ManualSubscription
-) extends BaseSingleSourceLogic[K, V, Msg](shape) {
+    val subscription: ManualSubscription) extends BaseSingleSourceLogic[K, V, Msg](shape) {
 
   final override protected def logSource: Class[_] = classOf[ExternalSingleSourceLogic[K, V, Msg]]
 
diff --git a/core/src/main/scala/akka/kafka/internal/KafkaConsumerActor.scala b/core/src/main/scala/akka/kafka/internal/KafkaConsumerActor.scala
index a6255661..1d364ba1 100644
--- a/core/src/main/scala/akka/kafka/internal/KafkaConsumerActor.scala
+++ b/core/src/main/scala/akka/kafka/internal/KafkaConsumerActor.scala
@@ -23,18 +23,18 @@ import akka.actor.{
 import akka.annotation.InternalApi
 import akka.util.JavaDurationConverters._
 import akka.event.LoggingReceive
-import akka.kafka.KafkaConsumerActor.{StopLike, StoppingException}
+import akka.kafka.KafkaConsumerActor.{ StopLike, StoppingException }
 import akka.kafka._
 import akka.kafka.scaladsl.PartitionAssignmentHandler
 import org.apache.kafka.clients.consumer._
 import org.apache.kafka.common.errors.RebalanceInProgressException
-import org.apache.kafka.common.{Metric, MetricName, TopicPartition}
+import org.apache.kafka.common.{ Metric, MetricName, TopicPartition }
 
 import scala.annotation.nowarn
 import scala.jdk.CollectionConverters._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.concurrent.duration._
-import scala.util.{Success, Try}
+import scala.util.{ Success, Try }
 import scala.util.control.NonFatal
 
 /**
@@ -47,7 +47,7 @@ import scala.util.control.NonFatal
   object Internal {
     sealed trait SubscriptionRequest extends NoSerializationVerificationNeeded
 
-    //requests
+    // requests
     final case class Assign(tps: Set[TopicPartition]) extends SubscriptionRequest
     final case class AssignWithOffset(tps: Map[TopicPartition, Long]) extends SubscriptionRequest
     final case class AssignOffsetsForTimes(timestampsToSearch: Map[TopicPartition, Long]) extends SubscriptionRequest
@@ -70,7 +70,7 @@ import scala.util.control.NonFatal
     /** Special case commit for non-batched committing. */
     final case class CommitSingle(tp: TopicPartition, offsetAndMetadata: OffsetAndMetadata)
         extends NoSerializationVerificationNeeded
-    //responses
+    // responses
     final case class Assigned(partition: List[TopicPartition]) extends NoSerializationVerificationNeeded
     final case class Revoked(partition: List[TopicPartition]) extends NoSerializationVerificationNeeded
     final case class Messages[K, V](requestId: Int, messages: Iterator[ConsumerRecord[K, V]])
@@ -78,11 +78,10 @@ import scala.util.control.NonFatal
     final case class ConsumerMetrics(metrics: Map[MetricName, Metric]) extends NoSerializationVerificationNeeded {
       def getMetrics: java.util.Map[MetricName, Metric] = metrics.asJava
     }
-    //internal
+    // internal
     private[KafkaConsumerActor] final case class Poll[K, V](
         target: KafkaConsumerActor[K, V],
-        periodic: Boolean
-    ) extends DeadLetterSuppression
+        periodic: Boolean) extends DeadLetterSuppression
         with NoSerializationVerificationNeeded
 
     private[KafkaConsumerActor] case object PollTask
@@ -102,7 +101,7 @@ import scala.util.control.NonFatal
     def apply(commitRefreshInterval: Duration, progress: () => ConsumerProgressTracking): CommitRefreshing =
       commitRefreshInterval match {
         case finite: FiniteDuration => new Impl(finite, progress())
-        case _ => new Noop()
+        case _                      => new Noop()
       }
 
     private final class Noop() extends CommitRefreshing {
@@ -141,11 +140,11 @@ import scala.util.control.NonFatal
         // partition that is no longer assigned to this consumer, so that assumption is not necessarily strictly
         // true, but it's reasonable.
         refreshDeadlines = refreshDeadlines ++ tps.intersect(refreshDeadlines.keySet).map { tp =>
-            (tp, commitRefreshInterval.fromNow)
-          }
+          (tp, commitRefreshInterval.fromNow)
+        }
 
       override def assignedPositions(assignedTps: Set[TopicPartition],
-                                     assignedOffsets: Map[TopicPartition, Long]): Unit = {
+          assignedOffsets: Map[TopicPartition, Long]): Unit = {
        // assigned the partitions, so update all of the deadlines
         refreshDeadlines = refreshDeadlines ++ assignedTps.map(_ -> commitRefreshInterval.fromNow)
       }
@@ -172,7 +171,7 @@ import scala.util.control.NonFatal
  * The actor communicating through the Kafka consumer client.
  */
 @InternalApi final private[kafka] class KafkaConsumerActor[K, V](owner: Option[ActorRef],
-                                                                 _settings: ConsumerSettings[K, V])
+    _settings: ConsumerSettings[K, V])
     extends Actor
     with ActorIdLogging
     with Timers
@@ -337,7 +336,7 @@ import scala.util.control.NonFatal
           consumer.assign((timestampsToSearch.keys.toSeq ++ previousAssigned.asScala).asJava)
           val topicPartitionToOffsetAndTimestamp =
             consumer.offsetsForTimes(timestampsToSearch.map { case (k, v) => (k, long2Long(v)) }.toMap.asJava,
-                                     offsetForTimesTimeout)
+              offsetForTimesTimeout)
           val assignedOffsets = topicPartitionToOffsetAndTimestamp.asScala.filter(_._2 != null).toMap.map {
             case (tp, oat: OffsetAndTimestamp) =>
               val offset = oat.offset()
@@ -563,12 +562,12 @@ import scala.util.control.NonFatal
       commitMap.asJava,
       new OffsetCommitCallback {
         override def onComplete(offsets: java.util.Map[TopicPartition, OffsetAndMetadata],
-                                exception: Exception): Unit = {
+            exception: Exception): Unit = {
           def retryCommits(duration: Long, e: Throwable): Unit = {
             log.warning("Kafka commit is to be retried, after={} ms, commitsInProgress={}, cause={}",
-                        duration / 1000000L,
-                        commitsInProgress,
-                        e.toString)
+              duration / 1000000L,
+              commitsInProgress,
+              e.toString)
             commitMaps = commitMap.toList ++ commitMaps
             commitSenders = commitSenders ++ replyTo
             requestDelayedPoll()
@@ -581,45 +580,43 @@ import scala.util.control.NonFatal
             case null =>
               if (duration > settings.commitTimeWarning.toNanos) {
                 log.warning("Kafka commit took longer than `commit-time-warning`: {} ms, commitsInProgress={}",
-                            duration / 1000000L,
-                            commitsInProgress)
+                  duration / 1000000L,
+                  commitsInProgress)
               }
               progressTracker.committed(offsets)
               replyTo.foreach(_ ! Done)
 
-            case e: RebalanceInProgressException => retryCommits(duration, e)
+            case e: RebalanceInProgressException   => retryCommits(duration, e)
             case e: RetriableCommitFailedException => retryCommits(duration, e.getCause)
 
             case commitException =>
               log.error("Kafka commit failed after={} ms, commitsInProgress={}, exception={}",
-                        duration / 1000000L,
-                        commitsInProgress,
-                        commitException)
+                duration / 1000000L,
+                commitsInProgress,
+                commitException)
               val failure = Status.Failure(commitException)
               replyTo.foreach(_ ! failure)
           }
         }
-      }
-    )
+      })
   }
 
   private def processResult(partitionsToFetch: Set[TopicPartition], rawResult: ConsumerRecords[K, V]): Unit =
     if (!rawResult.isEmpty) {
-      //check the we got only requested partitions and did not drop any messages
+      // check that we got only the requested partitions and did not drop any messages
       val fetchedTps = rawResult.partitions().asScala
-      if ((fetchedTps diff partitionsToFetch).nonEmpty)
+      if (fetchedTps.diff(partitionsToFetch).nonEmpty)
         throw new scala.IllegalArgumentException(
           s"Unexpected records polled. Expected: $partitionsToFetch, " +
-          s"result: ${rawResult.partitions()}, consumer assignment: ${consumer.assignment()}"
-        )
+          s"result: ${rawResult.partitions()}, consumer assignment: ${consumer.assignment()}")
 
       val safeRecords = resetProtection.protect(self, rawResult)
       progressTracker.received(safeRecords)
 
-      //send messages to actors
+      // send messages to actors
       requests.foreach {
         case (stageActorRef, req) =>
-          //gather all messages for ref
+          // gather all messages for ref
           // See https://github.com/akka/alpakka-kafka/issues/978
           // Temporary fix to avoid https://github.com/scala/bug/issues/11807
           // Using `VectorIterator` avoids the error from `ConcatIterator`
@@ -705,8 +702,7 @@ import scala.util.control.NonFatal
             .asScala
             .filterNot(_._2 == null)
             .toMap
-        }
-      )
+        })
 
     case req: Metadata.GetCommittedOffset @nowarn("cat=deprecation") =>
       @nowarn("cat=deprecation") val resp = Metadata.CommittedOffset(
@@ -714,15 +710,14 @@ import scala.util.control.NonFatal
           @nowarn("cat=deprecation") val offset = consumer.committed(req.partition, settings.getMetadataRequestTimeout)
           offset
         },
-        req.partition
-      )
+        req.partition)
       resp
   }
 
   private def stopFromMessage(msg: StopLike) = msg match {
-    case Stop => sender()
+    case Stop                         => sender()
     case StopFromStage(sourceStageId) => s"StageId [$sourceStageId]"
-    case other => s"unknown: [$other]"
+    case other                        => s"unknown: [$other]"
   }
 
   /**
@@ -759,8 +754,7 @@ import scala.util.control.NonFatal
   }
 
   private[KafkaConsumerActor] final class RebalanceListenerImpl(
-      partitionAssignmentHandler: PartitionAssignmentHandler
-  ) extends RebalanceListener {
+      partitionAssignmentHandler: PartitionAssignmentHandler) extends RebalanceListener {
 
     private val restrictedConsumer = new RestrictedConsumer(consumer, settings.partitionHandlerWarning.*(0.95d).asJava)
     private val warningDuration = settings.partitionHandlerWarning.toNanos
@@ -802,8 +796,8 @@ import scala.util.control.NonFatal
       val duration = System.nanoTime() - startTime
       if (duration > warningDuration) {
         log.warning("Partition assignment handler `{}` took longer than `partition-handler-warning`: {} ms",
-                    method,
-                    duration / 1000000L)
+          method,
+          duration / 1000000L)
       }
     }
   }
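
The commit-callback hunks above keep one behavioural subtlety intact through the reformat: a RebalanceInProgressException or RetriableCommitFailedException re-queues the commit and schedules a delayed poll, a null exception means success, and anything else fails the commit. A hedged sketch of that classification (the outcome ADT is invented for the example; the exception types are the real Kafka client ones):

    import org.apache.kafka.clients.consumer.RetriableCommitFailedException
    import org.apache.kafka.common.errors.RebalanceInProgressException

    sealed trait CommitOutcome
    case object CommitSucceeded extends CommitOutcome
    final case class RetryCommit(cause: Throwable) extends CommitOutcome
    final case class FailCommit(cause: Throwable) extends CommitOutcome

    // Illustrative mapping of OffsetCommitCallback.onComplete's exception argument.
    def classify(exception: Exception): CommitOutcome = exception match {
      case null                              => CommitSucceeded
      case e: RebalanceInProgressException   => RetryCommit(e)
      case e: RetriableCommitFailedException => RetryCommit(e.getCause)
      case other                             => FailCommit(other)
    }
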
diff --git a/core/src/main/scala/akka/kafka/internal/KafkaSourceStage.scala b/core/src/main/scala/akka/kafka/internal/KafkaSourceStage.scala
index 15b20b97..36da33ba 100644
--- a/core/src/main/scala/akka/kafka/internal/KafkaSourceStage.scala
+++ b/core/src/main/scala/akka/kafka/internal/KafkaSourceStage.scala
@@ -8,7 +8,7 @@ package akka.kafka.internal
 import akka.annotation.InternalApi
 import akka.kafka.scaladsl.Consumer._
 import akka.stream._
-import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue}
+import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue }
 
 /**
  * INTERNAL API
diff --git a/core/src/main/scala/akka/kafka/internal/LoggingWithId.scala b/core/src/main/scala/akka/kafka/internal/LoggingWithId.scala
index 073e0af4..4a1942b6 100644
--- a/core/src/main/scala/akka/kafka/internal/LoggingWithId.scala
+++ b/core/src/main/scala/akka/kafka/internal/LoggingWithId.scala
@@ -5,9 +5,9 @@
 
 package akka.kafka.internal
 
-import akka.actor.{Actor, ActorLogging}
+import akka.actor.{ Actor, ActorLogging }
 import akka.event.LoggingAdapter
-import akka.stream.stage.{GraphStageLogic, StageLogging}
+import akka.stream.stage.{ GraphStageLogic, StageLogging }
 
 /**
  * Generate a short random UID for something.
diff --git a/core/src/main/scala/akka/kafka/internal/MessageBuilder.scala b/core/src/main/scala/akka/kafka/internal/MessageBuilder.scala
index 5576130b..0aa036be 100644
--- a/core/src/main/scala/akka/kafka/internal/MessageBuilder.scala
+++ b/core/src/main/scala/akka/kafka/internal/MessageBuilder.scala
@@ -16,7 +16,7 @@ import akka.kafka.ConsumerMessage.{
   TransactionalMessage,
   _
 }
-import org.apache.kafka.clients.consumer.{ConsumerRecord, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{ ConsumerRecord, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.requests.OffsetFetchResponse
 
@@ -58,12 +58,10 @@ private[kafka] trait TransactionalMessageBuilder[K, V]
       GroupTopicPartition(
         groupId = groupId,
         topic = rec.topic,
-        partition = rec.partition
-      ),
+        partition = rec.partition),
       offset = rec.offset,
       committedMarker,
-      fromPartitionedSource
-    )
+      fromPartitionedSource)
     ConsumerMessage.TransactionalMessage(rec, offset)
   }
 }
@@ -78,12 +76,10 @@ private[kafka] trait TransactionalOffsetContextBuilder[K, V]
       GroupTopicPartition(
         groupId = groupId,
         topic = rec.topic,
-        partition = rec.partition
-      ),
+        partition = rec.partition),
       offset = rec.offset,
       committedMarker,
-      fromPartitionedSource
-    )
+      fromPartitionedSource)
     (rec, offset)
   }
 }
@@ -100,10 +96,8 @@ private[kafka] trait CommittableMessageBuilder[K, V] extends MessageBuilder[K, V
       GroupTopicPartition(
         groupId = groupId,
         topic = rec.topic,
-        partition = rec.partition
-      ),
-      offset = rec.offset
-    )
+        partition = rec.partition),
+      offset = rec.offset)
     ConsumerMessage.CommittableMessage(rec, CommittableOffsetImpl(offset, metadataFromRecord(rec))(committer))
   }
 }
@@ -126,10 +120,8 @@ private[kafka] trait OffsetContextBuilder[K, V]
       GroupTopicPartition(
         groupId = groupId,
         topic = rec.topic,
-        partition = rec.partition
-      ),
-      offset = rec.offset
-    )
+        partition = rec.partition),
+      offset = rec.offset)
     (rec, CommittableOffsetImpl(offset, metadataFromRecord(rec))(committer))
   }
 }
@@ -137,10 +129,8 @@ private[kafka] trait OffsetContextBuilder[K, V]
 /** Internal API */
 @InternalApi private[kafka] final case class CommittableOffsetImpl(
     override val partitionOffset: ConsumerMessage.PartitionOffset,
-    override val metadata: String
-)(
-    val committer: KafkaAsyncConsumerCommitterRef
-) extends CommittableOffsetMetadata {
+    override val metadata: String)(
+    val committer: KafkaAsyncConsumerCommitterRef) extends CommittableOffsetMetadata {
   override def commitScaladsl(): Future[Done] = commitInternal()
   override def commitJavadsl(): CompletionStage[Done] = commitInternal().toJava
   override def commitInternal(): Future[Done] = KafkaAsyncConsumerCommitterRef.commit(this)
@@ -163,22 +153,20 @@ private[kafka] trait CommittedMarker {
 private[kafka] final class CommittableOffsetBatchImpl(
     private[kafka] val offsetsAndMetadata: Map[GroupTopicPartition, OffsetAndMetadata],
     private val committers: Map[GroupTopicPartition, KafkaAsyncConsumerCommitterRef],
-    override val batchSize: Long
-) extends CommittableOffsetBatch {
+    override val batchSize: Long) extends CommittableOffsetBatch {
   def offsets: Map[GroupTopicPartition, Long] = offsetsAndMetadata.view.mapValues(_.offset() - 1L).toMap
 
   def updated(committable: Committable): CommittableOffsetBatch = committable match {
-    case offset: CommittableOffset => updatedWithOffset(offset)
+    case offset: CommittableOffset     => updatedWithOffset(offset)
     case batch: CommittableOffsetBatch => updatedWithBatch(batch)
-    case null => throw new IllegalArgumentException(s"unexpected Committable [null]")
-    case _ => throw new IllegalArgumentException(s"unexpected Committable [${committable.getClass}]")
+    case null                          => throw new IllegalArgumentException(s"unexpected Committable [null]")
+    case _                             => throw new IllegalArgumentException(s"unexpected Committable [${committable.getClass}]")
   }
 
   private[internal] def committerFor(groupTopicPartition: GroupTopicPartition) =
     committers.getOrElse(
       groupTopicPartition,
-      throw new IllegalStateException(s"Unknown committer, got [$groupTopicPartition] (${committers.keys})")
-    )
+      throw new IllegalStateException(s"Unknown committer, got [$groupTopicPartition] (${committers.keys})"))
 
   private def updatedWithOffset(newOffset: CommittableOffset): CommittableOffsetBatch = {
     val partitionOffset = newOffset.partitionOffset
@@ -198,8 +186,7 @@ private[kafka] final class CommittableOffsetBatchImpl(
       case _ =>
         throw new IllegalArgumentException(
           s"Unknown CommittableOffset, got [${newOffset.getClass.getName}], " +
-          s"expected [${classOf[CommittableOffsetImpl].getName}]"
-        )
+          s"expected [${classOf[CommittableOffsetImpl].getName}]")
     }
 
     // the last `KafkaAsyncConsumerCommitterRef` wins (see https://github.com/akka/alpakka-kafka/issues/942)
@@ -217,8 +204,7 @@ private[kafka] final class CommittableOffsetBatchImpl(
       case _ =>
         throw new IllegalArgumentException(
           s"Unknown CommittableOffsetBatch, got [${committableOffsetBatch.getClass.getName}], " +
-          s"expected [${classOf[CommittableOffsetBatchImpl].getName}]"
-        )
+          s"expected [${classOf[CommittableOffsetBatchImpl].getName}]")
     }
 
   override def getOffsets: java.util.Map[GroupTopicPartition, Long] = offsets.asJava
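
As the updated/updatedWithOffset hunks above show, a batch absorbs either a single CommittableOffset or another batch and rejects anything else, so batches compose by folding. Assuming the published akka.kafka.ConsumerMessage API (emptyCommittableOffsetBatch and updated), a small usage sketch:

    import akka.kafka.ConsumerMessage
    import akka.kafka.ConsumerMessage.{ Committable, CommittableOffsetBatch }

    // Illustrative: grow one batch from a sequence of committables.
    def foldIntoBatch(committables: Seq[Committable]): CommittableOffsetBatch =
      committables.foldLeft(ConsumerMessage.emptyCommittableOffsetBatch)((batch, c) => batch.updated(c))
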
diff --git a/core/src/main/scala/akka/kafka/internal/PartitionAssignmentHelpers.scala b/core/src/main/scala/akka/kafka/internal/PartitionAssignmentHelpers.scala
index 81fcacb0..0f953799 100644
--- a/core/src/main/scala/akka/kafka/internal/PartitionAssignmentHelpers.scala
+++ b/core/src/main/scala/akka/kafka/internal/PartitionAssignmentHelpers.scala
@@ -9,7 +9,7 @@ import akka.actor.ActorRef
 import akka.annotation.InternalApi
 import akka.kafka.scaladsl.PartitionAssignmentHandler
 import akka.kafka.javadsl
-import akka.kafka.{AutoSubscription, RestrictedConsumer, TopicPartitionsAssigned, TopicPartitionsRevoked}
+import akka.kafka.{ AutoSubscription, RestrictedConsumer, TopicPartitionsAssigned, TopicPartitionsRevoked }
 import akka.stream.stage.AsyncCallback
 import org.apache.kafka.common.TopicPartition
 
@@ -55,9 +55,9 @@ object PartitionAssignmentHelpers {
 
   @InternalApi
   final class AsyncCallbacks(subscription: AutoSubscription,
-                             sourceActor: ActorRef,
-                             partitionAssignedCB: AsyncCallback[Set[TopicPartition]],
-                             partitionRevokedCB: AsyncCallback[Set[TopicPartition]])
+      sourceActor: ActorRef,
+      partitionAssignedCB: AsyncCallback[Set[TopicPartition]],
+      partitionRevokedCB: AsyncCallback[Set[TopicPartition]])
       extends PartitionAssignmentHandler {
 
     override def onRevoke(revokedTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit = {
diff --git a/core/src/main/scala/akka/kafka/internal/PlainSources.scala b/core/src/main/scala/akka/kafka/internal/PlainSources.scala
index fbe90acc..79c09290 100644
--- a/core/src/main/scala/akka/kafka/internal/PlainSources.scala
+++ b/core/src/main/scala/akka/kafka/internal/PlainSources.scala
@@ -9,11 +9,11 @@ import akka.NotUsed
 import akka.actor.ActorRef
 import akka.annotation.InternalApi
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.{AutoSubscription, ConsumerSettings, ManualSubscription, Subscription}
+import akka.kafka.{ AutoSubscription, ConsumerSettings, ManualSubscription, Subscription }
 import akka.kafka.internal.SubSourceLogic._
 import akka.stream.SourceShape
 import akka.stream.scaladsl.Source
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 import org.apache.kafka.common.TopicPartition
 
@@ -31,8 +31,7 @@ private[kafka] final class PlainSource[K, V](settings: ConsumerSettings[K, V], s
 @InternalApi
 private[kafka] final class ExternalPlainSource[K, V](consumer: ActorRef, subscription: ManualSubscription)
     extends KafkaSourceStage[K, V, ConsumerRecord[K, V]](
-      s"ExternalPlainSubSource ${subscription.renderStageAttribute}"
-    ) {
+      s"ExternalPlainSubSource ${subscription.renderStageAttribute}") {
   override protected def logic(shape: SourceShape[ConsumerRecord[K, V]]): GraphStageLogic with Control =
     new ExternalSingleSourceLogic[K, V, ConsumerRecord[K, V]](shape, consumer, subscription)
       with PlainMessageBuilder[K, V]
@@ -47,13 +46,11 @@ private[kafka] final class PlainSubSource[K, V](
     settings: ConsumerSettings[K, V],
     subscription: AutoSubscription,
     getOffsetsOnAssign: Option[Set[TopicPartition] => Future[Map[TopicPartition, Long]]],
-    onRevoke: Set[TopicPartition] => Unit
-) extends KafkaSourceStage[K, V, (TopicPartition, Source[ConsumerRecord[K, V], NotUsed])](
-      s"PlainSubSource ${subscription.renderStageAttribute}"
-    ) {
+    onRevoke: Set[TopicPartition] => Unit)
+    extends KafkaSourceStage[K, V, (TopicPartition, Source[ConsumerRecord[K, V], NotUsed])](
+      s"PlainSubSource ${subscription.renderStageAttribute}") {
   override protected def logic(
-      shape: SourceShape[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed])]
-  ): GraphStageLogic with Control = {
+      shape: SourceShape[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed])]): GraphStageLogic with Control = {
 
     val factory = new SubSourceStageLogicFactory[K, V, ConsumerRecord[K, V]] {
       def create(
@@ -62,21 +59,20 @@ private[kafka] final class PlainSubSource[K, V](
           consumerActor: ActorRef,
           subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
           subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
-          actorNumber: Int
-      ): SubSourceStageLogic[K, V, ConsumerRecord[K, V]] =
+          actorNumber: Int): SubSourceStageLogic[K, V, ConsumerRecord[K, V]] =
         new SubSourceStageLogic[K, V, ConsumerRecord[K, V]](shape,
-                                                            tp,
-                                                            consumerActor,
-                                                            subSourceStartedCb,
-                                                            subSourceCancelledCb,
-                                                            actorNumber) with PlainMessageBuilder[K, V]
+          tp,
+          consumerActor,
+          subSourceStartedCb,
+          subSourceCancelledCb,
+          actorNumber) with PlainMessageBuilder[K, V]
     }
 
     new SubSourceLogic[K, V, ConsumerRecord[K, V]](shape,
-                                                   settings,
-                                                   subscription,
-                                                   getOffsetsOnAssign,
-                                                   onRevoke,
-                                                   subSourceStageLogicFactory = factory)
+      settings,
+      subscription,
+      getOffsetsOnAssign,
+      onRevoke,
+      subSourceStageLogicFactory = factory)
   }
 }
diff --git a/core/src/main/scala/akka/kafka/internal/SingleSourceLogic.scala b/core/src/main/scala/akka/kafka/internal/SingleSourceLogic.scala
index 22b5992f..d20a84f9 100644
--- a/core/src/main/scala/akka/kafka/internal/SingleSourceLogic.scala
+++ b/core/src/main/scala/akka/kafka/internal/SingleSourceLogic.scala
@@ -5,15 +5,15 @@
 
 package akka.kafka.internal
 
-import akka.actor.{ActorRef, ExtendedActorSystem, Terminated}
+import akka.actor.{ ActorRef, ExtendedActorSystem, Terminated }
 import akka.annotation.InternalApi
 import akka.kafka.internal.KafkaConsumerActor.Internal.Messages
 import akka.kafka.scaladsl.PartitionAssignmentHandler
-import akka.kafka.{ConsumerSettings, RestrictedConsumer, Subscription}
+import akka.kafka.{ ConsumerSettings, RestrictedConsumer, Subscription }
 import akka.stream.SourceShape
 import org.apache.kafka.common.TopicPartition
 
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
 
 /**
  * Internal API.
@@ -23,8 +23,7 @@ import scala.concurrent.{Future, Promise}
 @InternalApi private abstract class SingleSourceLogic[K, V, Msg](
     shape: SourceShape[Msg],
     settings: ConsumerSettings[K, V],
-    override protected val subscription: Subscription
-) extends BaseSingleSourceLogic[K, V, Msg](shape) {
+    override protected val subscription: Subscription) extends BaseSingleSourceLogic[K, V, Msg](shape) {
 
   override protected def logSource: Class[_] = classOf[SingleSourceLogic[K, V, Msg]]
   private val consumerPromise = Promise[ActorRef]()
@@ -36,7 +35,7 @@ import scala.concurrent.{Future, Promise}
     val extendedActorSystem = materializer.system.asInstanceOf[ExtendedActorSystem]
     val actor =
       extendedActorSystem.systemActorOf(akka.kafka.KafkaConsumerActor.props(sourceActor.ref, settings),
-                                        s"kafka-consumer-$actorNumber")
+        s"kafka-consumer-$actorNumber")
     consumerPromise.success(actor)
     actor
   }
@@ -57,8 +56,8 @@ import scala.concurrent.{Future, Promise}
         // Prevent stage failure during shutdown by ignoring Messages
         if (messages.hasNext)
           log.debug("Unexpected `Messages` received with requestId={} and a non-empty message iterator: {}",
-                    requestId,
-                    messages.mkString(", "))
+            requestId,
+            messages.mkString(", "))
     })
     stopConsumerActor()
   }
@@ -70,17 +69,17 @@ import scala.concurrent.{Future, Promise}
   }
 
   protected def stopConsumerActor(): Unit =
-    materializer.scheduleOnce(settings.stopTimeout, new Runnable {
-      override def run(): Unit =
-        consumerActor.tell(KafkaConsumerActor.Internal.StopFromStage(id), sourceActor.ref)
-    })
+    materializer.scheduleOnce(settings.stopTimeout,
+      new Runnable {
+        override def run(): Unit =
+          consumerActor.tell(KafkaConsumerActor.Internal.StopFromStage(id), sourceActor.ref)
+      })
 
   /**
    * Opportunity for subclasses to add a different logic to the partition assignment callbacks.
    */
   override protected def addToPartitionAssignmentHandler(
-      handler: PartitionAssignmentHandler
-  ): PartitionAssignmentHandler = {
+      handler: PartitionAssignmentHandler): PartitionAssignmentHandler = {
     val flushMessagesOfRevokedPartitions: PartitionAssignmentHandler = new PartitionAssignmentHandler {
       private var lastRevoked = Set.empty[TopicPartition]
 
diff --git a/core/src/main/scala/akka/kafka/internal/SourceLogicBuffer.scala b/core/src/main/scala/akka/kafka/internal/SourceLogicBuffer.scala
index d09db54d..3c36f5a9 100644
--- a/core/src/main/scala/akka/kafka/internal/SourceLogicBuffer.scala
+++ b/core/src/main/scala/akka/kafka/internal/SourceLogicBuffer.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.internal
 import akka.annotation.InternalApi
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 import org.apache.kafka.common.TopicPartition
 
diff --git a/core/src/main/scala/akka/kafka/internal/SourceLogicSubscription.scala b/core/src/main/scala/akka/kafka/internal/SourceLogicSubscription.scala
index 48bf13fa..36ed17da 100644
--- a/core/src/main/scala/akka/kafka/internal/SourceLogicSubscription.scala
+++ b/core/src/main/scala/akka/kafka/internal/SourceLogicSubscription.scala
@@ -6,11 +6,11 @@
 package akka.kafka.internal
 import akka.actor.ActorRef
 import akka.annotation.InternalApi
-import akka.kafka.{AutoSubscription, ManualSubscription, Subscription}
+import akka.kafka.{ AutoSubscription, ManualSubscription, Subscription }
 import akka.kafka.Subscriptions._
 import akka.kafka.scaladsl.PartitionAssignmentHandler
 import akka.stream.stage.GraphStageLogic.StageActor
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
 import org.apache.kafka.common.TopicPartition
 
 /**
@@ -31,16 +31,15 @@ private[kafka] trait SourceLogicSubscription {
   protected def sourceActor: StageActor
 
   protected def configureSubscription(partitionAssignedCB: AsyncCallback[Set[TopicPartition]],
-                                      partitionRevokedCB: AsyncCallback[Set[TopicPartition]]): Unit = {
+      partitionRevokedCB: AsyncCallback[Set[TopicPartition]]): Unit = {
 
     def rebalanceListener(autoSubscription: AutoSubscription): PartitionAssignmentHandler = {
       PartitionAssignmentHelpers.chain(
         addToPartitionAssignmentHandler(autoSubscription.partitionAssignmentHandler),
         new PartitionAssignmentHelpers.AsyncCallbacks(autoSubscription,
-                                                      sourceActor.ref,
-                                                      partitionAssignedCB,
-                                                      partitionRevokedCB)
-      )
+          sourceActor.ref,
+          partitionAssignedCB,
+          partitionRevokedCB))
     }
 
     subscription match {
@@ -48,18 +47,14 @@ private[kafka] trait SourceLogicSubscription {
         consumerActor.tell(
           KafkaConsumerActor.Internal.Subscribe(
             topics,
-            addToPartitionAssignmentHandler(rebalanceListener(sub))
-          ),
-          sourceActor.ref
-        )
+            addToPartitionAssignmentHandler(rebalanceListener(sub))),
+          sourceActor.ref)
       case sub @ TopicSubscriptionPattern(topics, _, _) =>
         consumerActor.tell(
           KafkaConsumerActor.Internal.SubscribePattern(
             topics,
-            addToPartitionAssignmentHandler(rebalanceListener(sub))
-          ),
-          sourceActor.ref
-        )
+            addToPartitionAssignmentHandler(rebalanceListener(sub))),
+          sourceActor.ref)
       case s: ManualSubscription => configureManualSubscription(s)
     }
   }
diff --git a/core/src/main/scala/akka/kafka/internal/SubSourceLogic.scala b/core/src/main/scala/akka/kafka/internal/SubSourceLogic.scala
index 3e45df06..f292eba3 100644
--- a/core/src/main/scala/akka/kafka/internal/SubSourceLogic.scala
+++ b/core/src/main/scala/akka/kafka/internal/SubSourceLogic.scala
@@ -7,26 +7,26 @@ package akka.kafka.internal
 
 import akka.NotUsed
 import akka.actor.Status
-import akka.actor.{ActorRef, ExtendedActorSystem, Terminated}
+import akka.actor.{ ActorRef, ExtendedActorSystem, Terminated }
 import akka.annotation.InternalApi
 import akka.kafka.internal.KafkaConsumerActor.Internal.RegisterSubStage
 import akka.kafka.internal.SubSourceLogic._
-import akka.kafka.{AutoSubscription, ConsumerFailed, ConsumerSettings, RestrictedConsumer}
+import akka.kafka.{ AutoSubscription, ConsumerFailed, ConsumerSettings, RestrictedConsumer }
 import akka.kafka.scaladsl.Consumer.Control
 import akka.kafka.scaladsl.PartitionAssignmentHandler
-import akka.pattern.{ask, AskTimeoutException}
+import akka.pattern.{ ask, AskTimeoutException }
 import akka.stream.scaladsl.Source
 import akka.stream.stage.GraphStageLogic.StageActor
 import akka.stream.stage._
-import akka.stream.{Attributes, Outlet, SourceShape}
+import akka.stream.{ Attributes, Outlet, SourceShape }
 import akka.util.Timeout
 import org.apache.kafka.common.TopicPartition
 
 import scala.annotation.tailrec
 import scala.collection.immutable
 import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContext, Future, Promise}
-import scala.util.{Failure, Success}
+import scala.concurrent.{ ExecutionContext, Future, Promise }
+import scala.util.{ Failure, Success }
 
 /**
  * Internal API.
@@ -39,7 +39,6 @@ import scala.util.{Failure, Success}
  * The `SubSourceLogic.subSourceStageLogicFactory` parameter is passed to each `SubSourceStage` so that a new
  * `SubSourceStageLogic` can be created for each stage. Context parameters from the `SubSourceLogic` are passed down to
  * `SubSourceStage` and on to the `SubSourceStageLogicFactory` when the stage creates a `GraphStageLogic`.
- *
  */
 @InternalApi
 private class SubSourceLogic[K, V, Msg](
@@ -48,8 +47,7 @@ private class SubSourceLogic[K, V, Msg](
     override protected val subscription: AutoSubscription,
     getOffsetsOnAssign: Option[Set[TopicPartition] => Future[Map[TopicPartition, Long]]] = None,
     onRevoke: Set[TopicPartition] => Unit = _ => (),
-    subSourceStageLogicFactory: SubSourceStageLogicFactory[K, V, Msg]
-) extends TimerGraphStageLogic(shape)
+    subSourceStageLogicFactory: SubSourceStageLogicFactory[K, V, Msg]) extends TimerGraphStageLogic(shape)
     with PromiseControl
     with MetricsControl
     with SourceLogicSubscription
@@ -89,7 +87,7 @@ private class SubSourceLogic[K, V, Msg](
     consumerActor = {
       val extendedActorSystem = materializer.system.asInstanceOf[ExtendedActorSystem]
       extendedActorSystem.systemActorOf(akka.kafka.KafkaConsumerActor.props(sourceActor.ref, settings),
-                                        s"kafka-consumer-$actorNumber")
+        s"kafka-consumer-$actorNumber")
     }
     consumerPromise.success(consumerActor)
     sourceActor.watch(consumerActor)
@@ -109,7 +107,7 @@ private class SubSourceLogic[K, V, Msg](
       val updatedFormerlyUnknown = formerlyUnknown -- (partitionsToRevoke ++ partitionsInStartup ++ pendingPartitions)
       // Filter out the offsetMap so that we don't re-seek for partitions that have been revoked
       seekAndEmitSubSources(updatedFormerlyUnknown,
-                            offsetMap.view.filterKeys(k => !partitionsToRevoke.contains(k)).toMap)
+        offsetMap.view.filterKeys(k => !partitionsToRevoke.contains(k)).toMap)
   }
 
   private val partitionAssignedCB = getAsyncCallback[Set[TopicPartition]] { assigned =>
@@ -134,9 +132,7 @@ private class SubSourceLogic[K, V, Msg](
               stageFailCB.invoke(
                 new ConsumerFailed(
                   s"$idLogPrefix Failed to fetch offset for partitions: ${formerlyUnknown.mkString(", ")}.",
-                  ex
-                )
-              )
+                  ex))
             case Success(offsets) =>
               onOffsetsFromExternalResponseCB.invoke((formerlyUnknown, offsets))
           }
@@ -150,8 +146,7 @@ private class SubSourceLogic[K, V, Msg](
 
   private def seekAndEmitSubSources(
       formerlyUnknown: Set[TopicPartition],
-      offsets: Map[TopicPartition, Long]
-  ): Unit = {
+      offsets: Map[TopicPartition, Long]): Unit = {
     implicit val ec: ExecutionContext = materializer.executionContext
     consumerActor
       .ask(KafkaConsumerActor.Internal.Seek(offsets))(Timeout(10.seconds), sourceActor.ref)
@@ -160,9 +155,7 @@ private class SubSourceLogic[K, V, Msg](
         case _: AskTimeoutException =>
           stageFailCB.invoke(
             new ConsumerFailed(
-              s"$idLogPrefix Consumer failed during seek for partitions: ${offsets.keys.mkString(", ")}."
-            )
-          )
+              s"$idLogPrefix Consumer failed during seek for partitions: ${offsets.keys.mkString(", ")}."))
       }
   }
 
@@ -221,8 +214,7 @@ private class SubSourceLogic[K, V, Msg](
       override def onPull(): Unit =
         emitSubSourcesForPendingPartitions()
       override def onDownstreamFinish(cause: Throwable): Unit = performShutdown()
-    }
-  )
+    })
 
   private def updatePendingPartitionsAndEmitSubSources(formerlyUnknownPartitions: Set[TopicPartition]): Unit = {
     pendingPartitions ++= formerlyUnknownPartitions.filter(!partitionsInStartup.contains(_))
@@ -238,12 +230,11 @@ private class SubSourceLogic[K, V, Msg](
       partitionsInStartup += tp
       val subSource = Source.fromGraph(
         new SubSourceStage(tp,
-                           consumerActor,
-                           subsourceStartedCB,
-                           subsourceCancelledCB,
-                           actorNumber,
-                           subSourceStageLogicFactory)
-      )
+          consumerActor,
+          subsourceStartedCB,
+          subsourceCancelledCB,
+          actorNumber,
+          subSourceStageLogicFactory))
       push(shape.out, (tp, subSource))
       emitSubSourcesForPendingPartitions()
     }
@@ -266,7 +257,7 @@ private class SubSourceLogic[K, V, Msg](
   override def performShutdown(): Unit = {
     log.info("Completing. Partitions [{}], StageActor {}", subSources.keys.mkString(","), sourceActor.ref)
     setKeepGoing(true)
-    //todo we should wait for subsources to be shutdown and next shutdown main stage
+    // todo we should wait for the subsources to shut down and only then shut down the main stage
     subSources.values.foreach {
       _.control.shutdown()
     }
@@ -285,16 +276,14 @@ private class SubSourceLogic[K, V, Msg](
       new Runnable {
         override def run(): Unit =
           consumerActor.tell(KafkaConsumerActor.Internal.StopFromStage(id), sourceActor.ref)
-      }
-    )
+      })
   }
 
   /**
    * Opportunity for subclasses to add a different logic to the partition assignment callbacks.
    */
   override protected def addToPartitionAssignmentHandler(
-      handler: PartitionAssignmentHandler
-  ): PartitionAssignmentHandler = {
+      handler: PartitionAssignmentHandler): PartitionAssignmentHandler = {
     val flushMessagesOfRevokedPartitions: PartitionAssignmentHandler = new PartitionAssignmentHandler {
       private var lastRevoked = Set.empty[TopicPartition]
 
@@ -323,7 +312,8 @@ private class SubSourceLogic[K, V, Msg](
 private object SubSourceLogic {
   case object CloseRevokedPartitions
 
-  /** Internal API
+  /**
+   * Internal API
    *
    * SubSourceStageLogic [[akka.kafka.scaladsl.Consumer.Control]] and the stage actor [[ActorRef]]
    */
@@ -332,13 +322,14 @@ private object SubSourceLogic {
 
   @InternalApi
   final case class SubSourceStageLogicControl(tp: TopicPartition,
-                                              controlAndStageActor: ControlAndStageActor,
-                                              filterRevokedPartitionsCB: AsyncCallback[Set[TopicPartition]]) {
+      controlAndStageActor: ControlAndStageActor,
+      filterRevokedPartitionsCB: AsyncCallback[Set[TopicPartition]]) {
     def control: Control = controlAndStageActor.control
     def stageActor: ActorRef = controlAndStageActor.stageActor
   }
 
-  /** Internal API
+  /**
+   * Internal API
    * Used to determine how the [[SubSourceLogic]] will handle the cancellation of a sub source stage.  The default
    * behavior requested by the [[SubSourceStageLogic]] is to ask the consumer to seek to the last committed offset and
    * then re-emit the sub source stage downstream.
@@ -348,7 +339,8 @@ private object SubSourceLogic {
   case object ReEmit extends SubSourceCancellationStrategy
   case object DoNothing extends SubSourceCancellationStrategy
 
-  /** Internal API
+  /**
+   * Internal API
    *
    * Encapsulates a factory method to create a [[SubSourceStageLogic]] within [[SubSourceLogic]] where the context
    * parameters exist.
@@ -361,12 +353,12 @@ private object SubSourceLogic {
         consumerActor: ActorRef,
         subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
         subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
-        actorNumber: Int
-    ): SubSourceStageLogic[K, V, Msg]
+        actorNumber: Int): SubSourceStageLogic[K, V, Msg]
   }
 }
 
-/** Internal API
+/**
+ * Internal API
  *
  * A [[SubSourceStage]] is created per partition in [[SubSourceLogic]].
  */
@@ -377,8 +369,7 @@ private final class SubSourceStage[K, V, Msg](
     subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
     subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
     actorNumber: Int,
-    subSourceStageLogicFactory: SubSourceStageLogicFactory[K, V, Msg]
-) extends GraphStage[SourceShape[Msg]] { stage =>
+    subSourceStageLogicFactory: SubSourceStageLogicFactory[K, V, Msg]) extends GraphStage[SourceShape[Msg]] { stage =>
 
   val out = Outlet[Msg]("out")
   val shape: SourceShape[Msg] = new SourceShape(out)
@@ -387,7 +378,8 @@ private final class SubSourceStage[K, V, Msg](
     subSourceStageLogicFactory.create(shape, tp, consumerActor, subSourceStartedCb, subSourceCancelledCb, actorNumber)
 }
 
-/** Internal API
+/**
+ * Internal API
  *
  * A [[SubSourceStageLogic]] is the [[GraphStageLogic]] of a [[SubSourceStage]].
  * This emits Kafka messages downstream (not sources).
@@ -399,8 +391,7 @@ private abstract class SubSourceStageLogic[K, V, Msg](
     consumerActor: ActorRef,
     subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
     subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
-    actorNumber: Int
-) extends GraphStageLogic(shape)
+    actorNumber: Int) extends GraphStageLogic(shape)
     with PromiseControl
     with MetricsControl
     with StageIdLogging
@@ -456,8 +447,7 @@ private abstract class SubSourceStageLogic[K, V, Msg](
         subSourceCancelledCb.invoke(tp -> onDownstreamFinishSubSourceCancellationStrategy())
         super.onDownstreamFinish(cause)
       }
-    }
-  )
+    })
 
   def performShutdown() = {
     log.info("Completing. Partition {}", tp)
diff --git a/core/src/main/scala/akka/kafka/internal/TransactionalProducerStage.scala b/core/src/main/scala/akka/kafka/internal/TransactionalProducerStage.scala
index ba2ca0de..f30ad63e 100644
--- a/core/src/main/scala/akka/kafka/internal/TransactionalProducerStage.scala
+++ b/core/src/main/scala/akka/kafka/internal/TransactionalProducerStage.scala
@@ -7,14 +7,14 @@ package akka.kafka.internal
 
 import akka.Done
 import akka.annotation.InternalApi
-import akka.kafka.ConsumerMessage.{GroupTopicPartition, PartitionOffsetCommittedMarker}
-import akka.kafka.ProducerMessage.{Envelope, Results}
+import akka.kafka.ConsumerMessage.{ GroupTopicPartition, PartitionOffsetCommittedMarker }
+import akka.kafka.ProducerMessage.{ Envelope, Results }
 import akka.kafka.internal.DeferredProducer._
 import akka.kafka.internal.ProducerStage.ProducerCompletionState
-import akka.kafka.{ConsumerMessage, ProducerSettings}
+import akka.kafka.{ ConsumerMessage, ProducerSettings }
 import akka.stream.stage._
-import akka.stream.{Attributes, FlowShape}
-import org.apache.kafka.clients.consumer.{ConsumerGroupMetadata, OffsetAndMetadata}
+import akka.stream.{ Attributes, FlowShape }
+import org.apache.kafka.clients.consumer.{ ConsumerGroupMetadata, OffsetAndMetadata }
 import org.apache.kafka.clients.producer.ProducerConfig
 import org.apache.kafka.common.TopicPartition
 
@@ -28,8 +28,7 @@ import scala.jdk.CollectionConverters._
 @InternalApi
 private[kafka] final class TransactionalProducerStage[K, V, P](
     val settings: ProducerSettings[K, V],
-    transactionalId: String
-) extends GraphStage[FlowShape[Envelope[K, V, P], Future[Results[K, V, P]]]]
+    transactionalId: String) extends GraphStage[FlowShape[Envelope[K, V, P], Future[Results[K, V, P]]]]
     with ProducerStage[K, V, P, Envelope[K, V, P], Results[K, V, P]] {
 
   override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
@@ -55,7 +54,7 @@ private object TransactionalProducerStage {
   }
 
   final class NonemptyTransactionBatch(head: PartitionOffsetCommittedMarker,
-                                       tail: Map[GroupTopicPartition, Long] = Map[GroupTopicPartition, Long]())
+      tail: Map[GroupTopicPartition, Long] = Map[GroupTopicPartition, Long]())
       extends TransactionBatch {
     // There is no guarantee that offsets adding callbacks will be called in any particular order.
     // Decreasing an offset stored for the KTP would mean possible data duplication.
@@ -80,12 +79,10 @@ private object TransactionalProducerStage {
     override def updated(partitionOffset: PartitionOffsetCommittedMarker): TransactionBatch = {
       require(
         group == partitionOffset.key.groupId,
-        s"Transaction batch must contain messages from exactly 1 consumer group. $group != ${partitionOffset.key.groupId}"
-      )
+        s"Transaction batch must contain messages from exactly 1 consumer group. $group != ${partitionOffset.key.groupId}")
       require(
         this.committedMarker == partitionOffset.committedMarker,
-        "Transaction batch must contain messages from a single source"
-      )
+        "Transaction batch must contain messages from a single source")
       new NonemptyTransactionBatch(partitionOffset, offsets)
     }
   }
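
The ordering caveat in the comment above is the heart of the batch: offset callbacks may arrive out of order, and moving a stored offset backwards would duplicate data. A minimal sketch of the monotonic merge this implies, using only the standard library (the helper name is illustrative, not part of this stage):

    import akka.kafka.ConsumerMessage.GroupTopicPartition

    // Keep the larger of the stored and incoming offsets, so a late,
    // out-of-order callback can never move a partition's offset backwards.
    def mergeOffset(
        offsets: Map[GroupTopicPartition, Long],
        key: GroupTopicPartition,
        incoming: Long): Map[GroupTopicPartition, Long] =
      offsets.updated(key, offsets.get(key).fold(incoming)(math.max(_, incoming)))
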
@@ -100,8 +97,8 @@ private object TransactionalProducerStage {
 private final class TransactionalProducerStageLogic[K, V, P](
     stage: TransactionalProducerStage[K, V, P],
     transactionalId: String,
-    inheritedAttributes: Attributes
-) extends DefaultProducerStageLogic[K, V, P, Envelope[K, V, P], Results[K, V, P]](stage, inheritedAttributes)
+    inheritedAttributes: Attributes)
+    extends DefaultProducerStageLogic[K, V, P, Envelope[K, V, P], Results[K, V, P]](stage, inheritedAttributes)
     with StageIdLogging
     with ProducerCompletionState {
 
@@ -149,9 +146,10 @@ private final class TransactionalProducerStageLogic[K, V, P](
   }
 
   override protected def initialInHandler(): Unit =
-    setHandler(stage.in, new DefaultInHandler {
-      override def onPush(): Unit = parseFirstMessage(grab(stage.in))
-    })
+    setHandler(stage.in,
+      new DefaultInHandler {
+        override def onPush(): Unit = parseFirstMessage(grab(stage.in))
+      })
 
   override protected def onTimer(timerKey: Any): Unit =
     if (timerKey == commitSchedulerKey) {
@@ -159,7 +157,7 @@ private final class TransactionalProducerStageLogic[K, V, P](
     }
 
   private def maybeCommitTransaction(beginNewTransaction: Boolean = true,
-                                     abortEmptyTransactionOnComplete: Boolean = false): Unit = {
+      abortEmptyTransactionOnComplete: Boolean = false): Unit = {
     val awaitingConf = awaitingConfirmationValue
     batchOffsets match {
       case batch: NonemptyTransactionBatch if awaitingConf == 0 =>
@@ -180,7 +178,7 @@ private final class TransactionalProducerStageLogic[K, V, P](
    */
   private def parseFirstMessage(msg: Envelope[K, V, P]): Boolean =
     producerAssignmentLifecycle match {
-      case Assigned => true
+      case Assigned                            => true
       case Unassigned if firstMessage.nonEmpty =>
         // this should never happen because demand should be suspended until the producer is assigned
         throw new IllegalStateException("Cannot reapply first message")
@@ -195,8 +193,7 @@ private final class TransactionalProducerStageLogic[K, V, P](
         false
       case AsyncCreateRequestSent =>
         throw new IllegalStateException(
-          s"Should never receive new messages while in producer assignment state '$AsyncCreateRequestSent'"
-        )
+          s"Should never receive new messages while in producer assignment state '$AsyncCreateRequestSent'")
     }
 
   private def generatedTransactionalConfig(msg: Envelope[K, V, P]): ProducerSettings[K, V] = {
@@ -212,13 +209,12 @@ private final class TransactionalProducerStageLogic[K, V, P](
     stage.settings.withProperties(
       ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG -> true.toString,
       ProducerConfig.TRANSACTIONAL_ID_CONFIG -> txId,
-      ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION -> 1.toString
-    )
+      ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION -> 1.toString)
   }
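
The three properties applied above are what transactions require of the producer: idempotence, a stable transactional id, and a single in-flight request. A sketch of the same configuration built by hand on plain settings (serializers and the id are illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.ProducerSettings
    import org.apache.kafka.clients.producer.ProducerConfig
    import org.apache.kafka.common.serialization.StringSerializer

    def transactionalSettings(system: ActorSystem, txId: String): ProducerSettings[String, String] =
      ProducerSettings(system, new StringSerializer, new StringSerializer)
        .withProperties(
          // Retried sends must not introduce duplicates.
          ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG -> "true",
          // Identifies this producer's transactions across restarts.
          ProducerConfig.TRANSACTIONAL_ID_CONFIG -> txId,
          // A single in-flight request preserves ordering under retries.
          ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION -> "1")
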
 
   override protected def postSend(msg: Envelope[K, V, P]): Unit = msg.passThrough match {
     case o: ConsumerMessage.PartitionOffsetCommittedMarker => batchOffsets = batchOffsets.updated(o)
-    case _ =>
+    case _                                                 =>
   }
 
   override def onCompletionSuccess(): Unit = {
@@ -237,16 +233,16 @@ private final class TransactionalProducerStageLogic[K, V, P](
   private def commitTransaction(batch: NonemptyTransactionBatch, beginNewTransaction: Boolean): Unit = {
     val group = batch.group
     log.debug("Committing transaction for transactional id '{}' consumer group '{}' with offsets: {}",
-              transactionalId,
-              group,
-              batch.offsets)
+      transactionalId,
+      group,
+      batch.offsets)
     val offsetMap = batch.offsetMap()
     producer.sendOffsetsToTransaction(offsetMap.asJava, new ConsumerGroupMetadata(group))
     producer.commitTransaction()
     log.debug("Committed transaction for transactional id '{}' consumer group '{}' with offsets: {}",
-              transactionalId,
-              group,
-              batch.offsets)
+      transactionalId,
+      group,
+      batch.offsets)
     batchOffsets = TransactionBatch.empty
     batch
       .internalCommit()
@@ -260,9 +256,7 @@ private final class TransactionalProducerStageLogic[K, V, P](
   }
 
   private val onInternalCommitAckCb: AsyncCallback[Unit] = {
-    getAsyncCallback[Unit](
-      _ => scheduleOnce(commitSchedulerKey, producerSettings.eosCommitInterval)
-    )
+    getAsyncCallback[Unit](_ => scheduleOnce(commitSchedulerKey, producerSettings.eosCommitInterval))
   }
 
   private def initTransactions(): Unit = {
diff --git a/core/src/main/scala/akka/kafka/internal/TransactionalSources.scala b/core/src/main/scala/akka/kafka/internal/TransactionalSources.scala
index 04ca7369..56403b37 100644
--- a/core/src/main/scala/akka/kafka/internal/TransactionalSources.scala
+++ b/core/src/main/scala/akka/kafka/internal/TransactionalSources.scala
@@ -7,34 +7,33 @@ package akka.kafka.internal
 
 import java.util.Locale
 
-import akka.{Done, NotUsed}
-import akka.actor.{ActorRef, Status, Terminated}
+import akka.{ Done, NotUsed }
+import akka.actor.{ ActorRef, Status, Terminated }
 import akka.actor.Status.Failure
 import akka.annotation.InternalApi
-import akka.kafka.ConsumerMessage.{PartitionOffset, TransactionalMessage}
+import akka.kafka.ConsumerMessage.{ PartitionOffset, TransactionalMessage }
 import akka.kafka.internal.KafkaConsumerActor.Internal.Revoked
 import akka.kafka.internal.SubSourceLogic._
 import akka.kafka.internal.TransactionalSubSourceStageLogic.DrainingComplete
 import akka.kafka.scaladsl.Consumer.Control
 import akka.kafka.scaladsl.PartitionAssignmentHandler
-import akka.kafka.{AutoSubscription, ConsumerFailed, ConsumerSettings, RestrictedConsumer, Subscription}
+import akka.kafka.{ AutoSubscription, ConsumerFailed, ConsumerSettings, RestrictedConsumer, Subscription }
 import akka.stream.SourceShape
 import akka.stream.scaladsl.Source
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
 import akka.util.Timeout
-import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, OffsetAndMetadata}
-import org.apache.kafka.common.{IsolationLevel, TopicPartition}
+import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord, OffsetAndMetadata }
+import org.apache.kafka.common.{ IsolationLevel, TopicPartition }
 
 import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.{Await, ExecutionContext, Future}
+import scala.concurrent.{ Await, ExecutionContext, Future }
 
 /** Internal API */
 @InternalApi
 private[kafka] final class TransactionalSource[K, V](consumerSettings: ConsumerSettings[K, V],
-                                                     val subscription: Subscription)
+    val subscription: Subscription)
     extends KafkaSourceStage[K, V, TransactionalMessage[K, V]](
-      s"TransactionalSource ${subscription.renderStageAttribute}"
-    ) {
+      s"TransactionalSource ${subscription.renderStageAttribute}") {
 
   require(consumerSettings.properties(ConsumerConfig.GROUP_ID_CONFIG).nonEmpty, "You must define a Consumer group.id.")
 
@@ -58,24 +57,21 @@ private[internal] object TransactionalSource {
    */
   def txConsumerSettings[K, V](consumerSettings: ConsumerSettings[K, V]) = consumerSettings.withProperty(
     ConsumerConfig.ISOLATION_LEVEL_CONFIG,
-    IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH)
-  )
+    IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH))
 
 }
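
Forcing `read_committed` is what keeps records from aborted transactions invisible to the consumer side. The equivalent setting applied to plain consumer settings, as a sketch (deserializers illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import org.apache.kafka.clients.consumer.ConsumerConfig
    import org.apache.kafka.common.serialization.StringDeserializer

    def readCommittedSettings(system: ActorSystem): ConsumerSettings[String, String] =
      ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
        // Only records from committed transactions reach the stream.
        .withProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
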
 
 /** Internal API */
 @InternalApi
 private[kafka] final class TransactionalSourceWithOffsetContext[K, V](consumerSettings: ConsumerSettings[K, V],
-                                                                      subscription: Subscription)
+    subscription: Subscription)
     extends KafkaSourceStage[K, V, (ConsumerRecord[K, V], PartitionOffset)](
-      s"TransactionalSourceWithOffsetContext ${subscription.renderStageAttribute}"
-    ) {
+      s"TransactionalSourceWithOffsetContext ${subscription.renderStageAttribute}") {
 
   require(consumerSettings.properties(ConsumerConfig.GROUP_ID_CONFIG).nonEmpty, "You must define a Consumer group.id.")
 
   override protected def logic(
-      shape: SourceShape[(ConsumerRecord[K, V], PartitionOffset)]
-  ): GraphStageLogic with Control =
+      shape: SourceShape[(ConsumerRecord[K, V], PartitionOffset)]): GraphStageLogic with Control =
     new TransactionalSourceLogic(shape, TransactionalSource.txConsumerSettings(consumerSettings), subscription)
       with TransactionalOffsetContextBuilder[K, V] {
       override val fromPartitionedSource: Boolean = false
@@ -85,8 +81,8 @@ private[kafka] final class TransactionalSourceWithOffsetContext[K, V](consumerSe
 /** Internal API */
 @InternalApi
 private[internal] abstract class TransactionalSourceLogic[K, V, Msg](shape: SourceShape[Msg],
-                                                                     consumerSettings: ConsumerSettings[K, V],
-                                                                     subscription: Subscription)
+    consumerSettings: ConsumerSettings[K, V],
+    subscription: Subscription)
     extends SingleSourceLogic[K, V, Msg](shape, consumerSettings, subscription)
     with TransactionalMessageBuilderBase[K, V, Msg] {
 
@@ -130,8 +126,7 @@ private[internal] abstract class TransactionalSourceLogic[K, V, Msg](shape: Sour
           new Runnable {
             override def run(): Unit =
               sourceActor.ref.tell(Drain(partitions, ack.orElse(Some(sender)), msg), sourceActor.ref)
-          }
-        )
+          })
       }
   }
 
@@ -148,15 +143,13 @@ private[internal] abstract class TransactionalSourceLogic[K, V, Msg](shape: Sour
   override protected def stopConsumerActor(): Unit =
     sourceActor.ref
       .tell(Drain(
-              inFlightRecords.assigned(),
-              Some(consumerActor),
-              KafkaConsumerActor.Internal.StopFromStage(id)
-            ),
-            sourceActor.ref)
+          inFlightRecords.assigned(),
+          Some(consumerActor),
+          KafkaConsumerActor.Internal.StopFromStage(id)),
+        sourceActor.ref)
 
   override protected def addToPartitionAssignmentHandler(
-      handler: PartitionAssignmentHandler
-  ): PartitionAssignmentHandler = {
+      handler: PartitionAssignmentHandler): PartitionAssignmentHandler = {
     val blockingRevokedCall = new PartitionAssignmentHandler {
       override def onAssign(assignedTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit = ()
 
@@ -194,10 +187,9 @@ private[internal] abstract class TransactionalSourceLogic[K, V, Msg](shape: Sour
 @InternalApi
 private[kafka] final class TransactionalSubSource[K, V](
     consumerSettings: ConsumerSettings[K, V],
-    subscription: AutoSubscription
-) extends KafkaSourceStage[K, V, (TopicPartition, Source[TransactionalMessage[K, V], NotUsed])](
-      s"TransactionalSubSource ${subscription.renderStageAttribute}"
-    ) {
+    subscription: AutoSubscription)
+    extends KafkaSourceStage[K, V, (TopicPartition, Source[TransactionalMessage[K, V], NotUsed])](
+      s"TransactionalSubSource ${subscription.renderStageAttribute}") {
   import TransactionalSourceLogic._
 
   require(consumerSettings.properties(ConsumerConfig.GROUP_ID_CONFIG).nonEmpty, "You must define a Consumer group.id.")
@@ -211,12 +203,11 @@ private[kafka] final class TransactionalSubSource[K, V](
    */
   private val txConsumerSettings = consumerSettings.withProperty(
     ConsumerConfig.ISOLATION_LEVEL_CONFIG,
-    IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH)
-  )
+    IsolationLevel.READ_COMMITTED.toString.toLowerCase(Locale.ENGLISH))
 
   override protected def logic(
-      shape: SourceShape[(TopicPartition, Source[TransactionalMessage[K, V], NotUsed])]
-  ): GraphStageLogic with Control = {
+      shape: SourceShape[(TopicPartition, Source[TransactionalMessage[K, V], NotUsed])])
+      : GraphStageLogic with Control = {
     val factory = new SubSourceStageLogicFactory[K, V, TransactionalMessage[K, V]] {
       def create(
           shape: SourceShape[TransactionalMessage[K, V]],
@@ -224,22 +215,20 @@ private[kafka] final class TransactionalSubSource[K, V](
           consumerActor: ActorRef,
           subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
           subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
-          actorNumber: Int
-      ): SubSourceStageLogic[K, V, TransactionalMessage[K, V]] =
+          actorNumber: Int): SubSourceStageLogic[K, V, TransactionalMessage[K, V]] =
         new TransactionalSubSourceStageLogic(shape,
-                                             tp,
-                                             consumerActor,
-                                             subSourceStartedCb,
-                                             subSourceCancelledCb,
-                                             actorNumber,
-                                             txConsumerSettings)
+          tp,
+          consumerActor,
+          subSourceStartedCb,
+          subSourceCancelledCb,
+          actorNumber,
+          txConsumerSettings)
     }
 
     new SubSourceLogic(shape, txConsumerSettings, subscription, subSourceStageLogicFactory = factory) {
 
       override protected def addToPartitionAssignmentHandler(
-          handler: PartitionAssignmentHandler
-      ): PartitionAssignmentHandler = {
+          handler: PartitionAssignmentHandler): PartitionAssignmentHandler = {
         val blockingRevokedCall = new PartitionAssignmentHandler {
           override def onAssign(assignedTps: Set[TopicPartition], consumer: RestrictedConsumer): Unit = ()
 
@@ -288,14 +277,13 @@ private object TransactionalSourceLogic {
 
   case object Drained
   final case class Drain[T](partitions: Set[TopicPartition],
-                            drainedConfirmationRef: Option[ActorRef],
-                            drainedConfirmationMsg: T)
+      drainedConfirmationRef: Option[ActorRef],
+      drainedConfirmationMsg: T)
   final case class Committed(offsets: Map[TopicPartition, OffsetAndMetadata])
   case object CommittingFailure
 
   private[internal] final case class CommittedMarkerRef(sourceActor: ActorRef, commitTimeout: FiniteDuration)(
-      implicit ec: ExecutionContext
-  ) extends CommittedMarker {
+      implicit ec: ExecutionContext) extends CommittedMarker {
     override def committed(offsets: Map[TopicPartition, OffsetAndMetadata]): Future[Done] = {
       import akka.pattern.ask
       sourceActor
@@ -332,7 +320,7 @@ private object TransactionalSourceLogic {
       override def committed(committed: Map[TopicPartition, Offset]): Unit =
         inFlightRecords = inFlightRecords.flatMap {
           case (tp, offset) if committed.get(tp).contains(offset) => None
-          case x => Some(x)
+          case x                                                  => Some(x)
         }
 
       override def revoke(revokedTps: Set[TopicPartition]): Unit =
@@ -357,13 +345,12 @@ private final class TransactionalSubSourceStageLogic[K, V](
     subSourceStartedCb: AsyncCallback[SubSourceStageLogicControl],
     subSourceCancelledCb: AsyncCallback[(TopicPartition, SubSourceCancellationStrategy)],
     actorNumber: Int,
-    consumerSettings: ConsumerSettings[K, V]
-) extends SubSourceStageLogic[K, V, TransactionalMessage[K, V]](shape,
-                                                                  tp,
-                                                                  consumerActor,
-                                                                  subSourceStartedCb,
-                                                                  subSourceCancelledCb,
-                                                                  actorNumber)
+    consumerSettings: ConsumerSettings[K, V]) extends SubSourceStageLogic[K, V, TransactionalMessage[K, V]](shape,
+      tp,
+      consumerActor,
+      subSourceStartedCb,
+      subSourceCancelledCb,
+      actorNumber)
     with TransactionalMessageBuilder[K, V] {
 
   import TransactionalSourceLogic._
@@ -426,8 +413,7 @@ private final class TransactionalSubSourceStageLogic[K, V](
           new Runnable {
             override def run(): Unit =
               subSourceActor.ref.tell(Drain(partitions, ack.orElse(Some(sender)), msg), stageActor.ref)
-          }
-        )
+          })
       }
     case (sender, DrainingComplete) =>
       completeStage()
diff --git a/core/src/main/scala/akka/kafka/javadsl/Committer.scala b/core/src/main/scala/akka/kafka/javadsl/Committer.scala
index 0fd1de8e..5341c994 100644
--- a/core/src/main/scala/akka/kafka/javadsl/Committer.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/Committer.scala
@@ -8,10 +8,10 @@ import java.util.concurrent.CompletionStage
 
 import akka.annotation.ApiMayChange
 import akka.japi.Pair
-import akka.{Done, NotUsed}
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch}
-import akka.kafka.{scaladsl, CommitterSettings}
-import akka.stream.javadsl.{Flow, FlowWithContext, Sink}
+import akka.{ Done, NotUsed }
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffsetBatch }
+import akka.kafka.{ scaladsl, CommitterSettings }
+import akka.stream.javadsl.{ Flow, FlowWithContext, Sink }
 
 import scala.compat.java8.FutureConverters.FutureOps
 
@@ -37,8 +37,7 @@ object Committer {
    */
   @ApiMayChange
   def flowWithOffsetContext[E, C <: Committable](
-      settings: CommitterSettings
-  ): FlowWithContext[E, C, NotUsed, CommittableOffsetBatch, NotUsed] =
+      settings: CommitterSettings): FlowWithContext[E, C, NotUsed, CommittableOffsetBatch, NotUsed] =
     scaladsl.Committer.flowWithOffsetContext[E](settings).asJava
 
   /**
@@ -54,8 +53,7 @@ object Committer {
    */
   @ApiMayChange
   def sinkWithOffsetContext[E, C <: Committable](
-      settings: CommitterSettings
-  ): Sink[Pair[E, C], CompletionStage[Done]] =
+      settings: CommitterSettings): Sink[Pair[E, C], CompletionStage[Done]] =
     akka.stream.scaladsl
       .Flow[Pair[E, C]]
       .map(_.toScala)
diff --git a/core/src/main/scala/akka/kafka/javadsl/Consumer.scala b/core/src/main/scala/akka/kafka/javadsl/Consumer.scala
index 8f20cae7..ee32b85c 100644
--- a/core/src/main/scala/akka/kafka/javadsl/Consumer.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/Consumer.scala
@@ -5,19 +5,19 @@
 
 package akka.kafka.javadsl
 
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
 
 import akka.actor.ActorRef
 import akka.annotation.ApiMayChange
 import akka.dispatch.ExecutionContexts
 import akka.japi.Pair
-import akka.kafka.ConsumerMessage.{CommittableMessage, CommittableOffset}
+import akka.kafka.ConsumerMessage.{ CommittableMessage, CommittableOffset }
 import akka.kafka._
-import akka.kafka.internal.{ConsumerControlAsJava, SourceWithOffsetContext}
-import akka.stream.javadsl.{Source, SourceWithContext}
-import akka.{Done, NotUsed}
+import akka.kafka.internal.{ ConsumerControlAsJava, SourceWithOffsetContext }
+import akka.stream.javadsl.{ Source, SourceWithContext }
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.consumer.ConsumerRecord
-import org.apache.kafka.common.{Metric, MetricName, TopicPartition}
+import org.apache.kafka.common.{ Metric, MetricName, TopicPartition }
 
 import scala.jdk.CollectionConverters._
 import scala.compat.java8.FutureConverters._
@@ -134,7 +134,7 @@ object Consumer {
    * stronger than the "at-least once" semantics you get with Kafka's offset commit functionality.
    */
   def plainSource[K, V](settings: ConsumerSettings[K, V],
-                        subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
     scaladsl.Consumer
       .plainSource(settings, subscription)
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -154,7 +154,7 @@ object Consumer {
    * instead of this API.
    */
   def committableSource[K, V](settings: ConsumerSettings[K, V],
-                              subscription: Subscription): Source[CommittableMessage[K, V], Control] =
+      subscription: Subscription): Source[CommittableMessage[K, V], Control] =
     scaladsl.Consumer
       .committableSource(settings, subscription)
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -174,8 +174,7 @@ object Consumer {
   @ApiMayChange
   def sourceWithOffsetContext[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: Subscription
-  ): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
+      subscription: Subscription): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
     // TODO this could use `scaladsl committableSourceWithContext` but `mapMaterializedValue` is not available, yet
     // See https://github.com/akka/akka/issues/26836
     akka.stream.scaladsl.Source
@@ -204,16 +203,15 @@ object Consumer {
   def sourceWithOffsetContext[K, V](
       settings: ConsumerSettings[K, V],
       subscription: Subscription,
-      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String]
-  ): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
+      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String])
+      : SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
     // TODO this could use `scaladsl committableSourceWithContext` but `mapMaterializedValue` is not available, yet
     // See https://github.com/akka/akka/issues/26836
     akka.stream.scaladsl.Source
       .fromGraph(
         new SourceWithOffsetContext[K, V](settings,
-                                          subscription,
-                                          (record: ConsumerRecord[K, V]) => metadataFromRecord(record))
-      )
+          subscription,
+          (record: ConsumerRecord[K, V]) => metadataFromRecord(record)))
       .mapMaterializedValue(ConsumerControlAsJava.apply)
       .asSourceWithContext(_._2)
       .map(_._1)
@@ -227,8 +225,8 @@ object Consumer {
   def commitWithMetadataSource[K, V](
       settings: ConsumerSettings[K, V],
       subscription: Subscription,
-      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String]
-  ): Source[CommittableMessage[K, V], Control] =
+      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String])
+      : Source[CommittableMessage[K, V], Control] =
     scaladsl.Consumer
       .commitWithMetadataSource(settings, subscription, (record: ConsumerRecord[K, V]) => metadataFromRecord(record))
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -239,7 +237,7 @@ object Consumer {
    * before being emitted downstream.
    */
   def atMostOnceSource[K, V](settings: ConsumerSettings[K, V],
-                             subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
     scaladsl.Consumer
       .atMostOnceSource(settings, subscription)
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -253,8 +251,7 @@ object Consumer {
    */
   def plainPartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: AutoSubscription
-  ): Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
+      subscription: AutoSubscription): Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .plainPartitionedSource(settings, subscription)
       .map {
@@ -272,17 +269,15 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: java.util.function.Function[java.util.Set[TopicPartition], CompletionStage[
-        java.util.Map[TopicPartition, Long]
-      ]]
-  ): Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
+          java.util.Map[TopicPartition, Long]]])
+      : Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .plainPartitionedManualOffsetSource(
         settings,
         subscription,
         (tps: Set[TopicPartition]) =>
           getOffsetsOnAssign(tps.asJava).toScala.map(_.asScala.toMap)(ExecutionContexts.parasitic),
-        _ => ()
-      )
+        _ => ())
       .map {
         case (tp, source) => Pair(tp, source.asJava)
       }
@@ -302,18 +297,16 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: java.util.function.Function[java.util.Set[TopicPartition], CompletionStage[
-        java.util.Map[TopicPartition, Long]
-      ]],
-      onRevoke: java.util.function.Consumer[java.util.Set[TopicPartition]]
-  ): Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
+          java.util.Map[TopicPartition, Long]]],
+      onRevoke: java.util.function.Consumer[java.util.Set[TopicPartition]])
+      : Source[Pair[TopicPartition, Source[ConsumerRecord[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .plainPartitionedManualOffsetSource(
         settings,
         subscription,
         (tps: Set[TopicPartition]) =>
           getOffsetsOnAssign(tps.asJava).toScala.map(_.asScala.toMap)(ExecutionContexts.parasitic),
-        (tps: Set[TopicPartition]) => onRevoke.accept(tps.asJava)
-      )
+        (tps: Set[TopicPartition]) => onRevoke.accept(tps.asJava))
       .map {
         case (tp, source) => Pair(tp, source.asJava)
       }
@@ -325,8 +318,8 @@ object Consumer {
    */
   def committablePartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: AutoSubscription
-  ): Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
+      subscription: AutoSubscription)
+      : Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .committablePartitionedSource(settings, subscription)
       .map {
@@ -342,17 +335,15 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: java.util.function.Function[java.util.Set[TopicPartition], CompletionStage[
-        java.util.Map[TopicPartition, Long]
-      ]]
-  ): Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
+          java.util.Map[TopicPartition, Long]]])
+      : Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .committablePartitionedManualOffsetSource(
         settings,
         subscription,
         (tps: Set[TopicPartition]) =>
           getOffsetsOnAssign(tps.asJava).toScala.map(_.asScala.toMap)(ExecutionContexts.parasitic),
-        _ => ()
-      )
+        _ => ())
       .map {
         case (tp, source) => Pair(tp, source.asJava)
       }
@@ -366,18 +357,16 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: java.util.function.Function[java.util.Set[TopicPartition], CompletionStage[
-        java.util.Map[TopicPartition, Long]
-      ]],
-      onRevoke: java.util.function.Consumer[java.util.Set[TopicPartition]]
-  ): Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
+          java.util.Map[TopicPartition, Long]]],
+      onRevoke: java.util.function.Consumer[java.util.Set[TopicPartition]])
+      : Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .committablePartitionedManualOffsetSource(
         settings,
         subscription,
         (tps: Set[TopicPartition]) =>
           getOffsetsOnAssign(tps.asJava).toScala.map(_.asScala.toMap)(ExecutionContexts.parasitic),
-        (tps: Set[TopicPartition]) => onRevoke.accept(tps.asJava)
-      )
+        (tps: Set[TopicPartition]) => onRevoke.accept(tps.asJava))
       .map {
         case (tp, source) => Pair(tp, source.asJava)
       }
@@ -390,12 +379,12 @@ object Consumer {
   def commitWithMetadataPartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
-      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String]
-  ): Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
+      metadataFromRecord: java.util.function.Function[ConsumerRecord[K, V], String])
+      : Source[Pair[TopicPartition, Source[CommittableMessage[K, V], NotUsed]], Control] =
     scaladsl.Consumer
       .commitWithMetadataPartitionedSource(settings,
-                                           subscription,
-                                           (record: ConsumerRecord[K, V]) => metadataFromRecord(record))
+        subscription,
+        (record: ConsumerRecord[K, V]) => metadataFromRecord(record))
       .map {
         case (tp, source) => Pair(tp, source.asJava)
       }
@@ -407,7 +396,7 @@ object Consumer {
    * a lot of manually assigned topic-partitions and want to keep only one kafka consumer.
    */
   def plainExternalSource[K, V](consumer: ActorRef,
-                                subscription: ManualSubscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: ManualSubscription): Source[ConsumerRecord[K, V], Control] =
     scaladsl.Consumer
       .plainExternalSource(consumer, subscription)
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -417,9 +406,9 @@ object Consumer {
    * The same as [[#plainExternalSource]] but with offset commit support.
    */
   def committableExternalSource[K, V](consumer: ActorRef,
-                                      subscription: ManualSubscription,
-                                      groupId: String,
-                                      commitTimeout: FiniteDuration): Source[CommittableMessage[K, V], Control] =
+      subscription: ManualSubscription,
+      groupId: String,
+      commitTimeout: FiniteDuration): Source[CommittableMessage[K, V], Control] =
     scaladsl.Consumer
       .committableExternalSource(consumer, subscription, groupId, commitTimeout)
       .mapMaterializedValue(new ConsumerControlAsJava(_))
diff --git a/core/src/main/scala/akka/kafka/javadsl/DiscoverySupport.scala b/core/src/main/scala/akka/kafka/javadsl/DiscoverySupport.scala
index 5b3b2762..ddb0efe7 100644
--- a/core/src/main/scala/akka/kafka/javadsl/DiscoverySupport.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/DiscoverySupport.scala
@@ -7,8 +7,8 @@ package akka.kafka.javadsl
 
 import java.util.concurrent.CompletionStage
 
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
-import akka.kafka.{scaladsl, ConsumerSettings, ProducerSettings}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
+import akka.kafka.{ scaladsl, ConsumerSettings, ProducerSettings }
 import com.typesafe.config.Config
 
 import scala.compat.java8.FunctionConverters._
@@ -28,8 +28,8 @@ object DiscoverySupport {
    */
   def consumerBootstrapServers[K, V](
       config: Config,
-      system: ClassicActorSystemProvider
-  ): java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = {
+      system: ClassicActorSystemProvider)
+      : java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = {
     implicit val sys: ClassicActorSystemProvider = system
     val function: ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] =
       scaladsl.DiscoverySupport.consumerBootstrapServers(config)
@@ -39,8 +39,8 @@ object DiscoverySupport {
   // kept for bin-compatibility
   def consumerBootstrapServers[K, V](
       config: Config,
-      system: ActorSystem
-  ): java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = {
+      system: ActorSystem)
+      : java.util.function.Function[ConsumerSettings[K, V], CompletionStage[ConsumerSettings[K, V]]] = {
     val sys: ClassicActorSystemProvider = system
     consumerBootstrapServers(config, sys)
   }
@@ -51,8 +51,8 @@ object DiscoverySupport {
    */
   def producerBootstrapServers[K, V](
       config: Config,
-      system: ClassicActorSystemProvider
-  ): java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = {
+      system: ClassicActorSystemProvider)
+      : java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = {
     implicit val sys: ClassicActorSystemProvider = system
     val function: ProducerSettings[K, V] => Future[ProducerSettings[K, V]] =
       scaladsl.DiscoverySupport.producerBootstrapServers(config)
@@ -62,8 +62,8 @@ object DiscoverySupport {
   // kept for bin-compatibility
   def producerBootstrapServers[K, V](
       config: Config,
-      system: ActorSystem
-  ): java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = {
+      system: ActorSystem)
+      : java.util.function.Function[ProducerSettings[K, V], CompletionStage[ProducerSettings[K, V]]] = {
     val sys: ClassicActorSystemProvider = system
     producerBootstrapServers(config, sys)
   }
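
These functions plug into settings enrichment so bootstrap servers are resolved through Akka Discovery instead of static config. A sketch of the scaladsl wiring, assuming a config section (here named "our-kafka-consumer") that declares a discoverable service name as the scaladsl DiscoverySupport expects:

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import akka.kafka.scaladsl.DiscoverySupport
    import org.apache.kafka.common.serialization.StringDeserializer

    implicit val system: ActorSystem = ActorSystem()
    // Illustrative section name; it must extend the akka.kafka.consumer layout
    // and name the service to look up.
    val section = system.settings.config.getConfig("our-kafka-consumer")
    val settings =
      ConsumerSettings(section, new StringDeserializer, new StringDeserializer)
        .withEnrichAsync(DiscoverySupport.consumerBootstrapServers(section))
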
diff --git a/core/src/main/scala/akka/kafka/javadsl/MetadataClient.scala b/core/src/main/scala/akka/kafka/javadsl/MetadataClient.scala
index 09f3448b..63b0aecf 100644
--- a/core/src/main/scala/akka/kafka/javadsl/MetadataClient.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/MetadataClient.scala
@@ -5,14 +5,14 @@
 
 package akka.kafka.javadsl
 
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
 
-import akka.actor.{ActorRef, ActorSystem}
+import akka.actor.{ ActorRef, ActorSystem }
 import akka.dispatch.ExecutionContexts
 import akka.kafka.ConsumerSettings
 import akka.util.Timeout
 import org.apache.kafka.clients.consumer.OffsetAndMetadata
-import org.apache.kafka.common.{PartitionInfo, TopicPartition}
+import org.apache.kafka.common.{ PartitionInfo, TopicPartition }
 
 import scala.compat.java8.FutureConverters._
 import scala.concurrent.ExecutionContextExecutor
@@ -21,8 +21,7 @@ import scala.jdk.CollectionConverters._
 class MetadataClient private (metadataClient: akka.kafka.scaladsl.MetadataClient) {
 
   def getBeginningOffsets[K, V](
-      partitions: java.util.Set[TopicPartition]
-  ): CompletionStage[java.util.Map[TopicPartition, java.lang.Long]] =
+      partitions: java.util.Set[TopicPartition]): CompletionStage[java.util.Map[TopicPartition, java.lang.Long]] =
     metadataClient
       .getBeginningOffsets(partitions.asScala.toSet)
       .map { beginningOffsets =>
@@ -37,8 +36,7 @@ class MetadataClient private (metadataClient: akka.kafka.scaladsl.MetadataClient
       .toJava
 
   def getEndOffsets(
-      partitions: java.util.Set[TopicPartition]
-  ): CompletionStage[java.util.Map[TopicPartition, java.lang.Long]] =
+      partitions: java.util.Set[TopicPartition]): CompletionStage[java.util.Map[TopicPartition, java.lang.Long]] =
     metadataClient
       .getEndOffsets(partitions.asScala.toSet)
       .map { endOffsets =>
@@ -75,8 +73,7 @@ class MetadataClient private (metadataClient: akka.kafka.scaladsl.MetadataClient
       .toJava
 
   def getCommittedOffsets(
-      partitions: java.util.Set[TopicPartition]
-  ): CompletionStage[java.util.Map[TopicPartition, OffsetAndMetadata]] =
+      partitions: java.util.Set[TopicPartition]): CompletionStage[java.util.Map[TopicPartition, OffsetAndMetadata]] =
     metadataClient
       .getCommittedOffsets(partitions.asScala.toSet)
       .map { committedOffsets =>
@@ -97,9 +94,9 @@ object MetadataClient {
   }
 
   def create[K, V](consumerSettings: ConsumerSettings[K, V],
-                   timeout: Timeout,
-                   system: ActorSystem,
-                   executor: Executor): MetadataClient = {
+      timeout: Timeout,
+      system: ActorSystem,
+      executor: Executor): MetadataClient = {
     val metadataClient = akka.kafka.scaladsl.MetadataClient
       .create(consumerSettings, timeout)(system, ExecutionContexts.fromExecutor(executor))
     new MetadataClient(metadataClient)
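
A small usage sketch against the scaladsl client that this class wraps (bootstrap address, topic and partition are illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import akka.kafka.scaladsl.MetadataClient
    import akka.util.Timeout
    import org.apache.kafka.common.TopicPartition
    import org.apache.kafka.common.serialization.StringDeserializer
    import scala.concurrent.duration._

    implicit val system: ActorSystem = ActorSystem()
    import system.dispatcher

    val consumerSettings =
      ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
        .withBootstrapServers("localhost:9092")
    val client = MetadataClient.create(consumerSettings, Timeout(5.seconds))
    // Future[Map[TopicPartition, Long]] with the latest offset per partition.
    val endOffsets = client.getEndOffsets(Set(new TopicPartition("topic-1", 0)))
    // Close the client (and its internal consumer actor) when done: client.close()
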
diff --git a/core/src/main/scala/akka/kafka/javadsl/Producer.scala b/core/src/main/scala/akka/kafka/javadsl/Producer.scala
index 28f78279..de595ffe 100644
--- a/core/src/main/scala/akka/kafka/javadsl/Producer.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/Producer.scala
@@ -9,9 +9,9 @@ import java.util.concurrent.CompletionStage
 import akka.annotation.ApiMayChange
 import akka.kafka.ConsumerMessage.Committable
 import akka.kafka.ProducerMessage._
-import akka.kafka.{scaladsl, CommitterSettings, ConsumerMessage, ProducerSettings}
-import akka.stream.javadsl.{Flow, FlowWithContext, Sink}
-import akka.{japi, Done, NotUsed}
+import akka.kafka.{ scaladsl, CommitterSettings, ConsumerMessage, ProducerSettings }
+import akka.stream.javadsl.{ Flow, FlowWithContext, Sink }
+import akka.{ japi, Done, NotUsed }
 import org.apache.kafka.clients.producer.ProducerRecord
 
 import scala.annotation.nowarn
@@ -47,8 +47,7 @@ object Producer {
   @Deprecated
   def plainSink[K, V](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Sink[ProducerRecord[K, V], CompletionStage[Done]] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V]): Sink[ProducerRecord[K, V], CompletionStage[Done]] =
     plainSink(settings.withProducer(producer))
 
   /**
@@ -71,8 +70,7 @@ object Producer {
    */
   @Deprecated
   def committableSink[K, V, IN <: Envelope[K, V, ConsumerMessage.Committable]](
-      settings: ProducerSettings[K, V]
-  ): Sink[IN, CompletionStage[Done]] = {
+      settings: ProducerSettings[K, V]): Sink[IN, CompletionStage[Done]] = {
     @nowarn("cat=deprecation")
     val sink: Sink[IN, CompletionStage[Done]] = scaladsl.Producer
       .committableSink(settings)
@@ -94,7 +92,6 @@ object Producer {
    *
    * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
    *
-   *
    * Note that there is always a risk that something fails after publishing but before
    * committing, so it is "at-least once delivery" semantics.
    *
@@ -105,8 +102,8 @@ object Producer {
   @Deprecated
   def committableSink[K, V](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Sink[Envelope[K, V, ConsumerMessage.Committable], CompletionStage[Done]] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Sink[Envelope[K, V, ConsumerMessage.Committable], CompletionStage[Done]] =
     committableSink(settings.withProducer(producer))
 
   /**
@@ -126,8 +123,7 @@ object Producer {
    */
   def committableSink[K, V, IN <: Envelope[K, V, ConsumerMessage.Committable]](
       producerSettings: ProducerSettings[K, V],
-      committerSettings: CommitterSettings
-  ): Sink[IN, CompletionStage[Done]] =
+      committerSettings: CommitterSettings): Sink[IN, CompletionStage[Done]] =
     scaladsl.Producer
       .committableSink(producerSettings, committerSettings)
       .mapMaterializedValue(_.toJava)
@@ -151,8 +147,7 @@ object Producer {
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def committableSinkWithOffsetContext[K, V, IN <: Envelope[K, V, _], C <: Committable](
       producerSettings: ProducerSettings[K, V],
-      committerSettings: CommitterSettings
-  ): Sink[akka.japi.Pair[IN, C], CompletionStage[Done]] =
+      committerSettings: CommitterSettings): Sink[akka.japi.Pair[IN, C], CompletionStage[Done]] =
     committableSink(producerSettings, committerSettings)
       .contramap(new akka.japi.function.Function[japi.Pair[IN, C], Envelope[K, V, C]] {
         override def apply(p: japi.Pair[IN, C]) = p.first.withPassThrough(p.second)
@@ -171,8 +166,7 @@ object Producer {
    */
   @Deprecated
   def flow[K, V, PassThrough](
-      settings: ProducerSettings[K, V]
-  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] = {
+      settings: ProducerSettings[K, V]): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] = {
     @nowarn("cat=deprecation")
     val flow = scaladsl.Producer
       .flow(settings)
@@ -197,8 +191,7 @@ object Producer {
    * be committed later in the flow.
    */
   def flexiFlow[K, V, PassThrough](
-      settings: ProducerSettings[K, V]
-  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
+      settings: ProducerSettings[K, V]): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
     scaladsl.Producer
       .flexiFlow(settings)
       .asJava
@@ -223,8 +216,7 @@ object Producer {
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def flowWithContext[K, V, C](
-      settings: ProducerSettings[K, V]
-  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
+      settings: ProducerSettings[K, V]): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
     scaladsl.Producer.flowWithContext(settings).asJava
 
   /**
@@ -243,8 +235,8 @@ object Producer {
   @Deprecated
   def flow[K, V, PassThrough](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
     flow(settings.withProducer(producer))
 
   /**
@@ -269,8 +261,8 @@ object Producer {
   @Deprecated
   def flexiFlow[K, V, PassThrough](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
     flexiFlow(settings.withProducer(producer))
 
   /**
@@ -298,8 +290,8 @@ object Producer {
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def flowWithContext[K, V, C](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
     flowWithContext(settings.withProducer(producer))
 
 }
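
Since this file is delegation to the scaladsl API, a short end-to-end sketch of `flexiFlow` with a pass-through may help (topic, address and element values are illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.{ ProducerMessage, ProducerSettings }
    import akka.kafka.scaladsl.Producer
    import akka.stream.scaladsl.Source
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.StringSerializer

    implicit val system: ActorSystem = ActorSystem()
    val producerSettings =
      ProducerSettings(system, new StringSerializer, new StringSerializer)
        .withBootstrapServers("localhost:9092")

    Source(1 to 10)
      // The second argument travels through the flow untouched as pass-through.
      .map(n => ProducerMessage.single(new ProducerRecord("topic-1", n.toString, n.toString), n))
      .via(Producer.flexiFlow(producerSettings))
      .map(_.passThrough)
      .runForeach(n => println(s"record $n acknowledged"))
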
diff --git a/core/src/main/scala/akka/kafka/javadsl/SendProducer.scala b/core/src/main/scala/akka/kafka/javadsl/SendProducer.scala
index 76ea3734..692c0a89 100644
--- a/core/src/main/scala/akka/kafka/javadsl/SendProducer.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/SendProducer.scala
@@ -8,10 +8,10 @@ package akka.kafka.javadsl
 import java.util.concurrent.CompletionStage
 
 import akka.Done
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
 import akka.kafka.ProducerMessage._
-import akka.kafka.{scaladsl, ProducerSettings}
-import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
+import akka.kafka.{ scaladsl, ProducerSettings }
+import org.apache.kafka.clients.producer.{ ProducerRecord, RecordMetadata }
 
 import scala.compat.java8.FutureConverters._
 
diff --git a/core/src/main/scala/akka/kafka/javadsl/Transactional.scala b/core/src/main/scala/akka/kafka/javadsl/Transactional.scala
index 979cba69..2fcc4d17 100644
--- a/core/src/main/scala/akka/kafka/javadsl/Transactional.scala
+++ b/core/src/main/scala/akka/kafka/javadsl/Transactional.scala
@@ -9,13 +9,13 @@ import java.util.concurrent.CompletionStage
 
 import akka.annotation.ApiMayChange
 import akka.japi.Pair
-import akka.kafka.ConsumerMessage.{PartitionOffset, TransactionalMessage}
+import akka.kafka.ConsumerMessage.{ PartitionOffset, TransactionalMessage }
 import akka.kafka.ProducerMessage._
 import akka.kafka._
-import akka.kafka.internal.{ConsumerControlAsJava, TransactionalSourceWithOffsetContext}
+import akka.kafka.internal.{ ConsumerControlAsJava, TransactionalSourceWithOffsetContext }
 import akka.kafka.javadsl.Consumer.Control
 import akka.stream.javadsl._
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 
 import scala.compat.java8.FutureConverters.FutureOps
@@ -30,7 +30,7 @@ object Transactional {
    * necessary to use the [[Transactional.sink]] or [[Transactional.flow]] (for passthrough).
    */
   def source[K, V](consumerSettings: ConsumerSettings[K, V],
-                   subscription: Subscription): Source[TransactionalMessage[K, V], Control] =
+      subscription: Subscription): Source[TransactionalMessage[K, V], Control] =
     scaladsl.Transactional
       .source(consumerSettings, subscription)
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -45,8 +45,7 @@ object Transactional {
   @ApiMayChange
   def sourceWithOffsetContext[K, V](
       consumerSettings: ConsumerSettings[K, V],
-      subscription: Subscription
-  ): SourceWithContext[ConsumerRecord[K, V], PartitionOffset, Control] =
+      subscription: Subscription): SourceWithContext[ConsumerRecord[K, V], PartitionOffset, Control] =
     akka.stream.scaladsl.Source
       .fromGraph(new TransactionalSourceWithOffsetContext[K, V](consumerSettings, subscription))
       .mapMaterializedValue(ConsumerControlAsJava.apply)
@@ -85,8 +84,7 @@ object Transactional {
    */
   def sink[K, V, IN <: Envelope[K, V, ConsumerMessage.PartitionOffset]](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Sink[IN, CompletionStage[Done]] =
+      transactionalId: String): Sink[IN, CompletionStage[Done]] =
     scaladsl.Transactional
       .sink(settings, transactionalId)
       .mapMaterializedValue(_.toJava)
@@ -101,8 +99,7 @@ object Transactional {
   @ApiMayChange
   def sinkWithOffsetContext[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Sink[Pair[Envelope[K, V, NotUsed], PartitionOffset], CompletionStage[Done]] =
+      transactionalId: String): Sink[Pair[Envelope[K, V, NotUsed], PartitionOffset], CompletionStage[Done]] =
     akka.stream.scaladsl
       .Flow[Pair[Envelope[K, V, NotUsed], PartitionOffset]]
       .map(_.toScala)
@@ -117,8 +114,7 @@ object Transactional {
    */
   def flow[K, V, IN <: Envelope[K, V, ConsumerMessage.PartitionOffset]](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Flow[IN, Results[K, V, ConsumerMessage.PartitionOffset], NotUsed] =
+      transactionalId: String): Flow[IN, Results[K, V, ConsumerMessage.PartitionOffset], NotUsed] =
     scaladsl.Transactional.flow(settings, transactionalId).asJava
 
   /**
@@ -134,11 +130,8 @@ object Transactional {
   @ApiMayChange
   def flowWithOffsetContext[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): FlowWithContext[Envelope[K, V, NotUsed],
-                     ConsumerMessage.PartitionOffset,
-                     Results[K, V, ConsumerMessage.PartitionOffset],
-                     ConsumerMessage.PartitionOffset,
-                     NotUsed] =
+      transactionalId: String): FlowWithContext[Envelope[K, V, NotUsed],
+    ConsumerMessage.PartitionOffset, Results[K, V, ConsumerMessage.PartitionOffset],
+    ConsumerMessage.PartitionOffset, NotUsed] =
     scaladsl.Transactional.flowWithOffsetContext(settings, transactionalId).asJava
 }
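
The scaladoc above describes the consume-transform-produce contract; a compact sketch of it via the scaladsl API (topics, ids and the address are illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.{ ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions }
    import akka.kafka.scaladsl.Transactional
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.{ StringDeserializer, StringSerializer }

    implicit val system: ActorSystem = ActorSystem()
    val consumerSettings =
      ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
        .withBootstrapServers("localhost:9092")
        .withGroupId("group-1")
    val producerSettings =
      ProducerSettings(system, new StringSerializer, new StringSerializer)
        .withBootstrapServers("localhost:9092")

    Transactional
      .source(consumerSettings, Subscriptions.topics("source-topic"))
      .map { msg =>
        // The partition offset rides along so the sink can commit it transactionally.
        ProducerMessage.single(
          new ProducerRecord("sink-topic", msg.record.key, msg.record.value),
          msg.partitionOffset)
      }
      .runWith(Transactional.sink(producerSettings, "transactional-id-1"))
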
diff --git a/core/src/main/scala/akka/kafka/scaladsl/Committer.scala b/core/src/main/scala/akka/kafka/scaladsl/Committer.scala
index dca20560..d2688549 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/Committer.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/Committer.scala
@@ -8,10 +8,10 @@ package akka.kafka.scaladsl
 import akka.annotation.ApiMayChange
 import akka.dispatch.ExecutionContexts
 import akka.kafka.CommitterSettings
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffsetBatch}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffsetBatch }
 import akka.kafka.internal.CommitCollectorStage
-import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Flow, FlowWithContext, Keep, Sink }
+import akka.{ Done, NotUsed }
 
 import scala.concurrent.Future
 
@@ -52,8 +52,7 @@ object Committer {
    */
   @ApiMayChange
   def flowWithOffsetContext[E](
-      settings: CommitterSettings
-  ): FlowWithContext[E, Committable, NotUsed, CommittableOffsetBatch, NotUsed] = {
+      settings: CommitterSettings): FlowWithContext[E, Committable, NotUsed, CommittableOffsetBatch, NotUsed] = {
     val value = Flow[(E, Committable)]
       .map(_._2)
       .via(batchFlow(settings))
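
The flows here batch offsets before committing; the classic at-least-once pipeline feeds committable offsets into `Committer.sink`. A sketch (address, group and topic illustrative):

    import akka.actor.ActorSystem
    import akka.kafka.{ CommitterSettings, ConsumerSettings, Subscriptions }
    import akka.kafka.scaladsl.{ Committer, Consumer }
    import org.apache.kafka.common.serialization.StringDeserializer

    implicit val system: ActorSystem = ActorSystem()
    val consumerSettings =
      ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
        .withBootstrapServers("localhost:9092")
        .withGroupId("group-1")

    Consumer
      .committableSource(consumerSettings, Subscriptions.topics("topic-1"))
      .map(_.committableOffset)
      // Commits are batched according to CommitterSettings (max batch, max interval).
      .runWith(Committer.sink(CommitterSettings(system)))
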
diff --git a/core/src/main/scala/akka/kafka/scaladsl/Consumer.scala b/core/src/main/scala/akka/kafka/scaladsl/Consumer.scala
index 9af78f69..b28b141d 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/Consumer.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/Consumer.scala
@@ -8,16 +8,16 @@ package akka.kafka.scaladsl
 import akka.actor.ActorRef
 import akka.annotation.ApiMayChange
 import akka.dispatch.ExecutionContexts
-import akka.kafka.ConsumerMessage.{CommittableMessage, CommittableOffset}
+import akka.kafka.ConsumerMessage.{ CommittableMessage, CommittableOffset }
 import akka.kafka._
 import akka.kafka.internal._
-import akka.stream.scaladsl.{Source, SourceWithContext}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Source, SourceWithContext }
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.consumer.ConsumerRecord
-import org.apache.kafka.common.{Metric, MetricName, TopicPartition}
+import org.apache.kafka.common.{ Metric, MetricName, TopicPartition }
 
 import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
 
 /**
  * Akka Stream connector for subscribing to Kafka topics.
@@ -171,7 +171,7 @@ object Consumer {
    * stronger than the "at-least once" semantics you get with Kafka's offset commit functionality.
    */
   def plainSource[K, V](settings: ConsumerSettings[K, V],
-                        subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
     Source.fromGraph(new PlainSource[K, V](settings, subscription))
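
As the note above says, `plainSource` leaves offset management entirely to you. A minimal sketch, reusing `consumerSettings` and the imports from the Committer sketch earlier:

    Consumer
      .plainSource(consumerSettings, Subscriptions.topics("topic-1"))
      // No committing here; track record.offset in your own store if needed.
      .runForeach(record => println(s"${record.offset}: ${record.value}"))
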
 
   /**
@@ -188,7 +188,7 @@ object Consumer {
    * instead of this API.
    */
   def committableSource[K, V](settings: ConsumerSettings[K, V],
-                              subscription: Subscription): Source[CommittableMessage[K, V], Control] =
+      subscription: Subscription): Source[CommittableMessage[K, V], Control] =
     Source.fromGraph(new CommittableSource[K, V](settings, subscription))
 
   /**
@@ -205,8 +205,7 @@ object Consumer {
   @ApiMayChange
   def sourceWithOffsetContext[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: Subscription
-  ): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
+      subscription: Subscription): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
     Source
       .fromGraph(new SourceWithOffsetContext[K, V](settings, subscription))
       .asSourceWithContext(_._2)
@@ -231,8 +230,8 @@ object Consumer {
   def sourceWithOffsetContext[K, V](
       settings: ConsumerSettings[K, V],
       subscription: Subscription,
-      metadataFromRecord: ConsumerRecord[K, V] => String
-  ): SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
+      metadataFromRecord: ConsumerRecord[K, V] => String)
+      : SourceWithContext[ConsumerRecord[K, V], CommittableOffset, Control] =
     Source
       .fromGraph(new SourceWithOffsetContext[K, V](settings, subscription, metadataFromRecord))
       .asSourceWithContext(_._2)
@@ -246,8 +245,7 @@ object Consumer {
   def commitWithMetadataSource[K, V](
       settings: ConsumerSettings[K, V],
       subscription: Subscription,
-      metadataFromRecord: ConsumerRecord[K, V] => String
-  ): Source[CommittableMessage[K, V], Control] =
+      metadataFromRecord: ConsumerRecord[K, V] => String): Source[CommittableMessage[K, V], Control] =
     Source.fromGraph(new CommittableSource[K, V](settings, subscription, metadataFromRecord))
 
   /**
@@ -255,7 +253,7 @@ object Consumer {
    * before being emitted downstream.
    */
   def atMostOnceSource[K, V](settings: ConsumerSettings[K, V],
-                             subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: Subscription): Source[ConsumerRecord[K, V], Control] =
     committableSource[K, V](settings, subscription).mapAsync(1) { m =>
       m.committableOffset.commitInternal().map(_ => m.record)(ExecutionContexts.parasitic)
     }
@@ -268,8 +266,7 @@ object Consumer {
    */
   def plainPartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: AutoSubscription
-  ): Source[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed]), Control] =
+      subscription: AutoSubscription): Source[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed]), Control] =
     Source.fromGraph(new PlainSubSource[K, V](settings, subscription, None, onRevoke = _ => ()))
 
   /**
@@ -285,8 +282,8 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: Set[TopicPartition] => Future[Map[TopicPartition, Long]],
-      onRevoke: Set[TopicPartition] => Unit = _ => ()
-  ): Source[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed]), Control] =
+      onRevoke: Set[TopicPartition] => Unit = _ => ())
+      : Source[(TopicPartition, Source[ConsumerRecord[K, V], NotUsed]), Control] =
     Source.fromGraph(new PlainSubSource[K, V](settings, subscription, Some(getOffsetsOnAssign), onRevoke))
 
   /**
@@ -296,22 +293,20 @@ object Consumer {
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
       getOffsetsOnAssign: Set[TopicPartition] => Future[Map[TopicPartition, Long]],
-      onRevoke: Set[TopicPartition] => Unit = _ => ()
-  ): Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
+      onRevoke: Set[TopicPartition] => Unit = _ => ())
+      : Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
     Source.fromGraph(
       new CommittableSubSource[K, V](settings,
-                                     subscription,
-                                     getOffsetsOnAssign = Some(getOffsetsOnAssign),
-                                     onRevoke = onRevoke)
-    )
+        subscription,
+        getOffsetsOnAssign = Some(getOffsetsOnAssign),
+        onRevoke = onRevoke))
 
   /**
    * The same as [[#plainPartitionedSource]] but with offset commit support.
    */
   def committablePartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: AutoSubscription
-  ): Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
+      subscription: AutoSubscription): Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
     Source.fromGraph(new CommittableSubSource[K, V](settings, subscription))
 
   /**
@@ -320,8 +315,8 @@ object Consumer {
   def commitWithMetadataPartitionedSource[K, V](
       settings: ConsumerSettings[K, V],
       subscription: AutoSubscription,
-      metadataFromRecord: ConsumerRecord[K, V] => String
-  ): Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
+      metadataFromRecord: ConsumerRecord[K, V] => String)
+      : Source[(TopicPartition, Source[CommittableMessage[K, V], NotUsed]), Control] =
     Source.fromGraph(new CommittableSubSource[K, V](settings, subscription, metadataFromRecord))
 
   /**
@@ -329,22 +324,20 @@ object Consumer {
    * a lot of manually assigned topic-partitions and want to keep only one kafka consumer.
    */
   def plainExternalSource[K, V](consumer: ActorRef,
-                                subscription: ManualSubscription): Source[ConsumerRecord[K, V], Control] =
+      subscription: ManualSubscription): Source[ConsumerRecord[K, V], Control] =
     Source.fromGraph(new ExternalPlainSource[K, V](consumer, subscription))
 
   /**
    * The same as [[#plainExternalSource]] but with offset commit support.
    */
   def committableExternalSource[K, V](consumer: ActorRef,
-                                      subscription: ManualSubscription,
-                                      groupId: String,
-                                      commitTimeout: FiniteDuration): Source[CommittableMessage[K, V], Control] =
+      subscription: ManualSubscription,
+      groupId: String,
+      commitTimeout: FiniteDuration): Source[CommittableMessage[K, V], Control] =
     Source.fromGraph(
       new ExternalCommittableSource[K, V](
         consumer,
         groupId,
         commitTimeout,
-        subscription
-      )
-    )
+        subscription))
 }
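
The committableSource above pairs naturally with Committer.sink for at-least-once
processing. A minimal sketch, assuming a local broker and placeholder topic and
group names:

    import akka.Done
    import akka.actor.ActorSystem
    import akka.kafka.{ CommitterSettings, ConsumerSettings, Subscriptions }
    import akka.kafka.scaladsl.Consumer.DrainingControl
    import akka.kafka.scaladsl.{ Committer, Consumer }
    import akka.stream.scaladsl.Keep
    import org.apache.kafka.common.serialization.StringDeserializer

    import scala.concurrent.Future

    object CommittableSourceSketch extends App {
      implicit val system: ActorSystem = ActorSystem("consumer-sketch")
      import system.dispatcher

      val settings =
        ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
          .withBootstrapServers("localhost:9092") // assumed local broker
          .withGroupId("sketch-group")

      // At-least-once: process the record, then hand its offset to the Committer.
      val control: DrainingControl[Done] =
        Consumer
          .committableSource(settings, Subscriptions.topics("sketch-topic"))
          .mapAsync(parallelism = 4) { msg =>
            Future(println(msg.record.value)).map(_ => msg.committableOffset)
          }
          .toMat(Committer.sink(CommitterSettings(system)))(Keep.both)
          .mapMaterializedValue(DrainingControl.apply)
          .run()
    }
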
diff --git a/core/src/main/scala/akka/kafka/scaladsl/DiscoverySupport.scala b/core/src/main/scala/akka/kafka/scaladsl/DiscoverySupport.scala
index 707b4895..fef0c558 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/DiscoverySupport.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/DiscoverySupport.scala
@@ -5,10 +5,10 @@
 
 package akka.kafka.scaladsl
 
-import akka.actor.{ActorSystem, ActorSystemImpl, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ActorSystemImpl, ClassicActorSystemProvider }
 import akka.annotation.InternalApi
-import akka.discovery.{Discovery, ServiceDiscovery}
-import akka.kafka.{ConsumerSettings, ProducerSettings}
+import akka.discovery.{ Discovery, ServiceDiscovery }
+import akka.kafka.{ ConsumerSettings, ProducerSettings }
 import akka.util.JavaDurationConverters._
 import com.typesafe.config.Config
 
@@ -39,8 +39,7 @@ object DiscoverySupport {
   private def bootstrapServers(
       discovery: ServiceDiscovery,
       serviceName: String,
-      lookupTimeout: FiniteDuration
-  )(implicit system: ActorSystem): Future[String] = {
+      lookupTimeout: FiniteDuration)(implicit system: ActorSystem): Future[String] = {
     import system.dispatcher
     discovery.lookup(serviceName, lookupTimeout).map { resolved =>
       resolved.addresses
@@ -73,8 +72,8 @@ object DiscoverySupport {
    * to be used as `bootstrapServers`.
    */
   def consumerBootstrapServers[K, V](
-      config: Config
-  )(implicit system: ClassicActorSystemProvider): ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] = {
+      config: Config)(
+      implicit system: ClassicActorSystemProvider): ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] = {
     val sys: ActorSystem = system.classicSystem
     import sys.dispatcher
     settings =>
@@ -86,8 +85,7 @@ object DiscoverySupport {
 
   // kept for bin-compatibility
   def consumerBootstrapServers[K, V](
-      config: Config
-  )(system: ActorSystem): ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] = {
+      config: Config)(system: ActorSystem): ConsumerSettings[K, V] => Future[ConsumerSettings[K, V]] = {
     implicit val sys: ClassicActorSystemProvider = system
     consumerBootstrapServers(config)
   }
@@ -97,8 +95,8 @@ object DiscoverySupport {
    * to be used as `bootstrapServers`.
    */
   def producerBootstrapServers[K, V](
-      config: Config
-  )(implicit system: ClassicActorSystemProvider): ProducerSettings[K, V] => Future[ProducerSettings[K, V]] = {
+      config: Config)(
+      implicit system: ClassicActorSystemProvider): ProducerSettings[K, V] => Future[ProducerSettings[K, V]] = {
     val sys: ActorSystem = system.classicSystem
     import sys.dispatcher
     settings =>
@@ -110,8 +108,7 @@ object DiscoverySupport {
 
   // kept for bin-compatibility
   def producerBootstrapServers[K, V](config: Config)(
-      system: ActorSystem
-  ): ProducerSettings[K, V] => Future[ProducerSettings[K, V]] = {
+      system: ActorSystem): ProducerSettings[K, V] => Future[ProducerSettings[K, V]] = {
     implicit val sys: ClassicActorSystemProvider = system
     producerBootstrapServers(config)
   }
@@ -120,8 +117,7 @@ object DiscoverySupport {
     system.dynamicAccess.getClassFor("akka.discovery.Discovery$") match {
       case Failure(_: ClassNotFoundException | _: NoClassDefFoundError) =>
         throw new IllegalStateException(
-          s"Akka Discovery is being used but the `akka-discovery` library is not on the classpath, it must be added explicitly. See https://doc.akka.io/docs/alpakka-kafka/current/discovery.html"
-        )
+          s"Akka Discovery is being used but the `akka-discovery` library is not on the classpath, it must be added explicitly. See https://doc.akka.io/docs/alpakka-kafka/current/discovery.html")
       case _ =>
     }
 
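
A brief sketch of how these enrichment functions are wired in, assuming a
hypothetical config section `discovery-consumer` that inherits from
`akka.kafka.consumer` and sets a `service-name`:

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import akka.kafka.scaladsl.DiscoverySupport
    import org.apache.kafka.common.serialization.StringDeserializer

    object DiscoverySketch extends App {
      implicit val system: ActorSystem = ActorSystem("discovery-sketch")

      // Assumed config section:
      //   discovery-consumer: ${akka.kafka.consumer} {
      //     service-name = "kafkaService"
      //   }
      val config = system.settings.config.getConfig("discovery-consumer")

      // Bootstrap servers are looked up via Akka Discovery when the settings are used.
      val consumerSettings =
        ConsumerSettings(config, new StringDeserializer, new StringDeserializer)
          .withEnrichAsync(DiscoverySupport.consumerBootstrapServers(config))
    }
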
diff --git a/core/src/main/scala/akka/kafka/scaladsl/MetadataClient.scala b/core/src/main/scala/akka/kafka/scaladsl/MetadataClient.scala
index 6bd0bdb3..291b90b4 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/MetadataClient.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/MetadataClient.scala
@@ -7,21 +7,20 @@ package akka.kafka.scaladsl
 
 import java.util.concurrent.atomic.AtomicLong
 
-import akka.actor.{ActorRef, ActorSystem, ExtendedActorSystem}
+import akka.actor.{ ActorRef, ActorSystem, ExtendedActorSystem }
 import akka.dispatch.ExecutionContexts
 import akka.kafka.Metadata._
-import akka.kafka.{ConsumerSettings, KafkaConsumerActor}
+import akka.kafka.{ ConsumerSettings, KafkaConsumerActor }
 import akka.pattern.ask
 import akka.util.Timeout
 import org.apache.kafka.clients.consumer.OffsetAndMetadata
-import org.apache.kafka.common.{PartitionInfo, TopicPartition}
+import org.apache.kafka.common.{ PartitionInfo, TopicPartition }
 
-import scala.concurrent.{ExecutionContext, Future}
-import scala.util.{Failure, Success}
+import scala.concurrent.{ ExecutionContext, Future }
+import scala.util.{ Failure, Success }
 
 class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managedActor: Boolean)(
-    implicit ec: ExecutionContext
-) {
+    implicit ec: ExecutionContext) {
 
   def getBeginningOffsets(partitions: Set[TopicPartition]): Future[Map[TopicPartition, Long]] =
     (consumerActor ? GetBeginningOffsets(partitions))(timeout)
@@ -29,7 +28,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   def getBeginningOffsetForPartition(partition: TopicPartition): Future[Long] =
@@ -42,7 +41,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   def getEndOffsetForPartition(partition: TopicPartition): Future[Long] =
@@ -55,7 +54,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   def getPartitionsFor(topic: String): Future[List[PartitionInfo]] =
@@ -64,7 +63,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   @deprecated("use `getCommittedOffsets`", "2.0.3")
@@ -74,7 +73,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   def getCommittedOffsets(partitions: Set[TopicPartition]): Future[Map[TopicPartition, OffsetAndMetadata]] =
@@ -83,7 +82,7 @@ class MetadataClient private (consumerActor: ActorRef, timeout: Timeout, managed
       .map(_.response)
       .flatMap {
         case Success(res) => Future.successful(res)
-        case Failure(e) => Future.failed(e)
+        case Failure(e)   => Future.failed(e)
       }(ExecutionContexts.parasitic)
 
   def close(): Unit =
@@ -100,12 +99,11 @@ object MetadataClient {
 
   def create[K, V](
       consumerSettings: ConsumerSettings[K, V],
-      timeout: Timeout
-  )(implicit system: ActorSystem, ec: ExecutionContext): MetadataClient = {
+      timeout: Timeout)(implicit system: ActorSystem, ec: ExecutionContext): MetadataClient = {
     val consumerActor = system
       .asInstanceOf[ExtendedActorSystem]
       .systemActorOf(KafkaConsumerActor.props(consumerSettings),
-                     s"alpakka-kafka-metadata-client-${actorCount.getAndIncrement()}")
+        s"alpakka-kafka-metadata-client-${actorCount.getAndIncrement()}")
     new MetadataClient(consumerActor, timeout, true)
   }
 }
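
A minimal sketch of creating and using a MetadataClient; broker, topic, and
group names are placeholders:

    import akka.actor.ActorSystem
    import akka.kafka.ConsumerSettings
    import akka.kafka.scaladsl.MetadataClient
    import akka.util.Timeout
    import org.apache.kafka.common.TopicPartition
    import org.apache.kafka.common.serialization.StringDeserializer

    import scala.concurrent.Future
    import scala.concurrent.duration._

    object MetadataClientSketch extends App {
      implicit val system: ActorSystem = ActorSystem("metadata-sketch")
      import system.dispatcher

      val settings =
        ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
          .withBootstrapServers("localhost:9092") // assumed local broker
          .withGroupId("sketch-group")

      val client = MetadataClient.create(settings, Timeout(5.seconds))

      val endOffset: Future[Long] =
        client.getEndOffsetForPartition(new TopicPartition("sketch-topic", 0))

      endOffset.foreach { offset =>
        println(s"end offset: $offset")
        client.close()
      }
    }
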
diff --git a/core/src/main/scala/akka/kafka/scaladsl/Producer.scala b/core/src/main/scala/akka/kafka/scaladsl/Producer.scala
index e6c50af9..f3ade013 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/Producer.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/Producer.scala
@@ -8,11 +8,11 @@ package akka.kafka.scaladsl
 import akka.annotation.ApiMayChange
 import akka.kafka.ConsumerMessage.Committable
 import akka.kafka.ProducerMessage._
-import akka.kafka.internal.{CommittingProducerSinkStage, DefaultProducerStage}
-import akka.kafka.{CommitterSettings, ConsumerMessage, ProducerSettings}
+import akka.kafka.internal.{ CommittingProducerSinkStage, DefaultProducerStage }
+import akka.kafka.{ CommitterSettings, ConsumerMessage, ProducerSettings }
 import akka.stream.ActorAttributes
-import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Flow, FlowWithContext, Keep, Sink }
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.producer.ProducerRecord
 
 import scala.concurrent.Future
@@ -44,12 +44,10 @@ object Producer {
    */
   @deprecated(
     "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
-    "2.0.0"
-  )
+    "2.0.0")
   def plainSink[K, V](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Sink[ProducerRecord[K, V], Future[Done]] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V]): Sink[ProducerRecord[K, V], Future[Done]] =
     plainSink(settings.withProducer(producer))
 
   /**
@@ -70,8 +68,7 @@ object Producer {
    */
   @deprecated("use `committableSink(ProducerSettings, CommitterSettings)` instead", "2.0.0")
   def committableSink[K, V](
-      settings: ProducerSettings[K, V]
-  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
+      settings: ProducerSettings[K, V]): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
     flexiFlow[K, V, ConsumerMessage.Committable](settings)
       .mapAsync(settings.parallelism)(_.passThrough.commitInternal())
       .toMat(Sink.ignore)(Keep.right)
@@ -89,7 +86,6 @@ object Producer {
    *
    * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
    *
-   *
    * Note that there is always a risk that something fails after publishing but before
    * committing, so it is "at-least once delivery" semantics.
    *
@@ -98,8 +94,8 @@ object Producer {
   @deprecated("use `committableSink(ProducerSettings, CommitterSettings)` instead", "2.0.0")
   def committableSink[K, V](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
     committableSink(settings.withProducer(producer))
 
   /**
@@ -119,8 +115,7 @@ object Producer {
    */
   def committableSink[K, V](
       producerSettings: ProducerSettings[K, V],
-      committerSettings: CommitterSettings
-  ): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
+      committerSettings: CommitterSettings): Sink[Envelope[K, V, ConsumerMessage.Committable], Future[Done]] =
     Sink.fromGraph(new CommittingProducerSinkStage(producerSettings, committerSettings))
 
   /**
@@ -141,8 +136,7 @@ object Producer {
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def committableSinkWithOffsetContext[K, V](
       producerSettings: ProducerSettings[K, V],
-      committerSettings: CommitterSettings
-  ): Sink[(Envelope[K, V, _], Committable), Future[Done]] =
+      committerSettings: CommitterSettings): Sink[(Envelope[K, V, _], Committable), Future[Done]] =
     committableSink(producerSettings, committerSettings)
       .contramap {
         case (env, offset) =>
@@ -160,14 +154,11 @@ object Producer {
    */
   @deprecated("prefer flexiFlow over this flow implementation", "0.21")
   def flow[K, V, PassThrough](
-      settings: ProducerSettings[K, V]
-  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] = {
+      settings: ProducerSettings[K, V]): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] = {
     val flow = Flow
       .fromGraph(
         new DefaultProducerStage[K, V, PassThrough, Message[K, V, PassThrough], Result[K, V, PassThrough]](
-          settings
-        )
-      )
+          settings))
       .mapAsync(settings.parallelism)(identity)
 
     flowWithDispatcher(settings, flow)
@@ -189,14 +180,11 @@ object Producer {
    * be committed later in the flow.
    */
   def flexiFlow[K, V, PassThrough](
-      settings: ProducerSettings[K, V]
-  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] = {
+      settings: ProducerSettings[K, V]): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] = {
     val flow = Flow
       .fromGraph(
         new DefaultProducerStage[K, V, PassThrough, Envelope[K, V, PassThrough], Results[K, V, PassThrough]](
-          settings
-        )
-      )
+          settings))
       .mapAsync(settings.parallelism)(identity)
 
     flowWithDispatcherEnvelope(settings, flow)
@@ -221,12 +209,11 @@ object Producer {
    */
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def flowWithContext[K, V, C](
-      settings: ProducerSettings[K, V]
-  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
+      settings: ProducerSettings[K, V]): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
     flexiFlow[K, V, C](settings)
-      .asFlowWithContext[Envelope[K, V, NotUsed], C, C]({
+      .asFlowWithContext[Envelope[K, V, NotUsed], C, C] {
         case (env, c) => env.withPassThrough(c)
-      })(res => res.passThrough)
+      }(res => res.passThrough)
 
   /**
    * Create a flow to publish records to Kafka topics and then pass it on.
@@ -242,8 +229,8 @@ object Producer {
   @deprecated("prefer flexiFlow over this flow implementation", "0.21")
   def flow[K, V, PassThrough](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
     flow(settings.withProducer(producer))
 
   /**
@@ -265,12 +252,11 @@ object Producer {
    */
   @deprecated(
     "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
-    "2.0.0"
-  )
+    "2.0.0")
   def flexiFlow[K, V, PassThrough](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
     flexiFlow(settings.withProducer(producer))
 
   /**
@@ -294,26 +280,23 @@ object Producer {
    */
   @deprecated(
     "Pass in external or shared producer using ProducerSettings.withProducerFactory or ProducerSettings.withProducer",
-    "2.0.0"
-  )
+    "2.0.0")
   @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
   def flowWithContext[K, V, C](
       settings: ProducerSettings[K, V],
-      producer: org.apache.kafka.clients.producer.Producer[K, V]
-  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
+      producer: org.apache.kafka.clients.producer.Producer[K, V])
+      : FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
     flowWithContext(settings.withProducer(producer))
 
   private def flowWithDispatcher[PassThrough, V, K](
       settings: ProducerSettings[K, V],
-      flow: Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed]
-  ) =
+      flow: Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed]) =
     if (settings.dispatcher.isEmpty) flow
     else flow.withAttributes(ActorAttributes.dispatcher(settings.dispatcher))
 
   private def flowWithDispatcherEnvelope[PassThrough, V, K](
       settings: ProducerSettings[K, V],
-      flow: Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]
-  ) =
+      flow: Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]) =
     if (settings.dispatcher.isEmpty) flow
     else flow.withAttributes(ActorAttributes.dispatcher(settings.dispatcher))
 }
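
A minimal sketch of the non-deprecated plainSink in use; broker address and
topic are placeholders:

    import akka.actor.ActorSystem
    import akka.kafka.ProducerSettings
    import akka.kafka.scaladsl.Producer
    import akka.stream.scaladsl.Source
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.StringSerializer

    object PlainSinkSketch extends App {
      implicit val system: ActorSystem = ActorSystem("producer-sketch")

      val settings =
        ProducerSettings(system, new StringSerializer, new StringSerializer)
          .withBootstrapServers("localhost:9092") // assumed local broker

      // Publish ten records and complete when all sends are acknowledged.
      Source(1 to 10)
        .map(n => new ProducerRecord[String, String]("sketch-topic", s"key-$n", s"value-$n"))
        .runWith(Producer.plainSink(settings))
    }
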
diff --git a/core/src/main/scala/akka/kafka/scaladsl/SendProducer.scala b/core/src/main/scala/akka/kafka/scaladsl/SendProducer.scala
index 24bc73a9..60aa0420 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/SendProducer.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/SendProducer.scala
@@ -6,13 +6,13 @@
 package akka.kafka.scaladsl
 
 import akka.Done
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
 import akka.kafka.ProducerMessage._
 import akka.kafka.ProducerSettings
 import akka.util.JavaDurationConverters._
-import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata}
+import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
 
-import scala.concurrent.{ExecutionContext, Future, Promise}
+import scala.concurrent.{ ExecutionContext, Future, Promise }
 
 /**
  * Utility class for producing to Kafka without using Akka Streams.
@@ -63,8 +63,8 @@ final class SendProducer[K, V] private (val settings: ProducerSettings[K, V], sy
   }
 
   private def sendSingle[R](producer: org.apache.kafka.clients.producer.Producer[K, V],
-                            record: ProducerRecord[K, V],
-                            success: RecordMetadata => R): Future[R] = {
+      record: ProducerRecord[K, V],
+      success: RecordMetadata => R): Future[R] = {
     val result = Promise[R]()
     producer.send(
       record,
@@ -75,8 +75,7 @@ final class SendProducer[K, V] private (val settings: ProducerSettings[K, V], sy
           else
             result.failure(exception)
         }
-      }
-    )
+      })
     result.future
   }
 
@@ -89,7 +88,8 @@ final class SendProducer[K, V] private (val settings: ProducerSettings[K, V], sy
       producer.flush()
       producer.close(settings.closeTimeout.asJava)
       Done
-    } else Future.successful(Done)
+    }
+    else Future.successful(Done)
   }
 
   override def toString: String = s"SendProducer($settings)"
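
A minimal sketch of SendProducer for one-off sends outside a stream; broker
address and topic are placeholders:

    import akka.actor.ActorSystem
    import akka.kafka.ProducerSettings
    import akka.kafka.scaladsl.SendProducer
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.StringSerializer

    object SendProducerSketch extends App {
      implicit val system: ActorSystem = ActorSystem("send-producer-sketch")
      import system.dispatcher

      val settings =
        ProducerSettings(system, new StringSerializer, new StringSerializer)
          .withBootstrapServers("localhost:9092") // assumed local broker

      val producer = SendProducer(settings)

      producer
        .send(new ProducerRecord("sketch-topic", "key", "value"))
        .andThen { case _ => producer.close() } // flush and release the producer
        .foreach(metadata => println(s"written to ${metadata.topic}-${metadata.partition}"))
    }
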
diff --git a/core/src/main/scala/akka/kafka/scaladsl/Transactional.scala b/core/src/main/scala/akka/kafka/scaladsl/Transactional.scala
index afd0ccca..73c45d2b 100644
--- a/core/src/main/scala/akka/kafka/scaladsl/Transactional.scala
+++ b/core/src/main/scala/akka/kafka/scaladsl/Transactional.scala
@@ -5,8 +5,8 @@
 
 package akka.kafka.scaladsl
 
-import akka.annotation.{ApiMayChange, InternalApi}
-import akka.kafka.ConsumerMessage.{PartitionOffset, TransactionalMessage}
+import akka.annotation.{ ApiMayChange, InternalApi }
+import akka.kafka.ConsumerMessage.{ PartitionOffset, TransactionalMessage }
 import akka.kafka.ProducerMessage._
 import akka.kafka.internal.{
   TransactionalProducerStage,
@@ -15,10 +15,10 @@ import akka.kafka.internal.{
   TransactionalSubSource
 }
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.{AutoSubscription, ConsumerMessage, ConsumerSettings, ProducerSettings, Subscription}
+import akka.kafka.{ AutoSubscription, ConsumerMessage, ConsumerSettings, ProducerSettings, Subscription }
 import akka.stream.ActorAttributes
-import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink, Source, SourceWithContext}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Flow, FlowWithContext, Keep, Sink, Source, SourceWithContext }
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 import org.apache.kafka.common.TopicPartition
 
@@ -34,7 +34,7 @@ object Transactional {
    * necessary to use the [[Transactional.sink]] or [[Transactional.flow]] (for passthrough).
    */
   def source[K, V](settings: ConsumerSettings[K, V],
-                   subscription: Subscription): Source[TransactionalMessage[K, V], Control] =
+      subscription: Subscription): Source[TransactionalMessage[K, V], Control] =
     Source.fromGraph(new TransactionalSource[K, V](settings, subscription))
 
   /**
@@ -46,8 +46,7 @@ object Transactional {
   @ApiMayChange
   def sourceWithOffsetContext[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: Subscription
-  ): SourceWithContext[ConsumerRecord[K, V], PartitionOffset, Control] =
+      subscription: Subscription): SourceWithContext[ConsumerRecord[K, V], PartitionOffset, Control] =
     Source
       .fromGraph(new TransactionalSourceWithOffsetContext[K, V](settings, subscription))
       .asSourceWithContext(_._2)
@@ -68,8 +67,7 @@ object Transactional {
   @InternalApi
   private[kafka] def partitionedSource[K, V](
       settings: ConsumerSettings[K, V],
-      subscription: AutoSubscription
-  ): Source[(TopicPartition, Source[TransactionalMessage[K, V], NotUsed]), Control] =
+      subscription: AutoSubscription): Source[(TopicPartition, Source[TransactionalMessage[K, V], NotUsed]), Control] =
     Source.fromGraph(new TransactionalSubSource[K, V](settings, subscription))
 
   /**
@@ -78,8 +76,7 @@ object Transactional {
    */
   def sink[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Sink[Envelope[K, V, ConsumerMessage.PartitionOffset], Future[Done]] =
+      transactionalId: String): Sink[Envelope[K, V, ConsumerMessage.PartitionOffset], Future[Done]] =
     flow(settings, transactionalId).toMat(Sink.ignore)(Keep.right)
 
   /**
@@ -91,8 +88,7 @@ object Transactional {
   @ApiMayChange
   def sinkWithOffsetContext[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Sink[(Envelope[K, V, NotUsed], PartitionOffset), Future[Done]] =
+      transactionalId: String): Sink[(Envelope[K, V, NotUsed], PartitionOffset), Future[Done]] =
     sink(settings, transactionalId)
       .contramap {
         case (env, offset) =>
@@ -106,15 +102,14 @@ object Transactional {
    */
   def flow[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): Flow[Envelope[K, V, ConsumerMessage.PartitionOffset], Results[K, V, ConsumerMessage.PartitionOffset], NotUsed] = {
+      transactionalId: String): Flow[Envelope[K, V, ConsumerMessage.PartitionOffset], Results[K, V,
+    ConsumerMessage.PartitionOffset], NotUsed] = {
     require(transactionalId != null && transactionalId.length > 0, "You must define a Transactional id.")
     require(settings.producerFactorySync.isEmpty, "You cannot use a shared or external producer factory.")
 
     val flow = Flow
       .fromGraph(
-        new TransactionalProducerStage[K, V, ConsumerMessage.PartitionOffset](settings, transactionalId)
-      )
+        new TransactionalProducerStage[K, V, ConsumerMessage.PartitionOffset](settings, transactionalId))
       .mapAsync(settings.parallelism)(identity)
 
     flowWithDispatcher(settings, flow)
@@ -133,24 +128,20 @@ object Transactional {
   @ApiMayChange
   def flowWithOffsetContext[K, V](
       settings: ProducerSettings[K, V],
-      transactionalId: String
-  ): FlowWithContext[Envelope[K, V, NotUsed],
-                     ConsumerMessage.PartitionOffset,
-                     Results[K, V, ConsumerMessage.PartitionOffset],
-                     ConsumerMessage.PartitionOffset,
-                     NotUsed] = {
+      transactionalId: String): FlowWithContext[Envelope[K, V, NotUsed],
+    ConsumerMessage.PartitionOffset, Results[K, V, ConsumerMessage.PartitionOffset],
+    ConsumerMessage.PartitionOffset, NotUsed] = {
     val noContext: Flow[Envelope[K, V, PartitionOffset], Results[K, V, PartitionOffset], NotUsed] =
       flow(settings, transactionalId)
     noContext
-      .asFlowWithContext[Envelope[K, V, NotUsed], PartitionOffset, PartitionOffset]({
+      .asFlowWithContext[Envelope[K, V, NotUsed], PartitionOffset, PartitionOffset] {
         case (env, c) => env.withPassThrough(c)
-      })(res => res.passThrough)
+      }(res => res.passThrough)
   }
 
   private def flowWithDispatcher[PassThrough, V, K](
       settings: ProducerSettings[K, V],
-      flow: Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]
-  ) =
+      flow: Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]) =
     if (settings.dispatcher.isEmpty) flow
     else flow.withAttributes(ActorAttributes.dispatcher(settings.dispatcher))
 }
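
A minimal consume-transform-produce sketch over the Transactional API; broker,
topics, and the transactional id are placeholders:

    import akka.actor.ActorSystem
    import akka.kafka.{ ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions }
    import akka.kafka.scaladsl.Transactional
    import org.apache.kafka.clients.producer.ProducerRecord
    import org.apache.kafka.common.serialization.{ StringDeserializer, StringSerializer }

    object TransactionalSketch extends App {
      implicit val system: ActorSystem = ActorSystem("transactional-sketch")

      val consumerSettings =
        ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
          .withBootstrapServers("localhost:9092") // assumed local broker
          .withGroupId("sketch-group")

      val producerSettings =
        ProducerSettings(system, new StringSerializer, new StringSerializer)
          .withBootstrapServers("localhost:9092")

      // Consume, transform, and produce within a Kafka transaction;
      // the source's partition offset rides along as the pass-through.
      Transactional
        .source(consumerSettings, Subscriptions.topics("source-topic"))
        .map { msg =>
          ProducerMessage.single(
            new ProducerRecord("sink-topic", msg.record.key, msg.record.value),
            msg.partitionOffset)
        }
        .runWith(Transactional.sink(producerSettings, "sketch-transactional-id"))
    }
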
diff --git a/project/AutomaticModuleName.scala b/project/AutomaticModuleName.scala
index 2c48a005..d2703f9d 100644
--- a/project/AutomaticModuleName.scala
+++ b/project/AutomaticModuleName.scala
@@ -1,4 +1,4 @@
-import sbt.{Def, _}
+import sbt.{ Def, _ }
 import sbt.Keys._
 
 /**
@@ -13,6 +13,5 @@ object AutomaticModuleName {
   private val AutomaticModuleName = "Automatic-Module-Name"
 
   def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] = Seq(
-    Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName → name)
-  )
+    Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName -> name))
 }
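
For reference, a sketch of how this helper is applied from build.sbt; the module
name shown is illustrative:

    lazy val core = project
      .settings(AutomaticModuleName.settings("akka.stream.alpakka.kafka"))
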
diff --git a/project/VersionGenerator.scala b/project/VersionGenerator.scala
index 8331bdc9..ca5cbe76 100644
--- a/project/VersionGenerator.scala
+++ b/project/VersionGenerator.scala
@@ -13,17 +13,14 @@ object VersionGenerator {
       resourceGenerators += generateVersion(resourceManaged, _ / "version.conf", """|akka.kafka.version = "%s"
          |"""),
       sourceGenerators += generateVersion(
-          sourceManaged,
-          _ / "akka" / "kafka" / "Version.scala",
-          """|package akka.kafka
+        sourceManaged,
+        _ / "akka" / "kafka" / "Version.scala",
+        """|package akka.kafka
          |
          |object Version {
          |  val current: String = "%s"
          |}
-         |"""
-        )
-    )
-  )
+         |""")))
 
   def generateVersion(dir: SettingKey[File], locate: File => File, template: String) = Def.task[Seq[File]] {
     val file = locate(dir.value)
diff --git a/testkit/src/main/scala/akka/kafka/testkit/ConsumerResultFactory.scala b/testkit/src/main/scala/akka/kafka/testkit/ConsumerResultFactory.scala
index 5b9c1435..56951839 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/ConsumerResultFactory.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/ConsumerResultFactory.scala
@@ -8,9 +8,9 @@ package akka.kafka.testkit
 import akka.Done
 import akka.annotation.ApiMayChange
 import akka.kafka.ConsumerMessage
-import akka.kafka.ConsumerMessage.{CommittableOffset, GroupTopicPartition, PartitionOffsetCommittedMarker}
-import akka.kafka.internal.{CommittableOffsetImpl, KafkaAsyncConsumerCommitterRef}
-import org.apache.kafka.clients.consumer.{ConsumerRecord, OffsetAndMetadata}
+import akka.kafka.ConsumerMessage.{ CommittableOffset, GroupTopicPartition, PartitionOffsetCommittedMarker }
+import akka.kafka.internal.{ CommittableOffsetImpl, KafkaAsyncConsumerCommitterRef }
+import org.apache.kafka.clients.consumer.{ ConsumerRecord, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 
 import scala.concurrent.Future
@@ -22,8 +22,7 @@ import scala.concurrent.Future
 object ConsumerResultFactory {
 
   val fakeCommitter: KafkaAsyncConsumerCommitterRef = new KafkaAsyncConsumerCommitterRef(null, null)(
-    ec = scala.concurrent.ExecutionContext.global
-  ) {
+    ec = scala.concurrent.ExecutionContext.global) {
     private val done = Future.successful(Done)
 
     override def commitSingle(topicPartition: TopicPartition, offset: OffsetAndMetadata): Future[Done] = done
@@ -39,24 +38,24 @@ object ConsumerResultFactory {
   def partitionOffset(key: GroupTopicPartition, offset: Long) = ConsumerMessage.PartitionOffset(key, offset)
 
   def committableOffset(groupId: String,
-                        topic: String,
-                        partition: Int,
-                        offset: Long,
-                        metadata: String): ConsumerMessage.CommittableOffset =
+      topic: String,
+      partition: Int,
+      offset: Long,
+      metadata: String): ConsumerMessage.CommittableOffset =
     committableOffset(partitionOffset(groupId, topic, partition, offset), metadata)
 
   def committableOffset(partitionOffset: ConsumerMessage.PartitionOffset,
-                        metadata: String): ConsumerMessage.CommittableOffset =
+      metadata: String): ConsumerMessage.CommittableOffset =
     CommittableOffsetImpl(partitionOffset, metadata)(fakeCommitter)
 
   def committableMessage[K, V](
       record: ConsumerRecord[K, V],
-      committableOffset: CommittableOffset
-  ): ConsumerMessage.CommittableMessage[K, V] = ConsumerMessage.CommittableMessage(record, committableOffset)
+      committableOffset: CommittableOffset): ConsumerMessage.CommittableMessage[K, V] =
+    ConsumerMessage.CommittableMessage(record, committableOffset)
 
   def transactionalMessage[K, V](
       record: ConsumerRecord[K, V],
-      partitionOffset: PartitionOffsetCommittedMarker
-  ): ConsumerMessage.TransactionalMessage[K, V] = ConsumerMessage.TransactionalMessage(record, partitionOffset)
+      partitionOffset: PartitionOffsetCommittedMarker): ConsumerMessage.TransactionalMessage[K, V] =
+    ConsumerMessage.TransactionalMessage(record, partitionOffset)
 
 }
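
A minimal sketch of fabricating a committable message for broker-free stream
tests; topic, group, and offset values are placeholders:

    import akka.actor.ActorSystem
    import akka.kafka.testkit.ConsumerResultFactory
    import akka.stream.scaladsl.{ Sink, Source }
    import org.apache.kafka.clients.consumer.ConsumerRecord

    object ConsumerResultFactorySketch extends App {
      implicit val system: ActorSystem = ActorSystem("testkit-sketch")

      // Fabricate a committable message without a running broker; the fake
      // committer turns commits into no-ops.
      val message = ConsumerResultFactory.committableMessage(
        new ConsumerRecord("sketch-topic", 0, 17L, "key", "value"),
        ConsumerResultFactory.committableOffset("sketch-group", "sketch-topic", 0, 17L, "metadata"))

      Source.single(message)
        .map(_.record.value)
        .runWith(Sink.foreach(println))
    }
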
diff --git a/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitSettings.scala b/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitSettings.scala
index 101f8008..ec51367a 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitSettings.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitSettings.scala
@@ -11,8 +11,8 @@ import com.typesafe.config.Config
 import scala.concurrent.duration._
 
 class KafkaTestkitSettings private (val clusterTimeout: FiniteDuration,
-                                    val consumerGroupTimeout: FiniteDuration,
-                                    val checkInterval: FiniteDuration) {
+    val consumerGroupTimeout: FiniteDuration,
+    val checkInterval: FiniteDuration) {
 
   /**
    * Java Api
diff --git a/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitTestcontainersSettings.scala b/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitTestcontainersSettings.scala
index 0ba10707..07dfc28c 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitTestcontainersSettings.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/KafkaTestkitTestcontainersSettings.scala
@@ -39,8 +39,7 @@ final class KafkaTestkitTestcontainersSettings private (
       new Consumer[GenericContainer[_]]() {
         override def accept(arg: GenericContainer[_]): Unit = ()
       },
-    val configureSchemaRegistry: GenericContainer[_] => Unit = _ => ()
-) {
+    val configureSchemaRegistry: GenericContainer[_] => Unit = _ => ()) {
 
   /**
    * Java Api
@@ -156,8 +155,8 @@ final class KafkaTestkitTestcontainersSettings private (
    * Replaces the default Kafka testcontainers configuration logic
    */
   def withConfigureKafkaConsumer(
-      configureKafkaConsumer: java.util.function.Consumer[java.util.Collection[AlpakkaKafkaContainer]]
-  ): KafkaTestkitTestcontainersSettings = copy(configureKafkaConsumer = configureKafkaConsumer)
+      configureKafkaConsumer: java.util.function.Consumer[java.util.Collection[AlpakkaKafkaContainer]])
+      : KafkaTestkitTestcontainersSettings = copy(configureKafkaConsumer = configureKafkaConsumer)
 
   /**
    * Replaces the default Kafka testcontainers configuration logic
@@ -177,8 +176,8 @@ final class KafkaTestkitTestcontainersSettings private (
    * Replaces the default ZooKeeper testcontainers configuration logic
    */
   def withConfigureZooKeeperConsumer(
-      configureZooKeeperConsumer: java.util.function.Consumer[GenericContainer[_]]
-  ): KafkaTestkitTestcontainersSettings =
+      configureZooKeeperConsumer: java.util.function.Consumer[GenericContainer[_]])
+      : KafkaTestkitTestcontainersSettings =
     copy(configureZooKeeperConsumer = configureZooKeeperConsumer)
 
   /**
@@ -186,8 +185,8 @@ final class KafkaTestkitTestcontainersSettings private (
    * Replaces the default schema registry testcontainers configuration logic
    */
   def withConfigureSchemaRegistry(
-      configureSchemaRegistry: GenericContainer[_] => Unit
-  ): KafkaTestkitTestcontainersSettings = copy(configureSchemaRegistry = configureSchemaRegistry)
+      configureSchemaRegistry: GenericContainer[_] => Unit): KafkaTestkitTestcontainersSettings =
+    copy(configureSchemaRegistry = configureSchemaRegistry)
 
   /**
    * Use Schema Registry container.
@@ -247,25 +246,25 @@ final class KafkaTestkitTestcontainersSettings private (
         configureKafkaConsumer,
       configureZooKeeper: GenericContainer[_] => Unit = configureZooKeeper,
       configureZooKeeperConsumer: java.util.function.Consumer[GenericContainer[_]] = configureZooKeeperConsumer,
-      configureSchemaRegistry: GenericContainer[_] => Unit = configureSchemaRegistry
-  ): KafkaTestkitTestcontainersSettings =
+      configureSchemaRegistry: GenericContainer[_] => Unit = configureSchemaRegistry)
+      : KafkaTestkitTestcontainersSettings =
     new KafkaTestkitTestcontainersSettings(zooKeeperImage,
-                                           zooKeeperImageTag,
-                                           kafkaImage,
-                                           kafkaImageTag,
-                                           schemaRegistryImage,
-                                           schemaRegistryImageTag,
-                                           numBrokers,
-                                           internalTopicsReplicationFactor,
-                                           useSchemaRegistry,
-                                           containerLogging,
-                                           clusterStartTimeout,
-                                           readinessCheckTimeout,
-                                           configureKafka,
-                                           configureKafkaConsumer,
-                                           configureZooKeeper,
-                                           configureZooKeeperConsumer,
-                                           configureSchemaRegistry)
+      zooKeeperImageTag,
+      kafkaImage,
+      kafkaImageTag,
+      schemaRegistryImage,
+      schemaRegistryImageTag,
+      numBrokers,
+      internalTopicsReplicationFactor,
+      useSchemaRegistry,
+      containerLogging,
+      clusterStartTimeout,
+      readinessCheckTimeout,
+      configureKafka,
+      configureKafkaConsumer,
+      configureZooKeeper,
+      configureZooKeeperConsumer,
+      configureSchemaRegistry)
 
   override def toString: String =
     "KafkaTestkitTestcontainersSettings(" +
@@ -317,17 +316,17 @@ object KafkaTestkitTestcontainersSettings {
     val readinessCheckTimeout = config.getDuration("readiness-check-timeout").asScala
 
     new KafkaTestkitTestcontainersSettings(zooKeeperImage,
-                                           zooKeeperImageTag,
-                                           kafkaImage,
-                                           kafkaImageTag,
-                                           schemaRegistryImage,
-                                           schemaRegistryImageTag,
-                                           numBrokers,
-                                           internalTopicsReplicationFactor,
-                                           useSchemaRegistry,
-                                           containerLogging,
-                                           clusterStartTimeout,
-                                           readinessCheckTimeout)
+      zooKeeperImageTag,
+      kafkaImage,
+      kafkaImageTag,
+      schemaRegistryImage,
+      schemaRegistryImageTag,
+      numBrokers,
+      internalTopicsReplicationFactor,
+      useSchemaRegistry,
+      containerLogging,
+      clusterStartTimeout,
+      readinessCheckTimeout)
   }
 
   /**
diff --git a/testkit/src/main/scala/akka/kafka/testkit/ProducerResultFactory.scala b/testkit/src/main/scala/akka/kafka/testkit/ProducerResultFactory.scala
index fc186a69..faf82be7 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/ProducerResultFactory.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/ProducerResultFactory.scala
@@ -7,7 +7,7 @@ package akka.kafka.testkit
 
 import akka.annotation.ApiMayChange
 import akka.kafka.ProducerMessage
-import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
+import org.apache.kafka.clients.producer.{ ProducerRecord, RecordMetadata }
 import org.apache.kafka.common.TopicPartition
 
 import scala.jdk.CollectionConverters._
@@ -30,39 +30,37 @@ object ProducerResultFactory {
     new RecordMetadata(new TopicPartition(topic, partition), offset, 0, 12345L, 2, 2)
 
   def result[K, V, PassThrough](
-      message: ProducerMessage.Message[K, V, PassThrough]
-  ): ProducerMessage.Result[K, V, PassThrough] = ProducerMessage.Result(recordMetadata(message.record), message)
+      message: ProducerMessage.Message[K, V, PassThrough]): ProducerMessage.Result[K, V, PassThrough] =
+    ProducerMessage.Result(recordMetadata(message.record), message)
 
   def result[K, V, PassThrough](
       metadata: RecordMetadata,
-      message: ProducerMessage.Message[K, V, PassThrough]
-  ): ProducerMessage.Result[K, V, PassThrough] = ProducerMessage.Result(metadata, message)
+      message: ProducerMessage.Message[K, V, PassThrough]): ProducerMessage.Result[K, V, PassThrough] =
+    ProducerMessage.Result(metadata, message)
 
   def multiResultPart[K, V](
       metadata: RecordMetadata,
-      record: ProducerRecord[K, V]
-  ): ProducerMessage.MultiResultPart[K, V] = ProducerMessage.MultiResultPart(metadata, record)
+      record: ProducerRecord[K, V]): ProducerMessage.MultiResultPart[K, V] =
+    ProducerMessage.MultiResultPart(metadata, record)
 
   def multiResult[K, V, PassThrough](
       parts: immutable.Seq[ProducerMessage.MultiResultPart[K, V]],
-      passThrough: PassThrough
-  ): ProducerMessage.MultiResult[K, V, PassThrough] = ProducerMessage.MultiResult(parts, passThrough)
+      passThrough: PassThrough): ProducerMessage.MultiResult[K, V, PassThrough] =
+    ProducerMessage.MultiResult(parts, passThrough)
 
   def multiResult[K, V, PassThrough](
-      message: ProducerMessage.MultiMessage[K, V, PassThrough]
-  ): ProducerMessage.MultiResult[K, V, PassThrough] =
+      message: ProducerMessage.MultiMessage[K, V, PassThrough]): ProducerMessage.MultiResult[K, V, PassThrough] =
     ProducerResultFactory.multiResult(
       message.records.map(r => ProducerResultFactory.multiResultPart(recordMetadata(r), r)),
-      message.passThrough
-    )
+      message.passThrough)
 
   /** Java API */
   def multiResult[K, V, PassThrough](
       parts: java.util.Collection[ProducerMessage.MultiResultPart[K, V]],
-      passThrough: PassThrough
-  ): ProducerMessage.MultiResult[K, V, PassThrough] = ProducerMessage.MultiResult(parts.asScala.toList, passThrough)
+      passThrough: PassThrough): ProducerMessage.MultiResult[K, V, PassThrough] =
+    ProducerMessage.MultiResult(parts.asScala.toList, passThrough)
 
   def passThroughResult[K, V, PassThrough](
-      passThrough: PassThrough
-  ): ProducerMessage.PassThroughResult[K, V, PassThrough] = ProducerMessage.PassThroughResult(passThrough)
+      passThrough: PassThrough): ProducerMessage.PassThroughResult[K, V, PassThrough] =
+    ProducerMessage.PassThroughResult(passThrough)
 }
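
A minimal sketch using these factories to stand in for a producer flow in tests;
the helper name mockProducerFlow is illustrative:

    import akka.NotUsed
    import akka.kafka.ProducerMessage
    import akka.kafka.testkit.ProducerResultFactory
    import akka.stream.scaladsl.Flow

    object ProducerResultFactorySketch {
      // Echoes each message back as a successful Result with fabricated
      // record metadata, without touching a broker.
      def mockProducerFlow[K, V, PassThrough]: Flow[
          ProducerMessage.Message[K, V, PassThrough],
          ProducerMessage.Result[K, V, PassThrough],
          NotUsed] =
        Flow[ProducerMessage.Message[K, V, PassThrough]]
          .map(msg => ProducerResultFactory.result(msg))
    }
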
diff --git a/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKit.scala b/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKit.scala
index 22d148d2..d6cbb363 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKit.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKit.scala
@@ -12,10 +12,10 @@ import java.util.Arrays
 
 import akka.actor.ActorSystem
 import akka.kafka.testkit.KafkaTestkitSettings
-import akka.kafka.{CommitterSettings, ConsumerSettings, ProducerSettings}
-import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewTopic}
+import akka.kafka.{ CommitterSettings, ConsumerSettings, ProducerSettings }
+import org.apache.kafka.clients.admin.{ Admin, AdminClientConfig, NewTopic }
 import org.apache.kafka.clients.consumer.ConsumerConfig
-import org.apache.kafka.common.serialization.{Deserializer, Serializer, StringDeserializer, StringSerializer}
+import org.apache.kafka.common.serialization.{ Deserializer, Serializer, StringDeserializer, StringSerializer }
 import org.slf4j.Logger
 
 import scala.jdk.CollectionConverters._
@@ -43,7 +43,7 @@ trait KafkaTestKit {
   def consumerDefaults: ConsumerSettings[String, String] = consumerDefaults(StringDeserializer, StringDeserializer)
 
   def consumerDefaults[K, V](keyDeserializer: Deserializer[K],
-                             valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
+      valueDeserializer: Deserializer[V]): ConsumerSettings[K, V] =
     ConsumerSettings(system, keyDeserializer, valueDeserializer)
       .withBootstrapServers(bootstrapServers)
       .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
@@ -98,8 +98,7 @@ trait KafkaTestKit {
   def adminClient: Admin = {
     assert(
       adminClientVar != null,
-      "admin client not created, be sure to call setupAdminClient() and cleanupAdminClient()"
-    )
+      "admin client not created, be sure to call setupAdminClient() and cleanupAdminClient()")
     adminClientVar
   }
 
@@ -161,8 +160,7 @@ trait KafkaTestKit {
       suffix: Int,
       partitions: Int,
       replication: Int,
-      config: scala.collection.Map[String, String]
-  ): String =
+      config: scala.collection.Map[String, String]): String =
     createTopic(suffix, partitions, replication, config.asJava)
 
   /**
@@ -175,8 +173,7 @@ trait KafkaTestKit {
   def createTopic(suffix: Int, partitions: Int, replication: Int, config: java.util.Map[String, String]): String = {
     val topicName = createTopicName(suffix)
     val createResult = adminClient.createTopics(
-      Arrays.asList(new NewTopic(topicName, partitions, replication.toShort).configs(config))
-    )
+      Arrays.asList(new NewTopic(topicName, partitions, replication.toShort).configs(config)))
     createResult.all().get(10, TimeUnit.SECONDS)
     topicName
   }
@@ -200,6 +197,5 @@ object KafkaTestKitClass {
   def createReplicationFactorBrokerProps(replicationFactor: Int): Map[String, String] = Map(
     "offsets.topic.replication.factor" -> s"$replicationFactor",
     "transaction.state.log.replication.factor" -> s"$replicationFactor",
-    "transaction.state.log.min.isr" -> s"$replicationFactor"
-  )
+    "transaction.state.log.min.isr" -> s"$replicationFactor")
 }
diff --git a/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKitChecks.scala b/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKitChecks.scala
index be8f86e9..1a0e4f83 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKitChecks.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/internal/KafkaTestKitChecks.scala
@@ -18,37 +18,33 @@ import org.slf4j.Logger
 
 import scala.annotation.tailrec
 import scala.concurrent.duration.FiniteDuration
-import scala.util.{Failure, Success, Try}
+import scala.util.{ Failure, Success, Try }
 
 object KafkaTestKitChecks {
   def waitUntilCluster(timeout: FiniteDuration,
-                       sleepInBetween: FiniteDuration,
-                       adminClient: Admin,
-                       predicate: DescribeClusterResult => Boolean,
-                       log: Logger): Unit =
+      sleepInBetween: FiniteDuration,
+      adminClient: Admin,
+      predicate: DescribeClusterResult => Boolean,
+      log: Logger): Unit =
     periodicalCheck("cluster state", timeout, sleepInBetween)(() => adminClient.describeCluster())(predicate)(log)
 
   def waitUntilConsumerGroup(groupId: String,
-                             timeout: FiniteDuration,
-                             sleepInBetween: FiniteDuration,
-                             adminClient: Admin,
-                             predicate: ConsumerGroupDescription => Boolean,
-                             log: Logger): Unit =
-    periodicalCheck("consumer group state", timeout, sleepInBetween)(
-      () =>
-        adminClient
-          .describeConsumerGroups(
-            Collections.singleton(groupId),
-            new DescribeConsumerGroupsOptions().timeoutMs(timeout.toMillis.toInt)
-          )
-          .describedGroups()
-          .get(groupId)
-          .get(timeout.toMillis, TimeUnit.MILLISECONDS)
-    )(predicate)(log)
+      timeout: FiniteDuration,
+      sleepInBetween: FiniteDuration,
+      adminClient: Admin,
+      predicate: ConsumerGroupDescription => Boolean,
+      log: Logger): Unit =
+    periodicalCheck("consumer group state", timeout, sleepInBetween)(() =>
+      adminClient
+        .describeConsumerGroups(
+          Collections.singleton(groupId),
+          new DescribeConsumerGroupsOptions().timeoutMs(timeout.toMillis.toInt))
+        .describedGroups()
+        .get(groupId)
+        .get(timeout.toMillis, TimeUnit.MILLISECONDS))(predicate)(log)
 
   def periodicalCheck[T](description: String, timeout: FiniteDuration, sleepInBetween: FiniteDuration)(
-      data: () => T
-  )(predicate: T => Boolean)(log: Logger): Unit = {
+      data: () => T)(predicate: T => Boolean)(log: Logger): Unit = {
     val maxTries = (timeout / sleepInBetween).toInt
 
     @tailrec def check(triesLeft: Int): Unit =
@@ -62,8 +58,7 @@ object KafkaTestKitChecks {
           check(triesLeft - 1)
         case Success(false) =>
           throw new Error(
-            s"Timeout while waiting for desired $description. Tried [$maxTries] times, slept [$sleepInBetween] in between."
-          )
+            s"Timeout while waiting for desired $description. Tried [$maxTries] times, slept [$sleepInBetween] in between.")
         case Failure(ex) =>
           throw ex
         case Success(true) => // predicate has been fulfilled, stop checking
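
A sketch of periodicalCheck used directly, polling an Admin client until a topic
appears; the helper name waitForTopic is illustrative:

    import java.util.concurrent.TimeUnit

    import akka.kafka.testkit.internal.KafkaTestKitChecks
    import org.apache.kafka.clients.admin.Admin
    import org.slf4j.{ Logger, LoggerFactory }

    import scala.concurrent.duration._

    object PeriodicalCheckSketch {
      val log: Logger = LoggerFactory.getLogger(getClass)

      // Poll the topic listing every second; fail with an Error after 30 seconds.
      def waitForTopic(adminClient: Admin, topic: String): Unit =
        KafkaTestKitChecks.periodicalCheck("topic listing", 30.seconds, 1.second)(() =>
          adminClient.listTopics().names().get(5, TimeUnit.SECONDS))(names =>
          names.contains(topic))(log)
    }
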
diff --git a/testkit/src/main/scala/akka/kafka/testkit/internal/TestFrameworkInterface.scala b/testkit/src/main/scala/akka/kafka/testkit/internal/TestFrameworkInterface.scala
index c83be927..34a50443 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/internal/TestFrameworkInterface.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/internal/TestFrameworkInterface.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.testkit.internal
 
-import org.scalatest.{BeforeAndAfterAll, Suite}
+import org.scalatest.{ BeforeAndAfterAll, Suite }
 
 trait TestFrameworkInterface {
   def setUp(): Unit
diff --git a/testkit/src/main/scala/akka/kafka/testkit/internal/TestcontainersKafka.scala b/testkit/src/main/scala/akka/kafka/testkit/internal/TestcontainersKafka.scala
index daba7d3c..42094415 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/internal/TestcontainersKafka.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/internal/TestcontainersKafka.scala
@@ -59,8 +59,7 @@ object TestcontainersKafka {
       cluster.getSchemaRegistry.asScala
         .map(_.getSchemaRegistryUrl)
         .getOrElse(
-          throw new RuntimeException("Did you enable schema registry in your KafkaTestkitTestcontainersSettings?")
-        )
+          throw new RuntimeException("Did you enable schema registry in your KafkaTestkitTestcontainersSettings?"))
     }
 
     def startCluster(): String = startCluster(testcontainersSettings)
@@ -78,15 +77,14 @@ object TestcontainersKafka {
           settings.useSchemaRegistry,
           settings.containerLogging,
           settings.clusterStartTimeout.asJava,
-          settings.readinessCheckTimeout.asJava
-        )
+          settings.readinessCheckTimeout.asJava)
         configureKafka(brokerContainers)
         configureKafkaConsumer.accept(brokerContainers.asJavaCollection)
         configureZooKeeper(zookeeperContainer)
         configureZooKeeperConsumer.accept(zookeeperContainer)
         schemaRegistryContainer match {
           case Some(container) => configureSchemaRegistry(container)
-          case _ =>
+          case _               =>
         }
         log.info("Starting Kafka cluster with settings: {}", settings)
         cluster.start()
@@ -112,8 +110,7 @@ object TestcontainersKafka {
       schemaRegistryContainer
         .map(_.getSchemaRegistryUrl)
         .getOrElse(
-          throw new RuntimeException("Did you enable schema registry in your KafkaTestkitTestcontainersSettings?")
-        )
+          throw new RuntimeException("Did you enable schema registry in your KafkaTestkitTestcontainersSettings?"))
   }
 
   // the test base type used for Singleton cannot reference ScalaTest types so that it's compatible with JUnit-only test projects
diff --git a/testkit/src/main/scala/akka/kafka/testkit/javadsl/ConsumerControlFactory.scala b/testkit/src/main/scala/akka/kafka/testkit/javadsl/ConsumerControlFactory.scala
index 6e89fb3c..23a54522 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/javadsl/ConsumerControlFactory.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/javadsl/ConsumerControlFactory.scala
@@ -5,14 +5,14 @@
 
 package akka.kafka.testkit.javadsl
 
-import java.util.concurrent.{CompletableFuture, CompletionStage, Executor}
+import java.util.concurrent.{ CompletableFuture, CompletionStage, Executor }
 
 import akka.Done
 import akka.annotation.ApiMayChange
 import akka.kafka.javadsl.Consumer
-import akka.stream.javadsl.{Flow, Keep, Source}
-import akka.stream.{scaladsl, KillSwitch, KillSwitches}
-import org.apache.kafka.common.{Metric, MetricName}
+import akka.stream.javadsl.{ Flow, Keep, Source }
+import akka.stream.{ scaladsl, KillSwitch, KillSwitches }
+import org.apache.kafka.common.{ Metric, MetricName }
 
 /**
  * Helper factory to create [[akka.kafka.javadsl.Consumer.Control]] instances when
@@ -52,8 +52,7 @@ object ConsumerControlFactory {
 
     override def drainAndShutdown[T](
         streamCompletion: CompletionStage[T],
-        ec: Executor
-    ): CompletionStage[T] =
+        ec: Executor): CompletionStage[T] =
       stop().thenCompose(new java.util.function.Function[Done, CompletionStage[T]] {
         override def apply(t: Done): CompletionStage[T] = streamCompletion
       })
diff --git a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ConsumerControlFactory.scala b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ConsumerControlFactory.scala
index 7c39df22..581e5f1d 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ConsumerControlFactory.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ConsumerControlFactory.scala
@@ -8,11 +8,11 @@ package akka.kafka.testkit.scaladsl
 import akka.Done
 import akka.annotation.ApiMayChange
 import akka.kafka.scaladsl.Consumer
-import akka.stream.scaladsl.{Flow, Keep, Source}
-import akka.stream.{KillSwitch, KillSwitches}
-import org.apache.kafka.common.{Metric, MetricName}
+import akka.stream.scaladsl.{ Flow, Keep, Source }
+import akka.stream.{ KillSwitch, KillSwitches }
+import org.apache.kafka.common.{ Metric, MetricName }
 
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
 
 /**
  * Helper factory to create [[akka.kafka.scaladsl.Consumer.Control]] instances when
diff --git a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/KafkaSpec.scala b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/KafkaSpec.scala
index 0792dc24..228a6310 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/KafkaSpec.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/KafkaSpec.scala
@@ -14,21 +14,21 @@ import akka.actor.ActorSystem
 import akka.event.LoggingAdapter
 import akka.kafka._
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.scaladsl.{Consumer, Producer}
-import akka.kafka.testkit.internal.{KafkaTestKit, KafkaTestKitChecks}
-import akka.stream.{Materializer, SystemMaterializer}
-import akka.stream.scaladsl.{Keep, Source}
+import akka.kafka.scaladsl.{ Consumer, Producer }
+import akka.kafka.testkit.internal.{ KafkaTestKit, KafkaTestKitChecks }
+import akka.stream.{ Materializer, SystemMaterializer }
+import akka.stream.scaladsl.{ Keep, Source }
 import akka.stream.testkit.TestSubscriber
 import akka.stream.testkit.scaladsl.TestSink
 import akka.testkit.TestKit
 import org.apache.kafka.clients.admin._
-import org.apache.kafka.clients.producer.{ProducerRecord, Producer => KProducer}
+import org.apache.kafka.clients.producer.{ Producer => KProducer, ProducerRecord }
 import org.apache.kafka.common.ConsumerGroupState
-import org.slf4j.{Logger, LoggerFactory}
+import org.slf4j.{ Logger, LoggerFactory }
 
 import scala.collection.immutable
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Future}
+import scala.concurrent.{ Await, ExecutionContext, Future }
 import scala.jdk.CollectionConverters._
 import scala.util.Try
 
@@ -88,8 +88,7 @@ abstract class KafkaSpec(_kafkaPort: Int, val zooKeeperPort: Int, actorSystem: A
    * If the predicate does not hold after configured amount of time, throws an exception.
    */
   def waitUntilCluster()(
-      predicate: DescribeClusterResult => Boolean
-  ): Unit =
+      predicate: DescribeClusterResult => Boolean): Unit =
     KafkaTestKitChecks.waitUntilCluster(settings.clusterTimeout, settings.checkInterval, adminClient, predicate, log)
 
   /**
@@ -99,11 +98,11 @@ abstract class KafkaSpec(_kafkaPort: Int, val zooKeeperPort: Int, actorSystem: A
    */
   def waitUntilConsumerGroup(groupId: String)(predicate: ConsumerGroupDescription => Boolean): Unit =
     KafkaTestKitChecks.waitUntilConsumerGroup(groupId,
-                                              settings.consumerGroupTimeout,
-                                              settings.checkInterval,
-                                              adminClient,
-                                              predicate,
-                                              log)
+      settings.consumerGroupTimeout,
+      settings.checkInterval,
+      adminClient,
+      predicate,
+      log)
 
   /**
    * Periodically checks if the given predicate on consumer summary holds.
@@ -130,8 +129,7 @@ abstract class KafkaSpec(_kafkaPort: Int, val zooKeeperPort: Int, actorSystem: A
   }
 
   def periodicalCheck[T](description: String, maxTries: Int, sleepInBetween: FiniteDuration)(
-      data: () => T
-  )(predicate: T => Boolean) =
+      data: () => T)(predicate: T => Boolean) =
     KafkaTestKitChecks.periodicalCheck(description, maxTries * sleepInBetween, sleepInBetween)(data)(predicate)(log)
 
   /**
@@ -143,9 +141,9 @@ abstract class KafkaSpec(_kafkaPort: Int, val zooKeeperPort: Int, actorSystem: A
 
   def produceString(topic: String, range: immutable.Seq[String], partition: Int = partition0): Future[Done] =
     Source(range)
-    // NOTE: If no partition is specified but a key is present a partition will be chosen
-    // using a hash of the key. If neither key nor partition is present a partition
-    // will be assigned in a round-robin fashion.
+      // NOTE: If no partition is specified but a key is present a partition will be chosen
+      // using a hash of the key. If neither key nor partition is present a partition
+      // will be assigned in a round-robin fashion.
       .map(n => new ProducerRecord(topic, partition, DefaultKey, n))
       .runWith(Producer.plainSink(producerDefaults.withProducer(testProducer)))
 
@@ -192,7 +190,7 @@ abstract class KafkaSpec(_kafkaPort: Int, val zooKeeperPort: Int, actorSystem: A
   }
 
   def createProbe(consumerSettings: ConsumerSettings[String, String],
-                  topic: String*): (Control, TestSubscriber.Probe[String]) =
+      topic: String*): (Control, TestSubscriber.Probe[String]) =
     Consumer
       .plainSource(consumerSettings, Subscriptions.topics(topic.toSet))
       .map(_.value)
diff --git a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ScalatestKafkaSpec.scala b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ScalatestKafkaSpec.scala
index 9c6e2c47..cfb77dcb 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ScalatestKafkaSpec.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/ScalatestKafkaSpec.scala
@@ -11,5 +11,4 @@ import org.scalatest.Suite
 abstract class ScalatestKafkaSpec(kafkaPort: Int)
     extends KafkaSpec(kafkaPort)
     with Suite
-    with TestFrameworkInterface.Scalatest { this: Suite =>
-}
+    with TestFrameworkInterface.Scalatest { this: Suite => }
diff --git a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaLike.scala b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaLike.scala
index 896e8e90..cc5f09e5 100644
--- a/testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaLike.scala
+++ b/testkit/src/main/scala/akka/kafka/testkit/scaladsl/TestcontainersKafkaLike.scala
@@ -6,7 +6,7 @@
 package akka.kafka.testkit.scaladsl
 
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
-import akka.kafka.testkit.internal.{AlpakkaKafkaContainer, SchemaRegistryContainer, TestcontainersKafka}
+import akka.kafka.testkit.internal.{ AlpakkaKafkaContainer, SchemaRegistryContainer, TestcontainersKafka }
 import org.testcontainers.containers.GenericContainer
 
 /**
diff --git a/tests/src/it/scala/akka/kafka/IntegrationTests.scala b/tests/src/it/scala/akka/kafka/IntegrationTests.scala
index ce63f69c..8b610b8c 100644
--- a/tests/src/it/scala/akka/kafka/IntegrationTests.scala
+++ b/tests/src/it/scala/akka/kafka/IntegrationTests.scala
@@ -36,8 +36,7 @@ object IntegrationTests {
     val id = broker.getContainerId
     val networkAliases = broker.getNetworkAliases.asScala.mkString(",")
     log.warn(
-      s"Stopping one Kafka container with network aliases [$networkAliases], container id [$id], after [$msgCount] messages"
-    )
+      s"Stopping one Kafka container with network aliases [$networkAliases], container id [$id], after [$msgCount] messages")
     broker.stop()
   }
 
diff --git a/tests/src/it/scala/akka/kafka/PartitionedSourceFailoverSpec.scala b/tests/src/it/scala/akka/kafka/PartitionedSourceFailoverSpec.scala
index e8f360be..8c45053c 100644
--- a/tests/src/it/scala/akka/kafka/PartitionedSourceFailoverSpec.scala
+++ b/tests/src/it/scala/akka/kafka/PartitionedSourceFailoverSpec.scala
@@ -6,13 +6,13 @@
 package akka.kafka
 
 import akka.Done
-import akka.kafka.scaladsl.{Consumer, Producer, SpecBase}
+import akka.kafka.scaladsl.{ Consumer, Producer, SpecBase }
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import org.apache.kafka.clients.consumer.ConsumerConfig
-import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
+import org.apache.kafka.clients.producer.{ ProducerConfig, ProducerRecord }
 import org.apache.kafka.common.config.TopicConfig
 import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.matchers.should.Matchers
@@ -50,9 +50,7 @@ class PartitionedSourceFailoverSpec
         replication = 3,
         Map(
           // require at least two replicas be in sync before acknowledging produced record
-          TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG -> "2"
-        )
-      )
+          TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG -> "2"))
       val groupId = createGroupId(0)
 
       val consumerConfig = consumerDefaults
@@ -84,8 +82,7 @@ class PartitionedSourceFailoverSpec
 
       val producerConfig = producerDefaults.withProperties(
         // require acknowledgement from at least min in sync replicas (2).  default is 1
-        ProducerConfig.ACKS_CONFIG -> "all"
-      )
+        ProducerConfig.ACKS_CONFIG -> "all")
 
       val result: Future[Done] = Source(1L to totalMessages)
         .via(IntegrationTests.logSentMessages()(log))
diff --git a/tests/src/it/scala/akka/kafka/PlainSourceFailoverSpec.scala b/tests/src/it/scala/akka/kafka/PlainSourceFailoverSpec.scala
index 0b6471ea..e4389f05 100644
--- a/tests/src/it/scala/akka/kafka/PlainSourceFailoverSpec.scala
+++ b/tests/src/it/scala/akka/kafka/PlainSourceFailoverSpec.scala
@@ -5,13 +5,13 @@
 
 package akka.kafka
 
-import akka.kafka.scaladsl.{Consumer, Producer, SpecBase}
+import akka.kafka.scaladsl.{ Consumer, Producer, SpecBase }
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import org.apache.kafka.clients.consumer.ConsumerConfig
-import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
+import org.apache.kafka.clients.producer.{ ProducerConfig, ProducerRecord }
 import org.apache.kafka.common.config.TopicConfig
 import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.matchers.should.Matchers
@@ -49,9 +49,7 @@ class PlainSourceFailoverSpec
         replication = 3,
         Map(
           // require at least two replicas be in sync before acknowledging produced record
-          TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG -> "2"
-        )
-      )
+          TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG -> "2"))
       val groupId = createGroupId(0)
 
       val consumerConfig = consumerDefaults
@@ -71,8 +69,7 @@ class PlainSourceFailoverSpec
 
       val producerConfig = producerDefaults.withProperties(
         // require acknowledgement from at least min in sync replicas (2).  default is 1
-        ProducerConfig.ACKS_CONFIG -> "all"
-      )
+        ProducerConfig.ACKS_CONFIG -> "all")
 
       val result = Source(0L to totalMessages)
         .via(IntegrationTests.logSentMessages()(log))
diff --git a/tests/src/it/scala/akka/kafka/TransactionsPartitionedSourceSpec.scala b/tests/src/it/scala/akka/kafka/TransactionsPartitionedSourceSpec.scala
index eeb59fa9..0f8f199c 100644
--- a/tests/src/it/scala/akka/kafka/TransactionsPartitionedSourceSpec.scala
+++ b/tests/src/it/scala/akka/kafka/TransactionsPartitionedSourceSpec.scala
@@ -12,7 +12,7 @@ import akka.kafka.scaladsl.SpecBase
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike
 import akka.stream._
-import akka.stream.scaladsl.{Keep, RestartSource, Sink}
+import akka.stream.scaladsl.{ Keep, RestartSource, Sink }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import org.scalatest.concurrent.PatienceConfiguration.Interval
 import org.scalatest.concurrent.ScalaFutures
@@ -22,8 +22,8 @@ import org.scalatest.wordspec.AnyWordSpecLike
 
 import scala.collection.immutable
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Future, TimeoutException}
-import scala.util.{Failure, Success}
+import scala.concurrent.{ Await, Future, TimeoutException }
+import scala.util.{ Failure, Success }
 
 @Ignore
 class TransactionsPartitionedSourceSpec
@@ -78,27 +78,24 @@ class TransactionsPartitionedSourceSpec
 
       def runStream(id: String): UniqueKillSwitch =
         RestartSource
-          .onFailuresWithBackoff(RestartSettings(10.millis, 100.millis, 0.2))(
-            () => {
-              transactionalPartitionedCopyStream(
-                consumerSettings,
-                txProducerDefaults,
-                sourceTopic,
-                sinkTopic,
-                transactionalId,
-                idleTimeout = 10.seconds,
-                maxPartitions = sourcePartitions,
-                restartAfter = Some(restartAfter),
-                maxRestarts = Some(maxRestarts)
-              ).recover {
-                case e: TimeoutException =>
-                  if (completedWithTimeout.incrementAndGet() > 10)
-                    "no more messages to copy"
-                  else
-                    throw new Error("Continue restarting copy stream")
-              }
+          .onFailuresWithBackoff(RestartSettings(10.millis, 100.millis, 0.2))(() => {
+            transactionalPartitionedCopyStream(
+              consumerSettings,
+              txProducerDefaults,
+              sourceTopic,
+              sinkTopic,
+              transactionalId,
+              idleTimeout = 10.seconds,
+              maxPartitions = sourcePartitions,
+              restartAfter = Some(restartAfter),
+              maxRestarts = Some(maxRestarts)).recover {
+              case e: TimeoutException =>
+                if (completedWithTimeout.incrementAndGet() > 10)
+                  "no more messages to copy"
+                else
+                  throw new Error("Continue restarting copy stream")
             }
-          )
+          })
           .viaMat(KillSwitches.single)(Keep.right)
           .toMat(Sink.onComplete {
             case Success(_) =>
@@ -118,8 +115,7 @@ class TransactionsPartitionedSourceSpec
       val consumer = consumePartitionOffsetValues(
         probeConsumerSettings(createGroupId(2)),
         sinkTopic,
-        elementsToTake = (elements * destinationPartitions).toLong
-      )
+        elementsToTake = (elements * destinationPartitions).toLong)
 
       val actualValues = Await.result(consumer, 10.minutes)
 
diff --git a/tests/src/it/scala/akka/kafka/TransactionsSourceSpec.scala b/tests/src/it/scala/akka/kafka/TransactionsSourceSpec.scala
index fa77e820..a0553d45 100644
--- a/tests/src/it/scala/akka/kafka/TransactionsSourceSpec.scala
+++ b/tests/src/it/scala/akka/kafka/TransactionsSourceSpec.scala
@@ -9,11 +9,11 @@ import java.util.concurrent.atomic.AtomicInteger
 
 import akka.Done
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.scaladsl.{Consumer, SpecBase, Transactional}
+import akka.kafka.scaladsl.{ Consumer, SpecBase, Transactional }
 import akka.kafka.testkit.KafkaTestkitTestcontainersSettings
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaPerClassLike
 import akka.stream._
-import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink}
+import akka.stream.scaladsl.{ Flow, Keep, RestartSource, Sink }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import org.apache.kafka.clients.producer.ProducerRecord
 import org.scalatest.concurrent.PatienceConfiguration.Interval
@@ -23,8 +23,8 @@ import org.scalatest.wordspec.AnyWordSpecLike
 
 import scala.collection.immutable
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Future, TimeoutException}
-import scala.util.{Failure, Success}
+import scala.concurrent.{ Await, Future, TimeoutException }
+import scala.util.{ Failure, Success }
 
 class TransactionsSourceSpec
     extends SpecBase
@@ -79,26 +79,24 @@ class TransactionsSourceSpec
 
       def runStream(id: String): UniqueKillSwitch =
         RestartSource
-          .onFailuresWithBackoff(RestartSettings(10.millis, 100.millis, 0.2))(
-            () => {
-              val transactionId = s"$group-$id"
-              transactionalCopyStream(consumerSettings,
-                                      txProducerDefaults,
-                                      sourceTopic,
-                                      sinkTopic,
-                                      transactionId,
-                                      10.seconds,
-                                      Some(restartAfter),
-                                      Some(maxRestarts))
-                .recover {
-                  case e: TimeoutException =>
-                    if (completedWithTimeout.incrementAndGet() > 10)
-                      "no more messages to copy"
-                    else
-                      throw new Error("Continue restarting copy stream")
-                }
-            }
-          )
+          .onFailuresWithBackoff(RestartSettings(10.millis, 100.millis, 0.2))(() => {
+            val transactionId = s"$group-$id"
+            transactionalCopyStream(consumerSettings,
+              txProducerDefaults,
+              sourceTopic,
+              sinkTopic,
+              transactionId,
+              10.seconds,
+              Some(restartAfter),
+              Some(maxRestarts))
+              .recover {
+                case e: TimeoutException =>
+                  if (completedWithTimeout.incrementAndGet() > 10)
+                    "no more messages to copy"
+                  else
+                    throw new Error("Continue restarting copy stream")
+              }
+          })
           .viaMat(KillSwitches.single)(Keep.right)
           .toMat(Sink.onComplete {
             case Success(_) =>
@@ -124,8 +122,7 @@ class TransactionsSourceSpec
             .scan(0) { case (count, _) => count + 1 }
             .filter(_ % 10000 == 0)
             .log("received")
-            .to(Sink.ignore)
-        )
+            .to(Sink.ignore))
         .recover {
           case t => (0L, "no-more-elements")
         }
@@ -168,7 +165,7 @@ class TransactionsSourceSpec
             .source(consumerSettings, Subscriptions.topics(sourceTopic))
             .map { msg =>
               ProducerMessage.single(new ProducerRecord[String, String](sinkTopic, msg.record.value),
-                                     msg.partitionOffset)
+                msg.partitionOffset)
             }
             .take(batchSize.toLong)
             .delay(3.seconds, strategy = DelayOverflowStrategy.backpressure)
diff --git a/tests/src/test/scala/akka/kafka/ConfigSettingsSpec.scala b/tests/src/test/scala/akka/kafka/ConfigSettingsSpec.scala
index 0d91b86b..ae32f999 100644
--- a/tests/src/test/scala/akka/kafka/ConfigSettingsSpec.scala
+++ b/tests/src/test/scala/akka/kafka/ConfigSettingsSpec.scala
@@ -23,8 +23,7 @@ class ConfigSettingsSpec extends AnyWordSpec with Matchers with LogCapturing {
         kafka-client.bootstrap.foo = baz
         kafka-client.foo = bar
         kafka-client.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("kafka-client")
       val settings = ConfigSettings.parseKafkaClientsProperties(conf)
diff --git a/tests/src/test/scala/akka/kafka/ConsumerSettingsSpec.scala b/tests/src/test/scala/akka/kafka/ConsumerSettingsSpec.scala
index 85851b9c..2000d918 100644
--- a/tests/src/test/scala/akka/kafka/ConsumerSettingsSpec.scala
+++ b/tests/src/test/scala/akka/kafka/ConsumerSettingsSpec.scala
@@ -10,9 +10,9 @@ import akka.kafka.tests.scaladsl.LogCapturing
 import akka.testkit.TestKit
 import com.typesafe.config.ConfigFactory
 import org.apache.kafka.common.config.SslConfigs
-import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
+import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }
 import org.scalatest.OptionValues
-import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ IntegrationPatience, ScalaFutures }
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec
 
@@ -50,8 +50,7 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val settings = ConsumerSettings(conf, None, None)
@@ -74,8 +73,7 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val settings = ConsumerSettings(conf, Some(new ByteArrayDeserializer), None)
@@ -89,8 +87,7 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val settings = ConsumerSettings(conf, None, Some(new ByteArrayDeserializer))
@@ -104,9 +101,9 @@ class ConsumerSettingsSpec
         .withProperty("ssl.truststore.password", "geheim")
       val s = settings.toString
       s should include(SslConfigs.SSL_KEY_PASSWORD_CONFIG)
-      s should not include ("hemligt")
+      (s should not).include("hemligt")
       s should include("ssl.truststore.password")
-      s should not include ("geheim")
+      (s should not).include("geheim")
     }
 
     "throw IllegalArgumentException if no value deserializer defined" in {
@@ -116,16 +113,14 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val exception = intercept[IllegalArgumentException] {
         ConsumerSettings(conf, None, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Value deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value deserializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no value deserializer defined (null case). Key serializer passed as args config" in {
@@ -137,8 +132,7 @@ class ConsumerSettingsSpec
         ConsumerSettings(conf, new ByteArrayDeserializer, null)
       }
       exception.getMessage should ===(
-        "requirement failed: Value deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value deserializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no value deserializer defined (null case). Key serializer defined in config" in {
@@ -148,16 +142,14 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val exception = intercept[IllegalArgumentException] {
         ConsumerSettings(conf, None, null)
       }
       exception.getMessage should ===(
-        "requirement failed: Value deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value deserializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key deserializer defined" in {
@@ -167,16 +159,14 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val exception = intercept[IllegalArgumentException] {
         ConsumerSettings(conf, None, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Key deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key deserializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key deserializer defined (null case). Value serializer passed as args config" in {
@@ -188,8 +178,7 @@ class ConsumerSettingsSpec
         ConsumerSettings(conf, null, new ByteArrayDeserializer)
       }
       exception.getMessage should ===(
-        "requirement failed: Key deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key deserializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key deserializer defined (null case). Value serializer defined in config" in {
@@ -199,16 +188,14 @@ class ConsumerSettingsSpec
         akka.kafka.consumer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.consumer.kafka-clients.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
         akka.kafka.consumer.kafka-clients.client.id = client1
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.consumer")
       val exception = intercept[IllegalArgumentException] {
         ConsumerSettings(conf, null, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Key deserializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key deserializer should be defined or declared in configuration")
     }
 
   }
diff --git a/tests/src/test/scala/akka/kafka/ProducerSettingsSpec.scala b/tests/src/test/scala/akka/kafka/ProducerSettingsSpec.scala
index 9a503bbe..abf5b5e4 100644
--- a/tests/src/test/scala/akka/kafka/ProducerSettingsSpec.scala
+++ b/tests/src/test/scala/akka/kafka/ProducerSettingsSpec.scala
@@ -10,11 +10,11 @@ import akka.kafka.tests.scaladsl.LogCapturing
 import akka.testkit.TestKit
 import com.typesafe.config.ConfigFactory
 import org.apache.kafka.common.config.SslConfigs
-import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
+import org.apache.kafka.common.serialization.{ ByteArraySerializer, StringSerializer }
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec
 
-import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ IntegrationPatience, ScalaFutures }
 
 class ProducerSettingsSpec
     extends AnyWordSpec
@@ -33,8 +33,7 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.key.serializer = org.apache.kafka.common.serialization.StringSerializer
         akka.kafka.producer.kafka-clients.value.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val settings = ProducerSettings(conf, None, None)
@@ -57,8 +56,7 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.value.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val settings = ProducerSettings(conf, Some(new ByteArraySerializer), None)
@@ -72,8 +70,7 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.key.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val settings = ProducerSettings(conf, None, Some(new ByteArraySerializer))
@@ -87,9 +84,9 @@ class ProducerSettingsSpec
         .withProperty("ssl.truststore.password", "geheim")
       val s = settings.toString
       s should include(SslConfigs.SSL_KEY_PASSWORD_CONFIG)
-      s should not include ("hemligt")
+      (s should not).include("hemligt")
       s should include("ssl.truststore.password")
-      s should not include ("geheim")
+      (s should not).include("geheim")
     }
 
     "throw IllegalArgumentException if no value serializer defined" in {
@@ -99,16 +96,14 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.key.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val exception = intercept[IllegalArgumentException] {
         ProducerSettings(conf, None, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Value serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value serializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no value serializer defined (null case). Key serializer passed as args config" in {
@@ -120,8 +115,7 @@ class ProducerSettingsSpec
         ProducerSettings(conf, new ByteArraySerializer, null)
       }
       exception.getMessage should ===(
-        "requirement failed: Value serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value serializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no value serializer defined (null case). Key serializer defined in config" in {
@@ -131,16 +125,14 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.key.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val exception = intercept[IllegalArgumentException] {
         ProducerSettings(conf, None, null)
       }
       exception.getMessage should ===(
-        "requirement failed: Value serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Value serializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key serializer defined" in {
@@ -150,16 +142,14 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.value.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val exception = intercept[IllegalArgumentException] {
         ProducerSettings(conf, None, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Key serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key serializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key serializer defined (null case). Value serializer passed as args config" in {
@@ -171,8 +161,7 @@ class ProducerSettingsSpec
         ProducerSettings(conf, null, new ByteArraySerializer)
       }
       exception.getMessage should ===(
-        "requirement failed: Key serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key serializer should be defined or declared in configuration")
     }
 
     "throw IllegalArgumentException if no key serializer defined (null case). Value serializer defined in config" in {
@@ -182,16 +171,14 @@ class ProducerSettingsSpec
         akka.kafka.producer.kafka-clients.bootstrap.servers = "localhost:9092"
         akka.kafka.producer.kafka-clients.parallelism = 1
         akka.kafka.producer.kafka-clients.value.serializer = org.apache.kafka.common.serialization.StringSerializer
-        """
-        )
+        """)
         .withFallback(ConfigFactory.load())
         .getConfig("akka.kafka.producer")
       val exception = intercept[IllegalArgumentException] {
         ProducerSettings(conf, null, None)
       }
       exception.getMessage should ===(
-        "requirement failed: Key serializer should be defined or declared in configuration"
-      )
+        "requirement failed: Key serializer should be defined or declared in configuration")
     }
 
   }
diff --git a/tests/src/test/scala/akka/kafka/Repeated.scala b/tests/src/test/scala/akka/kafka/Repeated.scala
index 986b9022..eac51cd0 100644
--- a/tests/src/test/scala/akka/kafka/Repeated.scala
+++ b/tests/src/test/scala/akka/kafka/Repeated.scala
@@ -36,7 +36,7 @@ trait Repeated extends TestSuiteMixin { this: TestSuite =>
   final def retry[T](n: Int)(fn: Int => T): T =
     util.Try { fn(n + 1) } match {
       case util.Success(x) => x
-      case _ if n > 1 => retry(n - 1)(fn)
+      case _ if n > 1      => retry(n - 1)(fn)
       case util.Failure(e) => throw e
     }
 }
diff --git a/tests/src/test/scala/akka/kafka/TransactionsOps.scala b/tests/src/test/scala/akka/kafka/TransactionsOps.scala
index 78ccb810..d3b7b199 100644
--- a/tests/src/test/scala/akka/kafka/TransactionsOps.scala
+++ b/tests/src/test/scala/akka/kafka/TransactionsOps.scala
@@ -6,18 +6,18 @@
 package akka.kafka
 import java.util.concurrent.atomic.AtomicInteger
 
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
 import akka.actor.ActorSystem
 import akka.kafka.ConsumerMessage.PartitionOffset
 import akka.kafka.ProducerMessage.MultiMessage
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.scaladsl.{Consumer, Producer, Transactional}
+import akka.kafka.scaladsl.{ Consumer, Producer, Transactional }
 import akka.stream.Materializer
-import akka.stream.scaladsl.{Flow, Sink, Source}
+import akka.stream.scaladsl.{ Flow, Sink, Source }
 import akka.stream.testkit.TestSubscriber
 import akka.stream.testkit.scaladsl.TestSink
 import org.apache.kafka.clients.consumer.ConsumerConfig
-import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
+import org.apache.kafka.clients.producer.{ ProducerConfig, ProducerRecord }
 import org.scalatest.TestSuite
 import org.scalatest.matchers.should.Matchers
 
@@ -34,8 +34,8 @@ trait TransactionsOps extends TestSuite with Matchers {
       transactionalId: String,
       idleTimeout: FiniteDuration,
       restartAfter: Option[Int] = None,
-      maxRestarts: Option[AtomicInteger] = None
-  ): Source[ProducerMessage.Results[String, String, PartitionOffset], Control] =
+      maxRestarts: Option[AtomicInteger] = None)
+      : Source[ProducerMessage.Results[String, String, PartitionOffset], Control] =
     Transactional
       .source(consumerSettings, Subscriptions.topics(sourceTopic))
       .zip(Source.unfold(1)(count => Some((count + 1, count))))
@@ -63,12 +63,13 @@ trait TransactionsOps extends TestSuite with Matchers {
       idleTimeout: FiniteDuration,
       maxPartitions: Int,
       restartAfter: Option[Int] = None,
-      maxRestarts: Option[AtomicInteger] = None
-  ): Source[ProducerMessage.Results[String, String, PartitionOffset], Control] =
+      maxRestarts: Option[AtomicInteger] = None)
+      : Source[ProducerMessage.Results[String, String, PartitionOffset], Control] =
     Transactional
       .partitionedSource(consumerSettings, Subscriptions.topics(sourceTopic))
       .flatMapMerge(
-        maxPartitions, {
+        maxPartitions,
+        {
           case (_, source) =>
             val results: Source[ProducerMessage.Results[String, String, PartitionOffset], NotUsed] = source
               .zip(Source.unfold(1)(count => Some((count + 1, count))))
@@ -81,28 +82,27 @@ trait TransactionsOps extends TestSuite with Matchers {
               .idleTimeout(idleTimeout)
               .map { msg =>
                 ProducerMessage.single(new ProducerRecord[String, String](sinkTopic,
-                                                                          msg.record.partition(),
-                                                                          msg.record.key(),
-                                                                          msg.record.value),
-                                       msg.partitionOffset)
+                    msg.record.partition(),
+                    msg.record.key(),
+                    msg.record.value),
+                  msg.partitionOffset)
               }
               .via(Transactional.flow(producerSettings, transactionalId))
             results
-        }
-      )
+        })
 
   def restart(count: Int, restartAfter: Option[Int], maxRestarts: Option[AtomicInteger]): Boolean = {
     (restartAfter, maxRestarts) match {
       case (Some(restart), Some(maxRestart)) => count >= restart && maxRestart.decrementAndGet() > 0
-      case (Some(restart), _) => count >= restart
-      case _ => false
+      case (Some(restart), _)                => count >= restart
+      case _                                 => false
     }
   }
 
   def produceToAllPartitions(producerSettings: ProducerSettings[String, String],
-                             topic: String,
-                             partitions: Int,
-                             range: Range)(implicit mat: Materializer): Future[Done] =
+      topic: String,
+      partitions: Int,
+      range: Range)(implicit mat: Materializer): Future[Done] =
     Source(range)
       .map { n =>
         val msgs = (0 until partitions).map(p => new ProducerRecord(topic, p, n.toString, n.toString))
@@ -113,7 +113,7 @@ trait TransactionsOps extends TestSuite with Matchers {
 
   def checkForDuplicates(values: immutable.Seq[(Long, String)], expected: immutable.IndexedSeq[String]): Unit =
     withClue("Checking for duplicates: ") {
-      val duplicates = values.map(_._2) diff expected
+      val duplicates = values.map(_._2).diff(expected)
       if (duplicates.nonEmpty) {
         val duplicatesWithDifferentOffsets = values
           .filter {
@@ -138,7 +138,7 @@ trait TransactionsOps extends TestSuite with Matchers {
 
   def checkForMissing(values: immutable.Seq[(Long, String)], expected: immutable.IndexedSeq[String]): Unit =
     withClue("Checking for missing: ") {
-      val missing = expected diff values.map(_._2)
+      val missing = expected.diff(values.map(_._2))
       if (missing.nonEmpty) {
         val continuousBlocks = missing
           .scanLeft(("-1", 0)) {
@@ -158,21 +158,19 @@ trait TransactionsOps extends TestSuite with Matchers {
 
   def valuesProbeConsumer(
       settings: ConsumerSettings[String, String],
-      topic: String
-  )(implicit actorSystem: ActorSystem, mat: Materializer): TestSubscriber.Probe[String] =
+      topic: String)(implicit actorSystem: ActorSystem, mat: Materializer): TestSubscriber.Probe[String] =
     offsetValueSource(settings, topic)
       .map(_._2)
       .runWith(TestSink.probe)
 
   def offsetValueSource(settings: ConsumerSettings[String, String],
-                        topic: String): Source[(Long, String), Consumer.Control] =
+      topic: String): Source[(Long, String), Consumer.Control] =
     Consumer
       .plainSource(settings, Subscriptions.topics(topic))
       .map(r => (r.offset(), r.value()))
 
   def consumePartitionOffsetValues(settings: ConsumerSettings[String, String], topic: String, elementsToTake: Long)(
-      implicit mat: Materializer
-  ): Future[immutable.Seq[(Int, Long, String)]] =
+      implicit mat: Materializer): Future[immutable.Seq[(Int, Long, String)]] =
     Consumer
       .plainSource(settings, Subscriptions.topics(topic))
       .map(r => (r.partition(), r.offset(), r.value()))
@@ -182,8 +180,7 @@ trait TransactionsOps extends TestSuite with Matchers {
           .scan(0) { case (count, _) => count + 1 }
           .filter(_ % 100 == 0)
           .log("received")
-          .to(Sink.ignore)
-      )
+          .to(Sink.ignore))
       .recover {
         case t => (0, 0L, "no-more-elements")
       }
@@ -193,8 +190,7 @@ trait TransactionsOps extends TestSuite with Matchers {
   def assertPartitionedConsistency(
       elements: Int,
       maxPartitions: Int,
-      values: immutable.Seq[(Int, Long, String)]
-  ): Unit = {
+      values: immutable.Seq[(Int, Long, String)]): Unit = {
     val expectedValues: immutable.Seq[String] = (1 to elements).map(_.toString)
 
     for (partition <- 0 until maxPartitions) {
@@ -209,7 +205,7 @@ trait TransactionsOps extends TestSuite with Matchers {
   }
 
   def withProbeConsumerSettings(settings: ConsumerSettings[String, String],
-                                groupId: String): ConsumerSettings[String, String] =
+      groupId: String): ConsumerSettings[String, String] =
     TransactionsOps.withProbeConsumerSettings(settings, groupId)
 
   def withTestProducerSettings(settings: ProducerSettings[String, String]): ProducerSettings[String, String] =
@@ -221,7 +217,7 @@ trait TransactionsOps extends TestSuite with Matchers {
 
 object TransactionsOps {
   def withProbeConsumerSettings(settings: ConsumerSettings[String, String],
-                                groupId: String): ConsumerSettings[String, String] =
+      groupId: String): ConsumerSettings[String, String] =
     settings
       .withGroupId(groupId)
       .withProperties(ConsumerConfig.ISOLATION_LEVEL_CONFIG -> "read_committed")
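
The hunks above repeat a small set of mechanical transformations: spaces inside
import braces ("{ Flow, Keep, Source }"), closing parentheses pulled onto the
last argument line instead of dangling on their own line, aligned case arrows,
infix calls such as "a diff b" rewritten to "a.diff(b)", and narrower
continuation indents at definition and call sites. As a rough sketch, a
scalafmt 3.x configuration along the following lines would reproduce them; the
option names are real scalafmt settings, but the concrete values are inferred
from this diff rather than read from the repository's actual .scalafmt.conf,
so treat them as assumptions:

    # Sketch only: values inferred from the formatting changes in this commit,
    # not copied from the repository's .scalafmt.conf.
    version = "3.6.1"                     # assumed scalafmt version
    runner.dialect = scala213             # assumed dialect
    maxColumn = 120
    spaces.inImportCurlyBraces = true     # import foo.{ Bar, Baz }
    danglingParentheses.preset = false    # no lone ")" on its own line
    align.preset = some                   # aligns case arrows, not assignments
    rewrite.rules = [AvoidInfix]          # "a diff b" becomes "a.diff(b)"
    indent.defnSite = 4                   # continuation indent for parameters
    indent.callSite = 2                   # continuation indent for arguments

Assuming the build wires in the sbt-scalafmt plugin (not visible in this
excerpt), "sbt scalafmtAll" applies these rules across all source sets in a
single pass, which is how a commit of this shape is typically produced.
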
diff --git a/tests/src/test/scala/akka/kafka/internal/CommitCollectorStageSpec.scala b/tests/src/test/scala/akka/kafka/internal/CommitCollectorStageSpec.scala
index 1d54d41e..7c495d42 100644
--- a/tests/src/test/scala/akka/kafka/internal/CommitCollectorStageSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/CommitCollectorStageSpec.scala
@@ -9,27 +9,27 @@ import java.util.concurrent.atomic.AtomicLong
 import akka.Done
 import akka.actor.ActorSystem
 import akka.event.LoggingAdapter
-import akka.kafka.ConsumerMessage.{Committable, CommittableOffset, CommittableOffsetBatch}
-import akka.kafka.scaladsl.{Committer, Consumer}
+import akka.kafka.ConsumerMessage.{ Committable, CommittableOffset, CommittableOffsetBatch }
+import akka.kafka.scaladsl.{ Committer, Consumer }
 import akka.kafka.testkit.ConsumerResultFactory
-import akka.kafka.testkit.scaladsl.{ConsumerControlFactory, Slf4jToAkkaLoggingAdapter}
+import akka.kafka.testkit.scaladsl.{ ConsumerControlFactory, Slf4jToAkkaLoggingAdapter }
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.kafka.{CommitWhen, CommitterSettings, Repeated}
+import akka.kafka.{ CommitWhen, CommitterSettings, Repeated }
 import akka.stream.scaladsl.Keep
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.testkit.scaladsl.{TestSink, TestSource}
-import akka.stream.testkit.{TestPublisher, TestSubscriber}
+import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
+import akka.stream.testkit.{ TestPublisher, TestSubscriber }
 import akka.testkit.TestKit
 import org.apache.kafka.clients.consumer.OffsetAndMetadata
 import org.apache.kafka.common.TopicPartition
-import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpecLike
-import org.scalatest.{AppendedClues, BeforeAndAfterAll}
-import org.slf4j.{Logger, LoggerFactory}
+import org.scalatest.{ AppendedClues, BeforeAndAfterAll }
+import org.slf4j.{ Logger, LoggerFactory }
 
-import scala.concurrent.duration.{FiniteDuration, _}
-import scala.concurrent.{ExecutionContext, Future, Promise}
+import scala.concurrent.duration.{ FiniteDuration, _ }
+import scala.concurrent.{ ExecutionContext, Future, Promise }
 
 class CommitCollectorStageSpec(_system: ActorSystem)
     extends TestKit(_system)
@@ -78,7 +78,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         committedBatch.batchSize shouldBe 2
         committedBatch.offsets.values should have size 1
         committedBatch.offsets.values.last shouldBe msg2.partitionOffset.offset
-        offsetFactory.committer.commits.size shouldBe 1 withClue "expected only one batch commit"
+        (offsetFactory.committer.commits.size shouldBe 1).withClue("expected only one batch commit")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -99,7 +99,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         committedBatch.batchSize shouldBe 1
         committedBatch.offsets.values should have size 1
         committedBatch.offsets.values.last shouldBe msg.partitionOffset.offset
-        factory.committer.commits.size shouldBe 1 withClue "expected only one batch commit"
+        (factory.committer.commits.size shouldBe 1).withClue("expected only one batch commit")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -120,7 +120,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         committedBatch.batchSize shouldBe 1
         committedBatch.offsets.values should have size 1
         committedBatch.offsets.values.last shouldBe msg.partitionOffset.offset
-        factory.committer.commits.size shouldBe 1 withClue "expected only one batch commit"
+        (factory.committer.commits.size shouldBe 1).withClue("expected only one batch commit")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -175,7 +175,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         committedBatch.batchSize shouldBe 1
         committedBatch.offsets.values should have size 1
         committedBatch.offsets.values.last shouldBe msg.partitionOffset.offset
-        factory.committer.commits.size shouldBe 1 withClue "expected only one batch commit"
+        (factory.committer.commits.size shouldBe 1).withClue("expected only one batch commit")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -197,7 +197,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         committedBatch.batchSize shouldBe 2
         committedBatch.offsets.values should have size 1
         committedBatch.offsets.values.last shouldBe msg2.partitionOffset.offset
-        committer.commits.size shouldBe 1 withClue "expected only one batch commit"
+        (committer.commits.size shouldBe 1).withClue("expected only one batch commit")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -223,7 +223,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
 
         val commits = factory.committer.commits
 
-        commits.last._2 shouldBe 10 withClue "last offset commit should be exactly the one preceeding the error"
+        (commits.last._2 shouldBe 10).withClue("last offset commit should be exactly the one preceeding the error")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -249,8 +249,9 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         // downstream out of order
         val lastBatch = batches.maxBy(_.offsets.values.last)
 
-        lastBatch.offsets.values.last shouldBe msg2.partitionOffset.offset withClue "expect only the second offset to be committed"
-        offsetFactory.committer.commits.size shouldBe 2 withClue "expected only two commits"
+        (lastBatch.offsets.values.last shouldBe msg2.partitionOffset.offset).withClue(
+          "expect only the second offset to be committed")
+        (offsetFactory.committer.commits.size shouldBe 2).withClue("expected only two commits")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -274,12 +275,12 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         // downstream out of order
         val lastBatch = batches.maxBy(_.offsets.values.last)
 
-        lastBatch.offsets.values.last shouldBe batch2
+        (lastBatch.offsets.values.last shouldBe batch2
           .asInstanceOf[CommittableOffsetBatch]
           .offsets
           .head
-          ._2 withClue "expect only the second offset to be committed"
-        offsetFactory.committer.commits.size shouldBe 2 withClue "expected only two commits"
+          ._2).withClue("expect only the second offset to be committed")
+        (offsetFactory.committer.commits.size shouldBe 2).withClue("expected only two commits")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -303,8 +304,9 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         // downstream out of order
         val lastBatch = batches.maxBy(_.offsets.values.last)
 
-        lastBatch.offsets.values.last shouldBe msg2.partitionOffset.offset withClue "expect only the second offset to be committed"
-        offsetFactory.committer.commits.size shouldBe 2 withClue "expected only two commits"
+        (lastBatch.offsets.values.last shouldBe msg2.partitionOffset.offset).withClue(
+          "expect only the second offset to be committed")
+        (offsetFactory.committer.commits.size shouldBe 2).withClue("expected only two commits")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -328,22 +330,22 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         // downstream out of order
         val lastBatch = batches.maxBy(_.offsets.values.last)
 
-        lastBatch.offsets.values.last shouldBe batch2
+        (lastBatch.offsets.values.last shouldBe batch2
           .asInstanceOf[CommittableOffsetBatch]
           .offsets
           .head
-          ._2 withClue "expect only the second offset to be committed"
-        offsetFactory.committer.commits.size shouldBe 2 withClue "expected only two commits"
+          ._2).withClue("expect only the second offset to be committed")
+        (offsetFactory.committer.commits.size shouldBe 2).withClue("expected only two commits")
 
         control.shutdown().futureValue shouldBe Done
       }
       "only commit when the next offset is observed for the correct partitions" in assertAllStagesStopped {
         val (sourceProbe, control, sinkProbe, offsetFactory) = streamProbesWithOffsetFactory(settings)
         val (msg1, msg2, msg3, msg4, msg5) = (offsetFactory.makeOffset(partitionNum = 1),
-                                              offsetFactory.makeOffset(partitionNum = 2),
-                                              offsetFactory.makeOffset(partitionNum = 1),
-                                              offsetFactory.makeOffset(partitionNum = 2),
-                                              offsetFactory.makeOffset(partitionNum = 1))
+          offsetFactory.makeOffset(partitionNum = 2),
+          offsetFactory.makeOffset(partitionNum = 1),
+          offsetFactory.makeOffset(partitionNum = 2),
+          offsetFactory.makeOffset(partitionNum = 1))
         val all = Seq(msg1, msg2, msg3, msg4, msg5)
 
         sinkProbe.request(100)
@@ -356,13 +358,15 @@ class CommitCollectorStageSpec(_system: ActorSystem)
         val lastBatches = batches.sortBy(_.offsets.values.last).reverse.take(2)
         lastBatches match {
           case lastBatch :: secondLastBatch :: Nil =>
-            lastBatch.offsets(msg3.partitionOffset.key) shouldBe msg3.partitionOffset.offset withClue "expect the second offset of partition 1"
-            secondLastBatch.offsets(msg2.partitionOffset.key) shouldBe msg2.partitionOffset.offset withClue "expect the first offset of partition 2"
+            (lastBatch.offsets(msg3.partitionOffset.key) shouldBe msg3.partitionOffset.offset).withClue(
+              "expect the second offset of partition 1")
+            (secondLastBatch.offsets(msg2.partitionOffset.key) shouldBe msg2.partitionOffset.offset).withClue(
+              "expect the first offset of partition 2")
 
           case list =>
             fail(s"extracting the last batches failed: $list")
         }
-        offsetFactory.committer.commits.size shouldBe 3 withClue "expected only three commits"
+        (offsetFactory.committer.commits.size shouldBe 3).withClue("expected only three commits")
 
         control.shutdown().futureValue shouldBe Done
       }
@@ -372,8 +376,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
   @scala.annotation.tailrec
   private def pullTillFailure(
       sinkProbe: TestSubscriber.Probe[CommittableOffsetBatch],
-      maxEvents: Int
-  ): Throwable = {
+      maxEvents: Int): Throwable = {
     val nextOrError = sinkProbe.expectNextOrError()
     if (maxEvents < 0) {
       fail("Max number events has been read, no error encountered.")
@@ -389,8 +392,8 @@ class CommitCollectorStageSpec(_system: ActorSystem)
   }
 
   private def streamProbes(
-      committerSettings: CommitterSettings
-  ): (TestPublisher.Probe[Committable], Consumer.Control, TestSubscriber.Probe[CommittableOffsetBatch]) = {
+      committerSettings: CommitterSettings)
+      : (TestPublisher.Probe[Committable], Consumer.Control, TestSubscriber.Probe[CommittableOffsetBatch]) = {
 
     val flow = Committer.batchFlow(committerSettings)
 
@@ -405,8 +408,7 @@ class CommitCollectorStageSpec(_system: ActorSystem)
   }
 
   private def streamProbesWithOffsetFactory(
-      committerSettings: CommitterSettings
-  ) = {
+      committerSettings: CommitterSettings) = {
     val (source, control, sink) = streamProbes(committerSettings)
     val factory = TestOffsetFactory(new TestBatchCommitter(committerSettings))
     (source, control, sink, factory)
@@ -415,17 +417,16 @@ class CommitCollectorStageSpec(_system: ActorSystem)
   object TestCommittableOffset {
 
     def apply(offsetCounter: AtomicLong,
-              committer: TestBatchCommitter,
-              failWith: Option[Throwable] = None,
-              partitionNum: Int = 1): CommittableOffset = {
+        committer: TestBatchCommitter,
+        failWith: Option[Throwable] = None,
+        partitionNum: Int = 1): CommittableOffset = {
       CommittableOffsetImpl(
         ConsumerResultFactory
           .partitionOffset(groupId = "group1",
-                           topic = "topic1",
-                           partition = partitionNum,
-                           offset = offsetCounter.incrementAndGet()),
-        "metadata1"
-      )(committer.underlying)
+            topic = "topic1",
+            partition = partitionNum,
+            offset = offsetCounter.incrementAndGet()),
+        "metadata1")(committer.underlying)
     }
   }
 
@@ -449,10 +450,8 @@ class CommitCollectorStageSpec(_system: ActorSystem)
 
   class TestBatchCommitter(
       commitSettings: CommitterSettings,
-      commitDelay: () => FiniteDuration = () => Duration.Zero
-  )(
-      implicit system: ActorSystem
-  ) {
+      commitDelay: () => FiniteDuration = () => Duration.Zero)(
+      implicit system: ActorSystem) {
 
     var commits = List.empty[(TopicPartition, Long)]
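
The first hunk above rewrites ScalaTest's postfix withClue into explicit dot
notation, parenthesizing the assertion it wraps. Both spellings go through
ScalaTest's AppendedClues conversion; a minimal, self-contained sketch of the
idiom (hypothetical values, not taken from the spec):

    // minimal sketch of ScalaTest's AppendedClues idiom (hypothetical values)
    import org.scalatest.AppendedClues._
    import org.scalatest.matchers.should.Matchers._

    val offsets = Map("partition-1" -> 42L)
    // the conversion to Clueful takes the expression by-name, so the clue is
    // still appended when the wrapped assertion fails
    (offsets("partition-1") shouldBe 42L).withClue("expected the committed offset of partition 1")
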
 
diff --git a/tests/src/test/scala/akka/kafka/internal/CommittingProducerSinkSpec.scala b/tests/src/test/scala/akka/kafka/internal/CommittingProducerSinkSpec.scala
index 748ebde7..d611691f 100644
--- a/tests/src/test/scala/akka/kafka/internal/CommittingProducerSinkSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/CommittingProducerSinkSpec.scala
@@ -14,22 +14,22 @@ import akka.kafka.internal.KafkaConsumerActor.Internal
 import akka.kafka.scaladsl.Consumer.DrainingControl
 import akka.kafka.scaladsl.Producer
 import akka.kafka.testkit.ConsumerResultFactory
-import akka.kafka.testkit.scaladsl.{ConsumerControlFactory, Slf4jToAkkaLoggingAdapter}
+import akka.kafka.testkit.scaladsl.{ ConsumerControlFactory, Slf4jToAkkaLoggingAdapter }
 import akka.kafka.tests.scaladsl.LogCapturing
 import akka.kafka._
-import akka.stream.scaladsl.{Keep, Source}
+import akka.stream.scaladsl.{ Keep, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.{ActorAttributes, Supervision}
-import akka.testkit.{TestKit, TestProbe}
+import akka.stream.{ ActorAttributes, Supervision }
+import akka.testkit.{ TestKit, TestProbe }
 import org.apache.kafka.clients.consumer.ConsumerRecord
 import org.apache.kafka.clients.producer._
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.serialization.StringSerializer
 import org.scalatest.BeforeAndAfterAll
-import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
 import org.scalatest.flatspec.AnyFlatSpecLike
 import org.scalatest.matchers.should.Matchers
-import org.slf4j.{Logger, LoggerFactory}
+import org.slf4j.{ Logger, LoggerFactory }
 
 import scala.collection.immutable
 import scala.concurrent.duration._
@@ -69,8 +69,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -84,8 +83,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -97,7 +95,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -107,8 +105,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "skip"),
-      consumer.message(partition, "send")
-    )
+      consumer.message(partition, "send"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -125,8 +122,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
         } else {
           ProducerMessage.single(
             new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-            msg.committableOffset
-          )
+            msg.committableOffset)
         }
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
@@ -139,7 +135,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (1)
+      producer.history.asScala should have size 1
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -149,8 +145,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -163,8 +158,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -175,7 +169,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -185,8 +179,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -208,7 +201,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (0)
+      producer.history.asScala should have size 0
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -218,8 +211,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producerRecordsPerInput = 2
     val totalProducerRecords = elements.size * producerRecordsPerInput
@@ -236,12 +228,10 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
         ProducerMessage.multi(
           (1 to producerRecordsPerInput)
             .map(n => new ProducerRecord("targetTopic", msg.record.key, msg.record.value)),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(
-        Producer.committableSink(producerSettings, committerSettings)
-      )(DrainingControl.apply)
+        Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
 
     val commitMsg = consumer.actor.expectMsgClass(classOf[Internal.Commit])
@@ -290,8 +280,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -306,8 +295,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -318,7 +306,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -328,8 +316,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -343,8 +330,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -356,7 +342,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -366,8 +352,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -378,15 +363,15 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     val control = Source(elements)
       .concat(Source.maybe) // keep the source alive
       .idleTimeout(50.millis)
-      .recoverWithRetries(1, {
-        case _ => Source.empty
-      })
+      .recoverWithRetries(1,
+        {
+          case _ => Source.empty
+        })
       .viaMat(ConsumerControlFactory.controlFlow())(Keep.right)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -397,7 +382,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -407,8 +392,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -423,8 +407,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -435,7 +418,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().failed.futureValue shouldBe a[java.util.concurrent.TimeoutException]
   }
@@ -445,8 +428,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     // this producer does not auto complete messages
     val producer = new MockProducer[String, String](false, new StringSerializer, new StringSerializer)
@@ -460,8 +442,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -477,7 +458,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     while (!producer.completeNext()) {}
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().failed.futureValue shouldBe an[akka.kafka.CommitTimeoutException]
   }
@@ -487,8 +468,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](false, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -501,14 +481,12 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(
         Producer
           .committableSink(producerSettings, committerSettings)
-          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
-      )(DrainingControl.apply)
+          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)))(DrainingControl.apply)
       .run()
 
     // fail the first message
@@ -523,7 +501,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -533,8 +511,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](false, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -548,14 +525,12 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(
         Producer
           .committableSink(producerSettings, committerSettings)
-          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
-      )(DrainingControl.apply)
+          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)))(DrainingControl.apply)
       .run()
 
     // fail the first message
@@ -572,7 +547,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     consumer.actor.reply(Done)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().futureValue shouldBe Done
   }
@@ -582,8 +557,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -596,8 +570,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -607,7 +580,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     commitMsg.offsetAndMetadata.offset() shouldBe (consumer.startOffset + 2)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
     control.drainAndShutdown().failed.futureValue shouldBe an[akka.kafka.CommitTimeoutException]
   }
@@ -617,8 +590,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -631,14 +603,12 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(
         Producer
           .committableSink(producerSettings, committerSettings)
-          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
-      )(DrainingControl.apply)
+          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)))(DrainingControl.apply)
       .run()
 
     val commitMsg = consumer.actor.expectMsgClass(classOf[Internal.Commit])
@@ -646,7 +616,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
     commitMsg.offsetAndMetadata.offset() shouldBe (consumer.startOffset + 2)
 
     eventually {
-      producer.history.asScala should have size (2)
+      producer.history.asScala should have size 2
     }
 
     // commit failure is ignored
@@ -658,8 +628,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
 
     val elements = immutable.Seq(
       consumer.message(partition, "value 1"),
-      consumer.message(partition, "value 2")
-    )
+      consumer.message(partition, "value 2"))
 
     val producer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
     val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
@@ -675,20 +644,18 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
         if (msg eq elements(1)) throw new RuntimeException("error")
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(
         Producer
           .committableSink(producerSettings, committerSettings)
-          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
-      )(DrainingControl.apply)
+          .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)))(DrainingControl.apply)
       .run()
 
     consumer.actor.expectNoMessage(10.millis)
 
     eventually {
-      producer.history.asScala should have size (1)
+      producer.history.asScala should have size 1
     }
 
     ScalaFutures.whenReady(control.drainAndShutdown().failed) { e =>
@@ -708,8 +675,7 @@ class CommittingProducerSinkSpec(_system: ActorSystem)
       .map { msg =>
         ProducerMessage.single(
           new ProducerRecord("targetTopic", msg.record.key, msg.record.value),
-          msg.committableOffset
-        )
+          msg.committableOffset)
       }
       .toMat(Producer.committableSink(producerSettings, committerSettings))(DrainingControl.apply)
       .run()
@@ -730,8 +696,6 @@ object CommittingProducerSinkSpec {
         new ConsumerRecord(topic, partition, startOffset, "key", value),
         CommittableOffsetImpl(
           ConsumerResultFactory.partitionOffset(groupId, topic, partition, offset.getAndIncrement()),
-          "metadata"
-        )(fakeCommitter)
-      )
+          "metadata")(fakeCommitter))
   }
 }
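
Taken together, the hunks above show the target style: spaces inside import
braces, closing parentheses kept on the last argument line, 4-space
continuation indents for wrapped parameter lists, 2-space indents for wrapped
argument lists, and redundant parentheses dropped from "have size (n)". A
minimal .scalafmt.conf sketch that would produce these shapes (assumed
settings, not the repository's actual configuration, which is not shown in
this excerpt):

    # sketch only -- assumed settings, not the actual .scalafmt.conf
    version = 3.6.1
    runner.dialect = scala213
    spaces.inImportCurlyBraces = true   # {Keep, Source} becomes { Keep, Source }
    danglingParentheses.preset = false  # closing ")" hugs the last argument line
    indent.defnSite = 4                 # wrapped parameter lists indent 4 spaces
    indent.callSite = 2                 # wrapped argument lists indent 2 spaces
    rewrite.rules = [AvoidInfix, RedundantParens]  # plausibly behind the withClue and size(n) rewrites
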
diff --git a/tests/src/test/scala/akka/kafka/internal/CommittingWithMockSpec.scala b/tests/src/test/scala/akka/kafka/internal/CommittingWithMockSpec.scala
index 1c1c39c9..1a525b97 100644
--- a/tests/src/test/scala/akka/kafka/internal/CommittingWithMockSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/CommittingWithMockSpec.scala
@@ -12,7 +12,7 @@ import akka.actor.ActorSystem
 import akka.kafka.ConsumerMessage._
 import akka.kafka._
 import akka.kafka.scaladsl.Consumer.Control
-import akka.kafka.scaladsl.{Committer, Consumer}
+import akka.kafka.scaladsl.{ Committer, Consumer }
 import akka.kafka.tests.scaladsl.LogCapturing
 import akka.stream._
 import akka.stream.scaladsl._
@@ -24,13 +24,13 @@ import org.apache.kafka.clients.consumer._
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.errors.RebalanceInProgressException
 import org.apache.kafka.common.serialization.StringDeserializer
-import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
 import org.scalatest.BeforeAndAfterAll
 import org.scalatest.flatspec.AnyFlatSpecLike
 import org.scalatest.matchers.should.Matchers
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Future}
+import scala.concurrent.{ Await, Future }
 
 object CommittingWithMockSpec {
   type K = String
@@ -40,9 +40,9 @@ object CommittingWithMockSpec {
   def createMessage(seed: Int): CommittableMessage[K, V] = createMessage(seed, "topic")
 
   def createMessage(seed: Int,
-                    topic: String,
-                    groupId: String = "group1",
-                    metadata: String = ""): CommittableMessage[K, V] = {
+      topic: String,
+      groupId: String = "group1",
+      metadata: String = ""): CommittableMessage[K, V] = {
     val offset = PartitionOffset(GroupTopicPartition(groupId, topic, 1), seed.toLong)
     val record = new ConsumerRecord(offset.key.topic, offset.key.partition, offset.offset, seed.toString, seed.toString)
     CommittableMessage(record, CommittableOffsetImpl(offset, metadata)(null))
@@ -67,10 +67,9 @@ class CommittingWithMockSpec(_system: ActorSystem)
   def this() =
     this(
       ActorSystem("CommittingWithMockSpec",
-                  ConfigFactory
-                    .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
-                    .withFallback(ConfigFactory.load()))
-    )
+        ConfigFactory
+          .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
+          .withFallback(ConfigFactory.load())))
 
   override def afterAll(): Unit =
     shutdown(system)
@@ -81,8 +80,8 @@ class CommittingWithMockSpec(_system: ActorSystem)
   val onCompleteFailure: ConsumerMock.OnCompleteHandler = _ => (null, failure)
 
   def createCommittableSource(mock: Consumer[K, V],
-                              groupId: String = "group1",
-                              topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
+      groupId: String = "group1",
+      topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
     Consumer
       .committableSource(
         ConsumerSettings
@@ -90,13 +89,12 @@ class CommittingWithMockSpec(_system: ActorSystem)
           .withGroupId(groupId)
           .withConsumerFactory(_ => mock)
           .withStopTimeout(0.seconds),
-        Subscriptions.topics(topics)
-      )
+        Subscriptions.topics(topics))
 
   def createSourceWithMetadata(mock: Consumer[K, V],
-                               metadataFromRecord: ConsumerRecord[K, V] => String,
-                               groupId: String = "group1",
-                               topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
+      metadataFromRecord: ConsumerRecord[K, V] => String,
+      groupId: String = "group1",
+      topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
     Consumer.commitWithMetadataSource(
       ConsumerSettings
         .create(system, new StringDeserializer, new StringDeserializer)
@@ -104,8 +102,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
         .withCloseTimeout(ConsumerMock.closeTimeout)
         .withConsumerFactory(_ => mock),
       Subscriptions.topics(topics),
-      metadataFromRecord
-    )
+      metadataFromRecord)
 
   it should "commit metadata in message" in assertAllStagesStopped {
     val commitLog = new ConsumerMock.LogHandler()
@@ -122,7 +119,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = probe.expectNext().committableOffset.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val (topicPartition, offsetMeta) = commitLog.calls.head._1.head
@@ -153,7 +150,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = probe.expectNext().committableOffset.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val (topicPartition, offsetMeta) = commitLog.calls.head._1.head
@@ -183,7 +180,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = probe.expectNext().committableOffset.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     // allow poll to emulate commits
@@ -196,7 +193,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
   }
 
   val exceptions = List(new RebalanceInProgressException(),
-                        new RetriableCommitFailedException(new CommitTimeoutException("injected15")))
+    new RetriableCommitFailedException(new CommitTimeoutException("injected15")))
   for (exception <- exceptions) {
     it should s"retry commit on ${exception.getClass.getSimpleName}" in assertAllStagesStopped {
       val retries = 4
@@ -218,7 +215,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
       val done = probe.expectNext().committableOffset.commitInternal()
 
       awaitAssert {
-        commitLog.calls should have size (1)
+        commitLog.calls should have size 1
       }
 
       // allow poll to emulate commits
@@ -285,7 +282,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = batch.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val commitMap = commitLog.calls.head._1
@@ -303,8 +300,8 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val commitLog = new ConsumerMock.LogHandler()
     val mock = new ConsumerMock[K, V](commitLog)
     val (control, probe) = createSourceWithMetadata(mock.mock,
-                                                    (rec: ConsumerRecord[K, V]) => rec.offset.toString,
-                                                    topics = Set("topic1", "topic2"))
+      (rec: ConsumerRecord[K, V]) => rec.offset.toString,
+      topics = Set("topic1", "topic2"))
       .toMat(TestSink.probe)(Keep.both)
       .run()
 
@@ -322,7 +319,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = batch.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val commitMap = commitLog.calls.head._1
@@ -342,8 +339,8 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val commitLog = new ConsumerMock.LogHandler()
     val mock = new ConsumerMock[K, V](commitLog)
     val (control, probe) = createSourceWithMetadata(mock.mock,
-                                                    (rec: ConsumerRecord[K, V]) => rec.offset.toString,
-                                                    topics = Set("topic1", "topic2"))
+      (rec: ConsumerRecord[K, V]) => rec.offset.toString,
+      topics = Set("topic1", "topic2"))
       .toMat(TestSink.probe)(Keep.both)
       .run()
 
@@ -363,7 +360,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done = batch.commitInternal()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val commitMap = commitLog.calls.head._1
@@ -379,7 +376,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     Await.result(control.shutdown(), remainingOrDefault)
   }
 
-  //FIXME looks like current implementation of batch committer is incorrect
+  // FIXME looks like current implementation of batch committer is incorrect
   it should "support commit batching from more than one stage" in assertAllStagesStopped {
     val commitLog1 = new ConsumerMock.LogHandler()
     val commitLog2 = new ConsumerMock.LogHandler()
@@ -418,8 +415,8 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val done2 = batch2.commitInternal()
 
     awaitAssert {
-      commitLog1.calls should have size (1)
-      commitLog2.calls should have size (1)
+      commitLog1.calls should have size 1
+      commitLog2.calls should have size 1
     }
 
     val commitMap1 = commitLog1.calls.head._1
@@ -444,8 +441,8 @@ class CommittingWithMockSpec(_system: ActorSystem)
     val commitLog = new ConsumerMock.LogHandler()
     val mock = new ConsumerMock[K, V](commitLog)
     val (control, probe) = createSourceWithMetadata(mock.mock,
-                                                    (rec: ConsumerRecord[K, V]) => rec.offset.toString,
-                                                    topics = Set("topic1", "topic2"))
+      (rec: ConsumerRecord[K, V]) => rec.offset.toString,
+      topics = Set("topic1", "topic2"))
       .toMat(TestSink.probe)(Keep.both)
       .run()
 
@@ -463,7 +460,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
     batch.tellCommit()
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val commitMap = commitLog.calls.head._1
@@ -514,7 +511,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
 
     val resumeOnCommitFailed: Supervision.Decider = {
       case _: CommitFailedException => Supervision.Resume
-      case _ => Supervision.Stop
+      case _                        => Supervision.Stop
     }
 
     val (control, probe) = createCommittableSource(mock.mock)
@@ -522,8 +519,7 @@ class CommittingWithMockSpec(_system: ActorSystem)
       .toMat(
         Committer
           .sink(committerSettings)
-          .withAttributes(ActorAttributes.supervisionStrategy(resumeOnCommitFailed))
-      )(Keep.both)
+          .withAttributes(ActorAttributes.supervisionStrategy(resumeOnCommitFailed)))(Keep.both)
       .run()
 
     awaitAssert {
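
A tree-wide commit like this is normally produced by a single formatter run
rather than by hand. A sketch, assuming the build uses the sbt-scalafmt
plugin (the plugin wiring is an assumption, not shown in this excerpt):

    // project/plugins.sbt -- assumed plugin wiring
    addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")

    // then, from the repository root:
    //   sbt scalafmtAll scalafmtSbt
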
diff --git a/tests/src/test/scala/akka/kafka/internal/ConnectionCheckerSpec.scala b/tests/src/test/scala/akka/kafka/internal/ConnectionCheckerSpec.scala
index 5898a31f..e2b8a966 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConnectionCheckerSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConnectionCheckerSpec.scala
@@ -5,7 +5,7 @@
 
 package akka.kafka.internal
 
-import akka.actor.{ActorRef, ActorSystem}
+import akka.actor.{ ActorRef, ActorSystem }
 import akka.kafka.Metadata
 import akka.kafka.ConnectionCheckerSettings
 import akka.kafka.KafkaConnectionFailed
@@ -17,7 +17,7 @@ import org.scalatest.wordspec.AnyWordSpecLike
 import org.scalatest.matchers.should.Matchers
 
 import scala.concurrent.duration._
-import scala.util.{Failure, Success}
+import scala.util.{ Failure, Success }
 
 class ConnectionCheckerSpec
     extends TestKit(ActorSystem("KafkaConnectionCheckerSpec", ConfigFactory.load()))
diff --git a/tests/src/test/scala/akka/kafka/internal/ConsumerDummy.scala b/tests/src/test/scala/akka/kafka/internal/ConsumerDummy.scala
index 14139d57..dd49bfc1 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConsumerDummy.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConsumerDummy.scala
@@ -11,8 +11,8 @@ import java.util.concurrent.atomic.AtomicInteger
 
 import akka.Done
 import org.apache.kafka.clients.consumer._
-import org.apache.kafka.common.{Metric, MetricName, PartitionInfo, TopicPartition}
-import org.slf4j.{Logger, LoggerFactory}
+import org.apache.kafka.common.{ Metric, MetricName, PartitionInfo, TopicPartition }
+import org.slf4j.{ Logger, LoggerFactory }
 
 import scala.concurrent.Promise
 
@@ -47,7 +47,7 @@ abstract class ConsumerDummy[K, V] extends Consumer[K, V] {
   override def commitAsync(): Unit = ???
   override def commitAsync(callback: OffsetCommitCallback): Unit = ???
   override def commitAsync(offsets: java.util.Map[TopicPartition, OffsetAndMetadata],
-                           callback: OffsetCommitCallback): Unit = ???
+      callback: OffsetCommitCallback): Unit = ???
   override def seek(partition: TopicPartition, offset: Long): Unit = ???
   override def seek(partition: TopicPartition, offsetAndMetadata: OffsetAndMetadata): Unit = ???
   override def seekToBeginning(partitions: java.util.Collection[TopicPartition]): Unit = ???
@@ -63,34 +63,32 @@ abstract class ConsumerDummy[K, V] extends Consumer[K, V] {
     firstPausingPromise.trySuccess(Done)
   override def resume(partitions: java.util.Collection[TopicPartition]): Unit = ???
   override def offsetsForTimes(
-      timestampsToSearch: java.util.Map[TopicPartition, java.lang.Long]
-  ): java.util.Map[TopicPartition, OffsetAndTimestamp] = ???
+      timestampsToSearch: java.util.Map[TopicPartition, java.lang.Long])
+      : java.util.Map[TopicPartition, OffsetAndTimestamp] = ???
   override def offsetsForTimes(timestampsToSearch: java.util.Map[TopicPartition, java.lang.Long],
-                               timeout: java.time.Duration): java.util.Map[TopicPartition, OffsetAndTimestamp] = ???
+      timeout: java.time.Duration): java.util.Map[TopicPartition, OffsetAndTimestamp] = ???
   override def beginningOffsets(
-      partitions: java.util.Collection[TopicPartition]
-  ): java.util.Map[TopicPartition, java.lang.Long] = ???
+      partitions: java.util.Collection[TopicPartition]): java.util.Map[TopicPartition, java.lang.Long] = ???
   override def endOffsets(
-      partitions: java.util.Collection[TopicPartition]
-  ): java.util.Map[TopicPartition, java.lang.Long] = ???
+      partitions: java.util.Collection[TopicPartition]): java.util.Map[TopicPartition, java.lang.Long] = ???
   override def close(): Unit = {}
   override def close(timeout: java.time.Duration): Unit = {}
   override def wakeup(): Unit = ???
 
   override def commitSync(timeout: java.time.Duration): Unit = ???
   override def commitSync(offsets: java.util.Map[TopicPartition, OffsetAndMetadata],
-                          timeout: java.time.Duration): Unit = ???
+      timeout: java.time.Duration): Unit = ???
   override def committed(partition: TopicPartition, timeout: java.time.Duration): OffsetAndMetadata = ???
   override def committed(partitions: util.Set[TopicPartition]): util.Map[TopicPartition, OffsetAndMetadata] = ???
   override def committed(partitions: util.Set[TopicPartition],
-                         timeout: Duration): util.Map[TopicPartition, OffsetAndMetadata] = ???
+      timeout: Duration): util.Map[TopicPartition, OffsetAndMetadata] = ???
 
   override def partitionsFor(topic: String, timeout: java.time.Duration): java.util.List[PartitionInfo] = ???
   override def listTopics(timeout: java.time.Duration): java.util.Map[String, java.util.List[PartitionInfo]] = ???
   override def beginningOffsets(partitions: java.util.Collection[TopicPartition],
-                                timeout: java.time.Duration): java.util.Map[TopicPartition, java.lang.Long] = ???
+      timeout: java.time.Duration): java.util.Map[TopicPartition, java.lang.Long] = ???
   override def endOffsets(partitions: java.util.Collection[TopicPartition],
-                          timeout: java.time.Duration): java.util.Map[TopicPartition, java.lang.Long] = ???
+      timeout: java.time.Duration): java.util.Map[TopicPartition, java.lang.Long] = ???
   override def poll(timeout: java.time.Duration): ConsumerRecords[K, V] = ???
   override def groupMetadata(): ConsumerGroupMetadata = ???
   override def enforceRebalance(): Unit = ???
diff --git a/tests/src/test/scala/akka/kafka/internal/ConsumerMock.scala b/tests/src/test/scala/akka/kafka/internal/ConsumerMock.scala
index 70b9061f..95094e88 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConsumerMock.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConsumerMock.scala
@@ -15,7 +15,7 @@ import org.mockito.Mockito._
 import org.mockito.invocation.InvocationOnMock
 import org.mockito.stubbing.Answer
 import org.mockito.verification.VerificationMode
-import org.mockito.{ArgumentMatchers, Mockito}
+import org.mockito.{ ArgumentMatchers, Mockito }
 
 import scala.jdk.CollectionConverters._
 import scala.collection.immutable.Seq
@@ -109,8 +109,7 @@ class ConsumerMock[K, V](handler: ConsumerMock.CommitHandler = new ConsumerMock.
     Mockito
       .when(
         result.commitAsync(ArgumentMatchers.any[java.util.Map[TopicPartition, OffsetAndMetadata]],
-                           ArgumentMatchers.any[OffsetCommitCallback])
-      )
+          ArgumentMatchers.any[OffsetCommitCallback]))
       .thenAnswer(new Answer[Unit] {
         override def answer(invocation: InvocationOnMock) = {
           val offsets = invocation.getArgument[java.util.Map[TopicPartition, OffsetAndMetadata]](0)
@@ -121,8 +120,7 @@ class ConsumerMock[K, V](handler: ConsumerMock.CommitHandler = new ConsumerMock.
       })
     Mockito
       .when(
-        result.subscribe(ArgumentMatchers.any[java.util.List[String]], ArgumentMatchers.any[ConsumerRebalanceListener])
-      )
+        result.subscribe(ArgumentMatchers.any[java.util.List[String]], ArgumentMatchers.any[ConsumerRebalanceListener]))
       .thenAnswer(new Answer[Unit] {
         override def answer(invocation: InvocationOnMock) = {
           val topics = invocation.getArgument[java.util.List[String]](0)
diff --git a/tests/src/test/scala/akka/kafka/internal/ConsumerProgressTrackingSpec.scala b/tests/src/test/scala/akka/kafka/internal/ConsumerProgressTrackingSpec.scala
index 78c74ad4..0f67304b 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConsumerProgressTrackingSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConsumerProgressTrackingSpec.scala
@@ -6,7 +6,7 @@
 package akka.kafka.internal
 
 import akka.kafka.tests.scaladsl.LogCapturing
-import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecord, ConsumerRecords, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRecord, ConsumerRecords, OffsetAndMetadata }
 import org.apache.kafka.common.TopicPartition
 import org.mockito.Mockito
 import org.scalatest.flatspec.AnyFlatSpecLike
@@ -75,10 +75,8 @@ class ConsumerProgressTrackingSpec extends AnyFlatSpecLike with Matchers with Lo
     tracker.received(
       new ConsumerRecords[String, String](
         Map(
-          tp2 -> List(new ConsumerRecord[String, String](tp2.topic(), tp2.partition(), 10L, "k1", "kv")).asJava
-        ).asJava
-      )
-    )
+          tp2 -> List(new ConsumerRecord[String, String](tp2.topic(), tp2.partition(), 10L, "k1",
+            "kv")).asJava).asJava))
     tracker.receivedMessages.map(extractOffsetFromSafe) should be(Map(tp -> 10L))
     // no change to the committing
     tracker.commitRequested.map(extractOffset) should be(Map(tp -> 0L))
@@ -168,7 +166,7 @@ class ConsumerProgressTrackingSpec extends AnyFlatSpecLike with Matchers with Lo
         state = state.filter { case (tp, _) => !revokedTps.contains(tp) }
       }
       override def assignedPositions(assignedTps: Set[TopicPartition],
-                                     assignedOffsets: Map[TopicPartition, Long]): Unit = {
+          assignedOffsets: Map[TopicPartition, Long]): Unit = {
         state = state ++ assignedOffsets
       }
     }
diff --git a/tests/src/test/scala/akka/kafka/internal/ConsumerResetProtectionSpec.scala b/tests/src/test/scala/akka/kafka/internal/ConsumerResetProtectionSpec.scala
index 0457a650..598da943 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConsumerResetProtectionSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConsumerResetProtectionSpec.scala
@@ -11,14 +11,14 @@ import akka.kafka.OffsetResetProtectionSettings
 import akka.kafka.internal.KafkaConsumerActor.Internal.Seek
 import akka.kafka.testkit.scaladsl.Slf4jToAkkaLoggingAdapter
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.testkit.{ImplicitSender, TestKit}
-import org.apache.kafka.clients.consumer.{ConsumerRecord, ConsumerRecords}
+import akka.testkit.{ ImplicitSender, TestKit }
+import org.apache.kafka.clients.consumer.{ ConsumerRecord, ConsumerRecords }
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.header.internals.RecordHeaders
 import org.apache.kafka.common.record.TimestampType
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpecLike
-import org.slf4j.{Logger, LoggerFactory}
+import org.slf4j.{ Logger, LoggerFactory }
 
 import java.util.Optional
 import scala.concurrent.duration._
@@ -89,33 +89,30 @@ class ConsumerResetProtectionSpec
       progress.received(
         asConsumerRecords(
           new ConsumerRecord(tp.topic(),
-                             tp.partition(),
-                             /* offset= */ 100L,
-                             /* timestamp = */ 100L,
-                             TimestampType.LOG_APPEND_TIME,
-                             ConsumerRecord.NULL_SIZE,
-                             ConsumerRecord.NULL_SIZE,
-                             "k1",
-                             "kv",
-                             new RecordHeaders(),
-                             Optional.empty[Integer]())
-        )
-      )
+            tp.partition(),
+            /* offset= */ 100L,
+            /* timestamp = */ 100L,
+            TimestampType.LOG_APPEND_TIME,
+            ConsumerRecord.NULL_SIZE,
+            ConsumerRecord.NULL_SIZE,
+            "k1",
+            "kv",
+            new RecordHeaders(),
+            Optional.empty[Integer]())))
 
       // later, we get offset 90L and timestamp 10, the latter of which is outside our 50 milli threshold
       val timeRecords = asConsumerRecords(
         new ConsumerRecord(tp.topic(),
-                           tp.partition(),
-                           /* offset= */ 90L,
-                           /* timestamp = */ 10L,
-                           TimestampType.LOG_APPEND_TIME,
-                           ConsumerRecord.NULL_SIZE,
-                           ConsumerRecord.NULL_SIZE,
-                           "k1",
-                           "kv",
-                           new RecordHeaders(),
-                           Optional.empty[Integer]())
-      )
+          tp.partition(),
+          /* offset= */ 90L,
+          /* timestamp = */ 10L,
+          TimestampType.LOG_APPEND_TIME,
+          ConsumerRecord.NULL_SIZE,
+          ConsumerRecord.NULL_SIZE,
+          "k1",
+          "kv",
+          new RecordHeaders(),
+          Optional.empty[Integer]()))
       protection.protect[String, String](self, timeRecords).count() should be(0)
       expectMsg(10.seconds, Seek(Map(tp -> 100L)))
     }
@@ -137,12 +134,10 @@ class ConsumerResetProtectionSpec
       // drop the old offsets in this batch, so back to the original set of records
       protectedRecords = protection
         .protect(self,
-                 new ConsumerRecords(
-                   Map(
-                     tp -> List(m1).asJava,
-                     tp1 -> List(new ConsumerRecord(tp1.topic(), tp1.partition(), 10L, "k1", "kv")).asJava
-                   ).asJava
-                 ))
+          new ConsumerRecords(
+            Map(
+              tp -> List(m1).asJava,
+              tp1 -> List(new ConsumerRecord(tp1.topic(), tp1.partition(), 10L, "k1", "kv")).asJava).asJava))
       shouldHaveEqualRecords(records, protectedRecords)
     }
 
@@ -164,11 +159,7 @@ class ConsumerResetProtectionSpec
             tp -> List(
               new ConsumerRecord(tp.topic(), tp.partition(), 101L, "k1", "kv"),
               new ConsumerRecord(tp.topic(), tp.partition(), 1L, "k2", "kv"),
-              new ConsumerRecord(tp.topic(), tp.partition(), 102L, "k1", "kv")
-            ).asJava
-          ).asJava
-        )
-      )
+              new ConsumerRecord(tp.topic(), tp.partition(), 102L, "k1", "kv")).asJava).asJava))
       records.count() should be(3)
       records.records(tp).asScala.map(_.offset()) should be(Seq(101L, 1L, 102L))
     }
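
Several hunks above rebuild the same fixture: a ConsumerRecords instance
assembled from a Scala Map of partitions to record lists. A self-contained
sketch of that pattern (hypothetical topic and values):

    // self-contained sketch of the ConsumerRecords fixture pattern used above
    import org.apache.kafka.clients.consumer.{ ConsumerRecord, ConsumerRecords }
    import org.apache.kafka.common.TopicPartition
    import scala.jdk.CollectionConverters._

    val tp = new TopicPartition("topic", 0) // hypothetical topic and partition
    val records = new ConsumerRecords[String, String](
      Map(tp -> List(new ConsumerRecord(tp.topic(), tp.partition(), 0L, "k", "v")).asJava).asJava)
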
diff --git a/tests/src/test/scala/akka/kafka/internal/ConsumerSpec.scala b/tests/src/test/scala/akka/kafka/internal/ConsumerSpec.scala
index 132a745e..1e1f3a0e 100644
--- a/tests/src/test/scala/akka/kafka/internal/ConsumerSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ConsumerSpec.scala
@@ -11,7 +11,7 @@ import akka.kafka.ConsumerMessage._
 import akka.kafka.scaladsl.Consumer
 import akka.kafka.scaladsl.Consumer.Control
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.kafka.{CommitTimeoutException, ConsumerSettings, Repeated, Subscriptions}
+import akka.kafka.{ CommitTimeoutException, ConsumerSettings, Repeated, Subscriptions }
 import akka.stream.scaladsl._
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import akka.stream.testkit.scaladsl.TestSink
@@ -27,7 +27,7 @@ import org.scalatest.matchers.should.Matchers
 
 import scala.collection.immutable.Seq
 import scala.concurrent.duration._
-import scala.concurrent.{Await, Future}
+import scala.concurrent.{ Await, Future }
 import scala.jdk.CollectionConverters._
 
 object ConsumerSpec {
@@ -38,9 +38,9 @@ object ConsumerSpec {
   def createMessage(seed: Int): CommittableMessage[K, V] = createMessage(seed, "topic")
 
   def createMessage(seed: Int,
-                    topic: String,
-                    groupId: String = "group1",
-                    metadata: String = ""): CommittableMessage[K, V] = {
+      topic: String,
+      groupId: String = "group1",
+      metadata: String = ""): CommittableMessage[K, V] = {
     val offset = PartitionOffset(GroupTopicPartition(groupId, topic, 1), seed.toLong)
     val record = new ConsumerRecord(offset.key.topic, offset.key.partition, offset.offset, seed.toString, seed.toString)
     CommittableMessage(record, CommittableOffsetImpl(offset, metadata)(null))
@@ -63,10 +63,9 @@ class ConsumerSpec(_system: ActorSystem)
   def this() =
     this(
       ActorSystem("ConsumerSpec",
-                  ConfigFactory
-                    .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
-                    .withFallback(ConfigFactory.load()))
-    )
+        ConfigFactory
+          .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
+          .withFallback(ConfigFactory.load())))
 
   override def afterAll(): Unit =
     shutdown(system)
@@ -88,8 +87,8 @@ class ConsumerSpec(_system: ActorSystem)
   }
 
   def createCommittableSource(mock: Consumer[K, V],
-                              groupId: String = "group1",
-                              topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
+      groupId: String = "group1",
+      topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
     Consumer.committableSource(
       ConsumerSettings
         .create(system, new StringDeserializer, new StringDeserializer)
@@ -97,13 +96,12 @@ class ConsumerSpec(_system: ActorSystem)
         .withCloseTimeout(ConsumerMock.closeTimeout)
         .withCommitTimeout(500.millis)
         .withConsumerFactory(_ => mock),
-      Subscriptions.topics(topics)
-    )
+      Subscriptions.topics(topics))
 
   def createSourceWithMetadata(mock: Consumer[K, V],
-                               metadataFromRecord: ConsumerRecord[K, V] => String,
-                               groupId: String = "group1",
-                               topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
+      metadataFromRecord: ConsumerRecord[K, V] => String,
+      groupId: String = "group1",
+      topics: Set[String] = Set("topic")): Source[CommittableMessage[K, V], Control] =
     Consumer.commitWithMetadataSource(
       ConsumerSettings
         .create(system, new StringDeserializer, new StringDeserializer)
@@ -111,8 +109,7 @@ class ConsumerSpec(_system: ActorSystem)
         .withCloseTimeout(ConsumerMock.closeTimeout)
         .withConsumerFactory(_ => mock),
       Subscriptions.topics(topics),
-      metadataFromRecord
-    )
+      metadataFromRecord)
 
   it should "fail stream when poll() fails with unhandled exception" in assertAllStagesStopped {
     val mock = new FailingConsumerMock[K, V](new Exception("Fatal Kafka error"), failOnCallNumber = 1)
@@ -170,8 +167,7 @@ class ConsumerSpec(_system: ActorSystem)
         .grouped(97)
         .map(x => Seq(Seq.empty, x))
         .flatten
-        .toList
-    )
+        .toList)
   }
 
   it should "complete out and keep underlying client open when control.stop called" in assertAllStagesStopped {
@@ -254,7 +250,7 @@ class ConsumerSpec(_system: ActorSystem)
     probe.expectNextN(9)
 
     awaitAssert {
-      commitLog.calls should have size (1)
+      commitLog.calls should have size 1
     }
 
     val stopped = control.shutdown()
@@ -263,7 +259,7 @@ class ConsumerSpec(_system: ActorSystem)
     Thread.sleep(100)
     stopped.isCompleted should ===(false)
 
-    //emulate commit
+    // emulate commit
     commitLog.calls.foreach {
       case (offsets, callback) => callback.onComplete(offsets.asJava, null)
     }
@@ -320,7 +316,7 @@ class ConsumerSpec(_system: ActorSystem)
     probe.expectNoMessage(200.millis)
     control.isShutdown.isCompleted should ===(false)
 
-    //emulate commit
+    // emulate commit
     commitLog.calls.foreach {
       case (offsets, callback) => callback.onComplete(offsets.asJava, null)
     }
diff --git a/tests/src/test/scala/akka/kafka/internal/OffsetAggregationSpec.scala b/tests/src/test/scala/akka/kafka/internal/OffsetAggregationSpec.scala
index f20176de..6989d011 100644
--- a/tests/src/test/scala/akka/kafka/internal/OffsetAggregationSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/OffsetAggregationSpec.scala
@@ -38,16 +38,13 @@ class OffsetAggregationSpec extends AnyWordSpec with Matchers with LogCapturing
     "give the highest offsets (when mixed)" in {
       val in1 = List(
         new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA),
-        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(11, OffsetFetchResponse.NO_METADATA)
-      )
+        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(11, OffsetFetchResponse.NO_METADATA))
       val in2 = List(
         new TopicPartition(topicA, 1) -> new OffsetAndMetadata(12, OffsetFetchResponse.NO_METADATA),
-        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA)
-      )
+        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA))
       KafkaConsumerActor.aggregateOffsets(in1 ++ in2) shouldBe Map(
         new TopicPartition(topicA, 1) -> new OffsetAndMetadata(42, OffsetFetchResponse.NO_METADATA),
-        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA)
-      )
+        new TopicPartition(topicB, 1) -> new OffsetAndMetadata(43, OffsetFetchResponse.NO_METADATA))
     }
   }
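
The expected result in the hunk above encodes the aggregation rule under
test: when the same partition appears more than once, the highest offset
wins. A stand-in sketch of that merge (a hypothetical helper, not the actual
KafkaConsumerActor.aggregateOffsets implementation):

    // hypothetical stand-in for the max-offset merge the test exercises
    import org.apache.kafka.clients.consumer.OffsetAndMetadata
    import org.apache.kafka.common.TopicPartition

    def aggregate(offsets: List[(TopicPartition, OffsetAndMetadata)]): Map[TopicPartition, OffsetAndMetadata] =
      offsets.groupMapReduce(_._1)(_._2)((a, b) => if (a.offset >= b.offset) a else b)
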
 
diff --git a/tests/src/test/scala/akka/kafka/internal/PartitionedSourceSpec.scala b/tests/src/test/scala/akka/kafka/internal/PartitionedSourceSpec.scala
index 46e4993f..0d770dd9 100644
--- a/tests/src/test/scala/akka/kafka/internal/PartitionedSourceSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/PartitionedSourceSpec.scala
@@ -6,7 +6,7 @@
 package akka.kafka.internal
 
 import java.util.concurrent.atomic.AtomicReference
-import java.util.concurrent.{CountDownLatch, TimeUnit}
+import java.util.concurrent.{ CountDownLatch, TimeUnit }
 import java.util.function.UnaryOperator
 
 import akka.Done
@@ -14,7 +14,7 @@ import akka.actor.ActorSystem
 import akka.kafka.ConsumerMessage._
 import akka.kafka.scaladsl.Consumer
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.kafka.{ConsumerSettings, Subscriptions}
+import akka.kafka.{ ConsumerSettings, Subscriptions }
 import akka.stream.scaladsl._
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import akka.stream.testkit.scaladsl.TestSink
@@ -23,11 +23,11 @@ import com.typesafe.config.ConfigFactory
 import org.apache.kafka.clients.consumer._
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.serialization.StringDeserializer
-import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
+import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
 import org.scalatest.flatspec.AnyFlatSpecLike
 import org.scalatest.matchers.should.Matchers
-import org.scalatest.{BeforeAndAfterAll, OptionValues}
-import org.slf4j.{Logger, LoggerFactory}
+import org.scalatest.{ BeforeAndAfterAll, OptionValues }
+import org.slf4j.{ Logger, LoggerFactory }
 
 import scala.concurrent.Future
 import scala.concurrent.duration._
@@ -49,10 +49,9 @@ class PartitionedSourceSpec(_system: ActorSystem)
   def this() =
     this(
       ActorSystem("PartitionedSourceSpec",
-                  ConfigFactory
-                    .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
-                    .withFallback(ConfigFactory.load()))
-    )
+        ConfigFactory
+          .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
+          .withFallback(ConfigFactory.load())))
 
   override def afterAll(): Unit =
     shutdown(system)
@@ -366,11 +365,11 @@ class PartitionedSourceSpec(_system: ActorSystem)
 
     val sink = Consumer
       .plainPartitionedManualOffsetSource(consumerSettings(dummy),
-                                          Subscriptions.topics(topic),
-                                          getOffsetsOnAssign,
-                                          onRevoke = { tp =>
-                                            revoked = revoked ++ tp
-                                          })
+        Subscriptions.topics(topic),
+        getOffsetsOnAssign,
+        onRevoke = { tp =>
+          revoked = revoked ++ tp
+        })
       .runWith(TestSink.probe)
 
     dummy.started.futureValue should be(Done)
@@ -473,8 +472,8 @@ class PartitionedSourceSpec(_system: ActorSystem)
 
     val sink = Consumer
       .committablePartitionedManualOffsetSource(consumerSettings(dummy),
-                                                Subscriptions.topics(topic),
-                                                getOffsetsOnAssign)
+        Subscriptions.topics(topic),
+        getOffsetsOnAssign)
       .runWith(TestSink.probe)
 
     dummy.started.futureValue should be(Done)
@@ -507,8 +506,8 @@ class PartitionedSourceSpec(_system: ActorSystem)
 
     val sink = Consumer
       .committablePartitionedManualOffsetSource(consumerSettings(dummy),
-                                                Subscriptions.topics(topic),
-                                                getOffsetsOnAssign)
+        Subscriptions.topics(topic),
+        getOffsetsOnAssign)
       .runWith(TestSink.probe)
 
     dummy.started.futureValue should be(Done)
@@ -548,8 +547,8 @@ class PartitionedSourceSpec(_system: ActorSystem)
 
     val sink = Consumer
       .committablePartitionedManualOffsetSource(consumerSettings(dummy),
-                                                Subscriptions.topics(topic),
-                                                getOffsetsOnAssign)
+        Subscriptions.topics(topic),
+        getOffsetsOnAssign)
       .runWith(TestSink.probe)
 
     dummy.started.futureValue should be(Done)
@@ -583,11 +582,11 @@ class PartitionedSourceSpec(_system: ActorSystem)
 
     val sink = Consumer
       .committablePartitionedManualOffsetSource(consumerSettings(dummy),
-                                                Subscriptions.topics(topic),
-                                                getOffsetsOnAssign,
-                                                onRevoke = { tp =>
-                                                  revoked = revoked ++ tp
-                                                })
+        Subscriptions.topics(topic),
+        getOffsetsOnAssign,
+        onRevoke = { tp =>
+          revoked = revoked ++ tp
+        })
       .runWith(TestSink.probe)
 
     dummy.started.futureValue should be(Done)
diff --git a/tests/src/test/scala/akka/kafka/internal/ProducerSpec.scala b/tests/src/test/scala/akka/kafka/internal/ProducerSpec.scala
index 39a8fbd7..6b7a7d98 100644
--- a/tests/src/test/scala/akka/kafka/internal/ProducerSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/ProducerSpec.scala
@@ -7,19 +7,19 @@ package akka.kafka.internal
 
 import java.util.concurrent.CompletableFuture
 import akka.actor.ActorSystem
-import akka.kafka.ConsumerMessage.{GroupTopicPartition, PartitionOffset, PartitionOffsetCommittedMarker}
+import akka.kafka.ConsumerMessage.{ GroupTopicPartition, PartitionOffset, PartitionOffsetCommittedMarker }
 import akka.kafka.ProducerMessage._
 import akka.kafka.scaladsl.Producer
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.kafka.{ConsumerMessage, ProducerMessage, ProducerSettings}
-import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
+import akka.kafka.{ ConsumerMessage, ProducerMessage, ProducerSettings }
+import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.testkit.scaladsl.{TestSink, TestSource}
-import akka.stream.{ActorAttributes, Supervision}
+import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
+import akka.stream.{ ActorAttributes, Supervision }
 import akka.testkit.TestKit
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
 import com.typesafe.config.ConfigFactory
-import org.apache.kafka.clients.consumer.{ConsumerGroupMetadata, OffsetAndMetadata}
+import org.apache.kafka.clients.consumer.{ ConsumerGroupMetadata, OffsetAndMetadata }
 import org.apache.kafka.clients.producer._
 import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.serialization.StringSerializer
@@ -34,9 +34,9 @@ import org.scalatest.flatspec.AnyFlatSpecLike
 import org.scalatest.matchers.should.Matchers
 
 import scala.concurrent.duration._
-import scala.concurrent.{Await, ExecutionContext, Future, Promise}
+import scala.concurrent.{ Await, ExecutionContext, Future, Promise }
 import scala.jdk.CollectionConverters._
-import scala.util.{Failure, Success, Try}
+import scala.util.{ Failure, Success, Try }
 
 class ProducerSpec(_system: ActorSystem)
     extends TestKit(_system)
@@ -48,10 +48,9 @@ class ProducerSpec(_system: ActorSystem)
   def this() =
     this(
       ActorSystem("ProducerSpec",
-                  ConfigFactory
-                    .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
-                    .withFallback(ConfigFactory.load()))
-    )
+        ConfigFactory
+          .parseString("""akka.stream.materializer.debug.fuzzing-mode = on""")
+          .withFallback(ConfigFactory.load())))
 
   override def afterAll(): Unit = shutdown(system)
 
@@ -75,13 +74,12 @@ class ProducerSpec(_system: ActorSystem)
       .PartitionOffset(GroupTopicPartition(group, tuple._1.topic(), 1), tuple._2.offset())
     val partitionOffsetCommittedMarker =
       PartitionOffsetCommittedMarker(consumerMessage.key,
-                                     consumerMessage.offset,
-                                     committer,
-                                     fromPartitionedSource = false)
+        consumerMessage.offset,
+        committer,
+        fromPartitionedSource = false)
     ProducerMessage.Message(
       tuple._1,
-      partitionOffsetCommittedMarker
-    )
+      partitionOffsetCommittedMarker)
   }
 
   def result(r: Record, m: RecordMetadata) = Result(m, ProducerMessage.Message(r, NotUsed))
@@ -97,24 +95,21 @@ class ProducerSpec(_system: ActorSystem)
       .withEosCommitInterval(10.milliseconds)
 
   def testProducerFlow[P](mock: ProducerMock[K, V],
-                          closeOnStop: Boolean = true): Flow[Message[K, V, P], Result[K, V, P], NotUsed] = {
+      closeOnStop: Boolean = true): Flow[Message[K, V, P], Result[K, V, P], NotUsed] = {
     val pSettings = settings.withProducer(mock.mock).withCloseProducerOnStop(closeOnStop)
     Flow
       .fromGraph(
-        new DefaultProducerStage[K, V, P, Message[K, V, P], Result[K, V, P]](pSettings)
-      )
+        new DefaultProducerStage[K, V, P, Message[K, V, P], Result[K, V, P]](pSettings))
       .mapAsync(1)(identity)
   }
 
   def testTransactionProducerFlow[P](
       mock: ProducerMock[K, V],
-      closeOnStop: Boolean = true
-  ): Flow[Envelope[K, V, P], Results[K, V, P], NotUsed] = {
+      closeOnStop: Boolean = true): Flow[Envelope[K, V, P], Results[K, V, P], NotUsed] = {
     val pSettings = settings.withProducerFactory(_ => mock.mock).withCloseProducerOnStop(closeOnStop)
     Flow
       .fromGraph(
-        new TransactionalProducerStage[K, V, P](pSettings, "transactionalId")
-      )
+        new TransactionalProducerStage[K, V, P](pSettings, "transactionalId"))
       .mapAsync(1)(identity)
   }
 
@@ -151,7 +146,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "work with a provided Producer" in {
     assertAllStagesStopped {
-      val input = 1 to 10 map { recordAndMetadata(_)._1 }
+      val input = (1 to 10).map { recordAndMetadata(_)._1 }
 
       val mockProducer = new MockProducer[String, String](true, new StringSerializer, new StringSerializer)
 
@@ -165,7 +160,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "emit confirmation in same order as inputs" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
 
       val client = {
         val inputMap = input.toMap
@@ -187,7 +182,7 @@ class ProducerSpec(_system: ActorSystem)
   }
 
   it should "in case of source error complete emitted messages and push error" in assertAllStagesStopped {
-    val input = 1 to 10 map recordAndMetadata
+    val input = (1 to 10).map(recordAndMetadata)
 
     val client = {
       val inputMap = input.toMap
@@ -215,7 +210,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "fail stream and force-close producer in callback on send failure" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
       val error = new Exception("Something wrong in kafka")
 
       val client = {
@@ -244,7 +239,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "stop emitting messages after encountering a send failure" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
       val error = new Exception("Something wrong in kafka")
 
       val client = {
@@ -277,7 +272,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "resume stream and gracefully close producer on send failure if specified by supervision-strategy" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
       val error = new Exception("Something wrong in kafka")
 
       val client = {
@@ -290,8 +285,7 @@ class ProducerSpec(_system: ActorSystem)
       val (source, sink) = TestSource
         .probe[Msg]
         .via(
-          testProducerFlow(client).withAttributes(ActorAttributes.withSupervisionStrategy(Supervision.resumingDecider))
-        )
+          testProducerFlow(client).withAttributes(ActorAttributes.withSupervisionStrategy(Supervision.resumingDecider)))
         .toMat(TestSink.probe)(Keep.both)
         .run()
 
@@ -311,7 +305,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "fail stream on exception of producer send" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
 
       val client = new ProducerMock[K, V](ProducerMock.handlers.fail)
       val probe = Source(input.map(toMessage))
@@ -330,7 +324,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "close client and complete in case of cancellation of outlet" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
 
       val client = {
         val inputMap = input.toMap
@@ -354,7 +348,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "not close the producer if closeProducerOnStop is false" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
 
       val client = {
         val inputMap = input.toMap
@@ -376,7 +370,7 @@ class ProducerSpec(_system: ActorSystem)
 
   it should "not close the producer on failure if closeProducerOnStop is false" in {
     assertAllStagesStopped {
-      val input = 1 to 3 map recordAndMetadata
+      val input = (1 to 3).map(recordAndMetadata)
       val error = new Exception("Something wrong in kafka")
 
       val client = new ProducerMock[K, V](ProducerMock.handlers.delayedMap(100.millis) { _ =>
@@ -570,8 +564,8 @@ object ProducerMock {
   object handlers {
     def fail[K, V]: Handler[K, V] = (_, _) => throw new Exception("Should not be called")
     def delayedMap[K, V](
-        delay: FiniteDuration
-    )(f: ProducerRecord[K, V] => Try[RecordMetadata])(implicit as: ActorSystem): Handler[K, V] = { (record, _) =>
+        delay: FiniteDuration)(f: ProducerRecord[K, V] => Try[RecordMetadata])(
+        implicit as: ActorSystem): Handler[K, V] = { (record, _) =>
       implicit val ec = as.dispatcher
       val promise = Promise[RecordMetadata]()
       as.scheduler.scheduleOnce(delay) {
@@ -594,9 +588,9 @@ class ProducerMock[K, V](handler: ProducerMock.Handler[K, V])(implicit ec: Execu
           val record = invocation.getArguments()(0).asInstanceOf[ProducerRecord[K, V]]
           val callback = invocation.getArguments()(1).asInstanceOf[Callback]
           handler(record, callback).onComplete {
-            case Success(value) if !closed => callback.onCompletion(value, null)
-            case Success(_) => callback.onCompletion(null, new Exception("Kafka producer already closed"))
-            case Failure(ex: Exception) => callback.onCompletion(null, ex)
+            case Success(value) if !closed     => callback.onCompletion(value, null)
+            case Success(_)                    => callback.onCompletion(null, new Exception("Kafka producer already closed"))
+            case Failure(ex: Exception)        => callback.onCompletion(null, ex)
             case Failure(throwableUnsupported) => throw new Exception("Throwable failure are not supported")
           }
           val result = new CompletableFuture[RecordMetadata]()
@@ -667,18 +661,16 @@ class ProducerMock[K, V](handler: ProducerMock.Handler[K, V])(implicit ec: Execu
 class CommittedMarkerMock {
   val mock = Mockito.mock(classOf[CommittedMarker])
   when(
-    mock.committed(mockito.ArgumentMatchers.any[Map[TopicPartition, OffsetAndMetadata]])
-  ).thenAnswer(new Answer[Future[Done]] {
-    override def answer(invocation: InvocationOnMock): Future[Done] =
-      Future.successful(Done)
-  })
+    mock.committed(mockito.ArgumentMatchers.any[Map[TopicPartition, OffsetAndMetadata]])).thenAnswer(
+    new Answer[Future[Done]] {
+      override def answer(invocation: InvocationOnMock): Future[Done] =
+        Future.successful(Done)
+    })
 
   private[kafka] def verifyOffsets(pos: ConsumerMessage.PartitionOffsetCommittedMarker*): Future[Done] =
     Mockito
       .verify(mock, Mockito.only())
       .committed(
         mockito.ArgumentMatchers.eq(
-          pos.map(p => new TopicPartition(p.key.topic, p.key.partition) -> new OffsetAndMetadata(p.offset + 1)).toMap
-        )
-      )
+          pos.map(p => new TopicPartition(p.key.topic, p.key.partition) -> new OffsetAndMetadata(p.offset + 1)).toMap))
 }
diff --git a/tests/src/test/scala/akka/kafka/internal/SubscriptionsSpec.scala b/tests/src/test/scala/akka/kafka/internal/SubscriptionsSpec.scala
index 37b31f6d..b04045d5 100644
--- a/tests/src/test/scala/akka/kafka/internal/SubscriptionsSpec.scala
+++ b/tests/src/test/scala/akka/kafka/internal/SubscriptionsSpec.scala
@@ -8,7 +8,7 @@ package akka.kafka.internal
 import java.net.URLEncoder
 
 import akka.kafka.tests.scaladsl.LogCapturing
-import akka.kafka.{Subscription, Subscriptions}
+import akka.kafka.{ Subscription, Subscriptions }
 import akka.util.ByteString
 import org.apache.kafka.common.TopicPartition
 import org.scalatest.matchers.should.Matchers
@@ -19,8 +19,7 @@ class SubscriptionsSpec extends AnyWordSpec with Matchers with LogCapturing {
   "URL encoded subscription" should {
     "be readable for topics" in {
       encode(Subscriptions.topics(Set("topic1", "topic2"))) should be(
-        "topic1+topic2"
-      )
+        "topic1+topic2")
     }
 
     "be readable for patterns" in {
@@ -33,28 +32,21 @@ class SubscriptionsSpec extends AnyWordSpec with Matchers with LogCapturing {
 
     "be readable for assignments with offset" in {
       encode(Subscriptions.assignmentWithOffset(Map(new TopicPartition("topic1", 1) -> 123L))) should be(
-        "topic1-1+offset123"
-      )
+        "topic1-1+offset123")
     }
 
     "be readable for multiple assignments with offset" in {
       encode(
         Subscriptions.assignmentWithOffset(
-          Map(new TopicPartition("topic1", 1) -> 123L, new TopicPartition("A-Topic-Name", 2) -> 456L)
-        )
-      ) should be(
-        "topic1-1+offset123+A-Topic-Name-2+offset456"
-      )
+          Map(new TopicPartition("topic1", 1) -> 123L, new TopicPartition("A-Topic-Name", 2) -> 456L))) should be(
+        "topic1-1+offset123+A-Topic-Name-2+offset456")
     }
 
     "be readable for multiple assignments with timestamp" in {
       encode(
         Subscriptions.assignmentOffsetsForTimes(
-          Map(new TopicPartition("topic1", 1) -> 12345L, new TopicPartition("Another0Topic", 1) -> 998822L)
-        )
-      ) should be(
-        "topic1-1+timestamp12345+Another0Topic-1+timestamp998822"
-      )
+          Map(new TopicPartition("topic1", 1) -> 12345L, new TopicPartition("Another0Topic", 1) -> 998822L))) should be(
+        "topic1-1+timestamp12345+Another0Topic-1+timestamp998822")
     }
   }
 
diff --git a/tests/src/test/scala/akka/kafka/javadsl/ControlSpec.scala b/tests/src/test/scala/akka/kafka/javadsl/ControlSpec.scala
index 6f548e5c..7edff1ad 100644
--- a/tests/src/test/scala/akka/kafka/javadsl/ControlSpec.scala
+++ b/tests/src/test/scala/akka/kafka/javadsl/ControlSpec.scala
@@ -6,13 +6,13 @@
 package akka.kafka.javadsl
 
 import java.util
-import java.util.concurrent.{CompletionStage, Executor, Executors}
+import java.util.concurrent.{ CompletionStage, Executor, Executors }
 import java.util.concurrent.atomic.AtomicBoolean
 
 import akka.Done
 import akka.kafka.internal.ConsumerControlAsJava
 import akka.kafka.tests.scaladsl.LogCapturing
-import org.apache.kafka.common.{Metric, MetricName}
+import org.apache.kafka.common.{ Metric, MetricName }
 import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec
@@ -23,7 +23,7 @@ import scala.language.reflectiveCalls
 
 object ControlSpec {
   def createControl(stopFuture: Future[Done] = Future.successful(Done),
-                    shutdownFuture: Future[Done] = Future.successful(Done)) = {
+      shutdownFuture: Future[Done] = Future.successful(Done)) = {
     val control = new akka.kafka.scaladsl.ControlSpec.ControlImpl(stopFuture, shutdownFuture)
     val wrapped = new ConsumerControlAsJava(control)
     new Consumer.Control {
@@ -57,8 +57,7 @@ class ControlSpec extends AnyWordSpec with ScalaFutures with Matchers with LogCa
 
       val drainingControl = Consumer.createDrainingControl(
         control,
-        Future.failed[String](new RuntimeException("expected")).toJava
-      )
+        Future.failed[String](new RuntimeException("expected")).toJava)
       val value = drainingControl.drainAndShutdown(ec).toScala.failed.futureValue
       value shouldBe a[RuntimeException]
       value.getMessage should be("expected")
@@ -70,8 +69,7 @@ class ControlSpec extends AnyWordSpec with ScalaFutures with Matchers with LogCa
 
       val drainingControl = Consumer.createDrainingControl(
         control,
-        Future.failed[String](new RuntimeException("expected")).toJava
-      )
+        Future.failed[String](new RuntimeException("expected")).toJava)
       val value = drainingControl.drainAndShutdown(ec).toScala.failed.futureValue
       value shouldBe a[RuntimeException]
       value.getMessage should be("expected")
diff --git a/tests/src/test/scala/akka/kafka/scaladsl/CommittableSinkSpec.scala b/tests/src/test/scala/akka/kafka/scaladsl/CommittableSinkSpec.scala
index e9d1be35..ffbbf818 100644
--- a/tests/src/test/scala/akka/kafka/scaladsl/CommittableSinkSpec.scala
+++ b/tests/src/test/scala/akka/kafka/scaladsl/CommittableSinkSpec.scala
@@ -9,7 +9,7 @@ import akka.Done
 import akka.kafka._
 import akka.kafka.scaladsl.Consumer.DrainingControl
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaLike
-import akka.stream.scaladsl.{Keep, Sink, Source}
+import akka.stream.scaladsl.{ Keep, Sink, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import org.apache.kafka.clients.producer.ProducerRecord
 
@@ -76,7 +76,7 @@ class CommittableSinkSpec extends SpecBase with TestcontainersKafkaLike {
             source
               .map { message =>
                 ProducerMessage.single(new ProducerRecord(targetTopic, message.record.key(), message.record.value()),
-                                       message.committableOffset)
+                  message.committableOffset)
               }
               .toMat(Producer.committableSink(producerDefaults, committerDefaults))(Keep.right)
               .run()
@@ -103,9 +103,9 @@ class CommittableSinkSpec extends SpecBase with TestcontainersKafkaLike {
 
   def produceStringRoundRobin(topic: String, range: immutable.Seq[String]): Future[Done] =
     Source(range)
-    // NOTE: If no partition is specified but a key is present a partition will be chosen
-    // using a hash of the key. If neither key nor partition is present a partition
-    // will be assigned in a round-robin fashion.
+      // NOTE: If no partition is specified but a key is present a partition will be chosen
+      // using a hash of the key. If neither key nor partition is present a partition
+      // will be assigned in a round-robin fashion.
       .map(n => new ProducerRecord[String, String](topic, n))
       .runWith(Producer.plainSink(producerDefaults.withProducer(testProducer)))
 
diff --git a/tests/src/test/scala/akka/kafka/scaladsl/CommittingSpec.scala b/tests/src/test/scala/akka/kafka/scaladsl/CommittingSpec.scala
index 17085c48..094f7327 100644
--- a/tests/src/test/scala/akka/kafka/scaladsl/CommittingSpec.scala
+++ b/tests/src/test/scala/akka/kafka/scaladsl/CommittingSpec.scala
@@ -9,17 +9,17 @@ import java.util.concurrent.atomic.AtomicInteger
 import java.util.function.IntUnaryOperator
 
 import akka.actor.ActorRef
-import akka.kafka.ConsumerMessage.{CommittableOffsetBatch, GroupTopicPartition}
+import akka.kafka.ConsumerMessage.{ CommittableOffsetBatch, GroupTopicPartition }
 import akka.kafka.ProducerMessage.MultiMessage
 import akka.kafka._
 import akka.kafka.internal.CommittableOffsetBatchImpl
 import akka.kafka.testkit.scaladsl.TestcontainersKafkaLike
 import akka.stream.RestartSettings
-import akka.stream.scaladsl.{Keep, RestartSource, Sink, Source}
+import akka.stream.scaladsl.{ Keep, RestartSource, Sink, Source }
 import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import akka.stream.testkit.scaladsl.TestSink
 import akka.testkit.TestProbe
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
 import org.apache.kafka.clients.producer.ProducerRecord
 import org.apache.kafka.common.TopicPartition
 import org.scalatest.Inside
@@ -109,10 +109,9 @@ class CommittingSpec extends SpecBase with TestcontainersKafkaLike with Inside {
       Source(Numbers.take(count))
         .map { n =>
           MultiMessage(List(
-                         new ProducerRecord(topic1, partition0, DefaultKey, n + "-p0"),
-                         new ProducerRecord(topic1, partition1, DefaultKey, n + "-p1")
-                       ),
-                       NotUsed)
+              new ProducerRecord(topic1, partition0, DefaultKey, n + "-p0"),
+              new ProducerRecord(topic1, partition1, DefaultKey, n + "-p1")),
+            NotUsed)
         }
         .via(Producer.flexiFlow(producerDefaults.withProducer(testProducer)))
         .runWith(Sink.ignore)
@@ -128,8 +127,7 @@ class CommittingSpec extends SpecBase with TestcontainersKafkaLike with Inside {
       // Await initial partition assignment
       rebalanceActor1.expectMsg(
... 2355 lines suppressed ...
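All of the hunks above follow from the same scalafmt profile. As a rough before/after sketch of the three conventions that dominate this patch, using fragments lifted from the hunks themselves (illustrative only; the authoritative rules are whatever the repository's .scalafmt.conf specifies, which is not shown in this commit):

    // Before: no spaces inside import braces, arguments vertically
    // aligned under the opening parenthesis, dangling closing parens
    // on their own line.
    import java.util.concurrent.{CountDownLatch, TimeUnit}

    val sink = Consumer
      .plainPartitionedManualOffsetSource(consumerSettings(dummy),
                                          Subscriptions.topics(topic),
                                          getOffsetsOnAssign)

    // After: spaces inside import braces, continuation arguments
    // indented by a fixed step instead of aligned, and closing parens
    // folded onto the last argument line.
    import java.util.concurrent.{ CountDownLatch, TimeUnit }

    val sink = Consumer
      .plainPartitionedManualOffsetSource(consumerSettings(dummy),
        Subscriptions.topics(topic),
        getOffsetsOnAssign)

Dropping parameter alignment and the dangling parentheses keeps later diffs small: renaming a method or adding an argument no longer forces a re-indent of every following line.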

