Posted to commits@pekko.apache.org by fa...@apache.org on 2023/03/06 20:35:50 UTC

[incubator-pekko-connectors] branch main updated: remove AWS Lambda connector (#35)

This is an automated email from the ASF dual-hosted git repository.

fanningpj pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-pekko-connectors.git


The following commit(s) were added to refs/heads/main by this push:
     new 3bbe04ff remove AWS Lambda connector (#35)
3bbe04ff is described below

commit 3bbe04ff8ef196fd7799ac0bb70b0c87f4e8ea5e
Author: PJ Fanning <pj...@users.noreply.github.com>
AuthorDate: Mon Mar 6 21:35:44 2023 +0100

    remove AWS Lambda connector (#35)
---
 build.sbt                                 |  28 +--
 docs/src/main/paradox/aws-event-bridge.md | 125 -----------
 docs/src/main/paradox/awslambda.md        |  69 ------
 docs/src/main/paradox/dynamodb.md         | 104 ---------
 docs/src/main/paradox/index.md            |   6 -
 docs/src/main/paradox/kinesis.md          | 264 -----------------------
 docs/src/main/paradox/sns.md              |  88 --------
 docs/src/main/paradox/sqs.md              | 346 ------------------------------
 project/Dependencies.scala                |  11 +
 9 files changed, 26 insertions(+), 1015 deletions(-)

diff --git a/build.sbt b/build.sbt
index dd25590a..ac3ac67c 100644
--- a/build.sbt
+++ b/build.sbt
@@ -5,14 +5,14 @@ lazy val `pekko-connectors` = project
   .aggregate(
     amqp,
     avroparquet,
-    awslambda,
+    // awsLambda,
     azureStorageQueue,
     cassandra,
     couchbase,
     csv,
-    dynamodb,
+    // dynamodb,
     elasticsearch,
-    eventbridge,
+    // eventbridge,
     files,
     ftp,
     geode,
@@ -30,7 +30,7 @@ lazy val `pekko-connectors` = project
     ironmq,
     jms,
     jsonStreaming,
-    kinesis,
+    // kinesis,
     kudu,
     mongodb,
     mqtt,
@@ -42,9 +42,9 @@ lazy val `pekko-connectors` = project
     springWeb,
     simpleCodecs,
     slick,
-    sns,
+    // sns,
     solr,
-    sqs,
+    // sqs,
     sse,
     text,
     udp,
@@ -106,7 +106,8 @@ lazy val amqp = pekkoConnectorProject("amqp", "amqp", Dependencies.Amqp)
 lazy val avroparquet =
   pekkoConnectorProject("avroparquet", "avroparquet", Dependencies.AvroParquet)
 
-lazy val awslambda = pekkoConnectorProject("awslambda", "aws.lambda", Dependencies.AwsLambda)
+// https://github.com/apache/incubator-pekko-connectors/issues/34
+// lazy val awslambda = pekkoConnectorProject("awslambda", "aws.lambda", Dependencies.AwsLambda)
 
 lazy val azureStorageQueue = pekkoConnectorProject(
   "azure-storage-queue",
@@ -125,7 +126,7 @@ lazy val csvBench = internalProject("csv-bench")
   .dependsOn(csv)
   .enablePlugins(JmhPlugin)
 
-lazy val dynamodb = pekkoConnectorProject("dynamodb", "aws.dynamodb", Dependencies.DynamoDB)
+//lazy val dynamodb = pekkoConnectorProject("dynamodb", "aws.dynamodb", Dependencies.DynamoDB)
 
 lazy val elasticsearch = pekkoConnectorProject(
   "elasticsearch",
@@ -241,7 +242,7 @@ lazy val jms = pekkoConnectorProject("jms", "jms", Dependencies.Jms)
 
 lazy val jsonStreaming = pekkoConnectorProject("json-streaming", "json.streaming", Dependencies.JsonStreaming)
 
-lazy val kinesis = pekkoConnectorProject("kinesis", "aws.kinesis", Dependencies.Kinesis)
+//lazy val kinesis = pekkoConnectorProject("kinesis", "aws.kinesis", Dependencies.Kinesis)
 
 lazy val kudu = pekkoConnectorProject("kudu", "kudu", Dependencies.Kudu)
 
@@ -284,14 +285,14 @@ lazy val simpleCodecs = pekkoConnectorProject("simple-codecs", "simplecodecs")
 
 lazy val slick = pekkoConnectorProject("slick", "slick", Dependencies.Slick)
 
-lazy val eventbridge =
-  pekkoConnectorProject("aws-event-bridge", "aws.eventbridge", Dependencies.Eventbridge)
+//lazy val eventbridge =
+//  pekkoConnectorProject("aws-event-bridge", "aws.eventbridge", Dependencies.Eventbridge)
 
-lazy val sns = pekkoConnectorProject("sns", "aws.sns", Dependencies.Sns)
+//lazy val sns = pekkoConnectorProject("sns", "aws.sns", Dependencies.Sns)
 
 lazy val solr = pekkoConnectorProject("solr", "solr", Dependencies.Solr)
 
-lazy val sqs = pekkoConnectorProject("sqs", "aws.sqs", Dependencies.Sqs)
+//lazy val sqs = pekkoConnectorProject("sqs", "aws.sqs", Dependencies.Sqs)
 
 lazy val sse = pekkoConnectorProject("sse", "sse", Dependencies.Sse)
 
@@ -378,6 +379,7 @@ lazy val docs = project
       "examples/ftp-samples.html",
       "examples/jms-samples.html",
       "examples/mqtt-samples.html",
+      "aws-shared-configuration.html",
       "index.html"),
     resolvers += Resolver.jcenterRepo,
     publishRsyncArtifacts += makeSite.value -> "www/",
diff --git a/docs/src/main/paradox/aws-event-bridge.md b/docs/src/main/paradox/aws-event-bridge.md
deleted file mode 100644
index 2cd39fce..00000000
--- a/docs/src/main/paradox/aws-event-bridge.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# AWS EventBridge
-
-@@@ note { title="Amazon EventBridge" }
-
-Amazon EventBridge is a serverless event bus that allows your applications to asynchronously consume events from 3rd party SaaS offerings, AWS services, and other applications in your own infrastructure. 
-It evolved from Amazon CloudWatch Events ([official documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)). 
-EventBridge acts as a broker that you can configure with your own rules to route events to the correct service. 
-
-For more information about AWS EventBridge please visit the [official documentation](https://aws.amazon.com/eventbridge/).
-
-The publishing of the events is implemented using the [AWS PUT Events API](https://docs.aws.amazon.com/eventbridge/latest/userguide/add-events-putevents.html).
-
-When publishing events, any of the entries inside the PutEvents request can fail. 
-The response contains information about which entries were not successfully published.
-Currently, no retries are supported apart from the configuration provided to the EventBridge client. 
-
-Support for configurable retry behaviour may be added to the connector in a future release.
-
-By default the client publishes to the default event bus, but normally you should publish to a specific event bus that you create.
-
-An event bus name is defined per event in a [PutEventsRequestEntry](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_PutEventsRequestEntry.html) object.
-It would be possible to define helper flows/sinks with default values such as source and `eventBusName`. 
-The `detail` field is a JSON string and `detailType` is the event name used for rule matching.
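-
-For illustration, a single entry might be built with the AWS SDK v2 builders like this (bus name, source and payload are made-up values):
-
-```scala
-import software.amazon.awssdk.services.eventbridge.model.PutEventsRequestEntry
-
-val entry = PutEventsRequestEntry.builder()
-  .eventBusName("my-event-bus")    // omit to publish to the default bus
-  .source("com.example.orders")    // identifies the emitting application
-  .detailType("OrderCreated")      // event name used for rule matching
-  .detail("""{"orderId":"42"}""")  // payload as a JSON string
-  .build()
-```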
-
-@@@
-
-The Apache Pekko Connectors AWS EventBridge connector provides Apache Pekko Stream flows and sinks to publish to AWS EventBridge event buses.
-
-
-@@project-info{ projectId="aws-event-bridge" }
-
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-aws-event-bridge_$scala.binary.version$
-  version=$project.version$
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="aws-event-bridge" }
-
-
-## Setup
-
-Prepare an @scaladoc[ActorSystem](akka.actor.ActorSystem).
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala) { #init-system }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #init-system }
-
-
-This connector requires an @javadoc[EventBridge](software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient) instance to communicate with AWS EventBridge.
-
-
-It is your code's responsibility to call `close` to free any resources held by the client. In this example it will be called when the actor system is terminated.
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala) { #init-client }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-
-
-## Publish messages to AWS EventBridge Event Bus
-
-Create a `PutEventsRequestEntry`-accepting sink, publishing to an event bus.
-
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala) { #run-events-entry }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #run-events-entry }
-
-
-Create a sink that accepts `PutEventsRequestEntries` to be published to an Event Bus.
-
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala) { #run-events-request }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #run-events-request }
-
-You can also build flow stages which publish messages to Event Bus and then forward 
-@javadoc[PutEventsResponse](software.amazon.awssdk.services.eventbridge.model.PutEventsResponse) further down the stream.
-
-Flow for `PutEventsRequestEntry`.
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala) { #flow-events-entry }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #flow-events-entry }
-
-Flow for `PutEventsRequest`.
-
-Scala
-: @@snip [snip](/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala) { #flow-events-request }
-
-Java
-: @@snip [snip](/aws-event-bridge/src/test/java/docs/javadsl/EventBridgePublisherTest.java) { #flow-events-request }
-
-Flow supporting a list of `PutEventsRequestEntry` objects.
-
-Messages published in a batch using @apidoc[EventBridgePublisher.flowSeq](EventBridgePublisher$) are not published in an "all or nothing" manner. EventBridge will process each event independently. Retries of failed messages in the `PutEventsResponse` are not yet implemented.
-
-
-## Integration testing
-
-For integration testing without connecting directly to Amazon EventBridge, Apache Pekko Connectors uses [Localstack](https://github.com/localstack/localstack), which comes as a Docker image and has a corresponding service `amazoneventbridge` in the `docker-compose.yml` file. Start it with `docker-compose up amazoneventbridge` before running the integration tests.
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/docs/src/main/paradox/awslambda.md b/docs/src/main/paradox/awslambda.md
deleted file mode 100644
index 6403707e..00000000
--- a/docs/src/main/paradox/awslambda.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# AWS Lambda
-
-The AWS Lambda connector provides an Apache Pekko Stream Flow for AWS Lambda integration.
-
-For more information about AWS Lambda please visit the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/index.html).
-
-@@project-info{ projectId="awslambda" }
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-awslambda_$scala.binary.version$
-  version=$project.version$
-  symbol2=PekkoVersion
-  value2=$akka.version$
-  group2=org.apache.pekko
-  artifact2=pekko-stream_$scala.binary.version$
-  version2=PekkoVersion
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="awslambda" }
-
-## Setup
-
-The flow provided by this connector needs a prepared @javadoc[LambdaAsyncClient](software.amazon.awssdk.services.lambda.LambdaAsyncClient) to be able to invoke lambda functions.
-
-Scala
-: @@snip (/awslambda/src/test/scala/docs/scaladsl/Examples.scala) { #init-client }
-
-Java
-: @@snip (/awslambda/src/test/java/docs/javadsl/Examples.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-We will need an @apidoc[akka.actor.ActorSystem].
-
-Scala
-: @@snip (/awslambda/src/test/scala/docs/scaladsl/Examples.scala) { #init-sys }
-
-Java
-: @@snip (/awslambda/src/test/java/docs/javadsl/Examples.java) { #init-sys }
-
-This is all preparation that we are going to need.
-
-## Sending messages
-
-Now we can stream AWS Java SDK Lambda `InvokeRequest` objects to AWS Lambda functions using the
-@apidoc[AwsLambdaFlow$] factory.
-
-Scala
-: @@snip (/awslambda/src/test/scala/docs/scaladsl/Examples.scala) { #run }
-
-Java
-: @@snip (/awslambda/src/test/java/docs/javadsl/Examples.java) { #run }
-
-## AwsLambdaFlow configuration
-
-Options:
-
 - `parallelism` - Number of parallel executions. Should be less than or equal to the number of threads in the `ExecutorService` used by the `LambdaAsyncClient` (see the sketch below).
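-
-As a rough sketch of how the pieces fit together (the import path follows the Alpakka package layout used by the snippets above and may differ; the function name is made up):
-
-```scala
-import akka.actor.ActorSystem
-import akka.stream.alpakka.awslambda.scaladsl.AwsLambdaFlow
-import akka.stream.scaladsl.{Sink, Source}
-import software.amazon.awssdk.core.SdkBytes
-import software.amazon.awssdk.services.lambda.LambdaAsyncClient
-import software.amazon.awssdk.services.lambda.model.InvokeRequest
-
-implicit val system: ActorSystem = ActorSystem("lambda-example")
-implicit val lambdaClient: LambdaAsyncClient = LambdaAsyncClient.create()
-
-val request = InvokeRequest.builder()
-  .functionName("my-function") // hypothetical function name
-  .payload(SdkBytes.fromUtf8String("""{"key":"value"}"""))
-  .build()
-
-// invoke with up to 4 parallel executions and print the status codes
-Source.single(request)
-  .via(AwsLambdaFlow(4))
-  .runWith(Sink.foreach(response => println(response.statusCode())))
-```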
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/docs/src/main/paradox/dynamodb.md b/docs/src/main/paradox/dynamodb.md
deleted file mode 100644
index 38c760c4..00000000
--- a/docs/src/main/paradox/dynamodb.md
+++ /dev/null
@@ -1,104 +0,0 @@
-# AWS DynamoDB
-
-The AWS DynamoDB connector provides a flow for streaming DynamoDB requests. For more information about DynamoDB please visit the [official documentation](https://aws.amazon.com/dynamodb/).
-
-@@project-info{ projectId="dynamodb" }
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-dynamodb_$scala.binary.version$
-  version=$project.version$
-  symbol2=PekkoVersion
-  value2=$akka.version$
-  group2=org.apache.pekko
-  artifact2=pekko-stream_$scala.binary.version$
-  version2=PekkoVersion
-  symbol3=PekkoHttpVersion
-  value3=$akka-http.version$
-  group3=org.apache.pekko
-  artifact3=pekko-http_$scala.binary.version$
-  version3=PekkoHttpVersion
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="dynamodb" }
-
-
-## Setup
-
-This connector requires a @javadoc[DynamoDbAsyncClient](software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient) instance to communicate with AWS DynamoDB.
-
-It is your code's responsibility to call `close` to free any resources held by the client. In this example it will be called when the actor system is terminated.
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala) { #init-client }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/ExampleTest.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-
-## Sending requests and receiving responses
-
-For simple operations you can issue a single request, and get back the result in a @scala[`Future`]@java[`CompletionStage`].
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala) { #simple-request }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/ExampleTest.java) { #simple-request }
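-
-As a rough sketch of such a single request (assuming the `scaladsl.DynamoDb` API and the connector's implicit operation instances; exact imports may differ):
-
-```scala
-import akka.actor.ActorSystem
-import akka.stream.alpakka.dynamodb.DynamoDbOp._ // implicit per-operation instances (assumed import)
-import akka.stream.alpakka.dynamodb.scaladsl.DynamoDb
-import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
-import software.amazon.awssdk.services.dynamodb.model.{ListTablesRequest, ListTablesResponse}
-import scala.concurrent.Future
-
-implicit val system: ActorSystem = ActorSystem("dynamodb-example")
-implicit val client: DynamoDbAsyncClient = DynamoDbAsyncClient.create()
-
-// issue one request and receive the response as a Future
-val tables: Future[ListTablesResponse] = DynamoDb.single(ListTablesRequest.builder().build())
-```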
-
-You can also get the response to a request as an element emitted from a Flow:
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala) { #flow }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/ExampleTest.java) { #flow }
-
-
-### Flow with context
-
-`flowWithContext` allows you to pass an arbitrary value, such as a commit handle for JMS or Kafka, through the DynamoDB operation.
-The responses are wrapped in a @scaladoc[Try](scala.util.Try) to differentiate between successful operations and errors in-stream.
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala) { #withContext }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/ExampleTest.java) { #withContext }
-
-
-### Pagination
-
-The DynamoDB operations `BatchGetItem`, `ListTables`, `Query` and `Scan` support paginated results.
-Requests with paginated results can be used as a source or in a flow with `flowPaginated`:
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala) { #paginated }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/ExampleTest.java) { #paginated }
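-
-A minimal sketch of a paginated scan, under the same assumptions as the single-request example above (table name is made up):
-
-```scala
-import akka.stream.scaladsl.Sink
-import software.amazon.awssdk.services.dynamodb.model.ScanRequest
-
-// emits one ScanResponse per page until the table has been fully scanned
-// (client, system and DynamoDbOp imports as in the earlier sketch)
-DynamoDb.source(ScanRequest.builder().tableName("example-table").build())
-  .runWith(Sink.foreach(response => println(response.count())))
-```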
-
-
-## Error Retries and Exponential Backoff
-
-The AWS SDK 2 implements error retrying with exponential backoff, which is configurable on the @javadoc[DynamoDbAsyncClient](software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient) by setting a @javadoc[RetryPolicy](software.amazon.awssdk.core.retry.RetryPolicy) in `overrideConfiguration`.
-
-See @ref[AWS Retry configuration](aws-shared-configuration.md) for more details.
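-
-For illustration, a retry policy can be set with plain AWS SDK v2 builders like this (the retry count is arbitrary):
-
-```scala
-import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration
-import software.amazon.awssdk.core.retry.RetryPolicy
-import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
-
-// client whose requests are retried up to 5 times with the SDK's default backoff
-val retryingClient: DynamoDbAsyncClient = DynamoDbAsyncClient.builder()
-  .overrideConfiguration(
-    ClientOverrideConfiguration.builder()
-      .retryPolicy(RetryPolicy.builder().numRetries(5).build())
-      .build())
-  .build()
-```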
-
-Scala
-: @@snip [snip](/dynamodb/src/test/scala/docs/scaladsl/RetrySpec.scala) { #clientRetryConfig }
-
-Java
-: @@snip [snip](/dynamodb/src/test/java/docs/javadsl/RetryTest.java) { #clientRetryConfig }
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/docs/src/main/paradox/index.md b/docs/src/main/paradox/index.md
index b073235a..57577c78 100644
--- a/docs/src/main/paradox/index.md
+++ b/docs/src/main/paradox/index.md
@@ -15,13 +15,7 @@ The [Apache Pekko Connectors project](https://doc.akka.io/docs/alpakka/current/)
 * [Apache Kudu](kudu.md)
 * [Apache Solr](solr.md)
 * [Avro Parquet](avroparquet.md)
-* [AWS EventBridge](aws-event-bridge.md)
-* [AWS DynamoDB](dynamodb.md)
-* [AWS Kinesis](kinesis.md)
-* [AWS Lambda](awslambda.md)
 * [AWS S3](s3.md)
-* [AWS SNS](sns.md)
-* [AWS SQS](sqs.md)
 * [Azure Storage Queue](azure-storage-queue.md)
 * [Couchbase](couchbase.md)
 * [Elasticsearch](elasticsearch.md)
diff --git a/docs/src/main/paradox/kinesis.md b/docs/src/main/paradox/kinesis.md
deleted file mode 100644
index df83faaa..00000000
--- a/docs/src/main/paradox/kinesis.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# AWS Kinesis and Firehose
-
-The AWS Kinesis connector provides flows for streaming data to and from Kinesis Data Streams and to Kinesis Firehose streams.
-
-For more information about Kinesis please visit the [Kinesis documentation](https://docs.aws.amazon.com/kinesis/index.html).
-
-@@@ note { title="Alternative connector 1" }
-
-Another Kinesis connector which is based on the Kinesis Client Library is available.
-
-This library combines the convenience of Apache Pekko Streams with KCL checkpoint management, failover, load-balancing, and re-sharding capabilities.
-
-Please read more about it at [GitHub StreetContxt/kcl-akka-stream](https://github.com/StreetContxt/kcl-akka-stream).
-@@@
-
-@@@ note { title="Alternative connector 2" }
-
-Another Kinesis connector which is based on the Kinesis Client Library 2.x is available.
-
-This library exposes an Apache Pekko Streams Source backed by the KCL for checkpoint management, failover, load-balancing, and re-sharding capabilities.
-
-Please read more about it at [GitHub 500px/kinesis-stream](https://github.com/500px/kinesis-stream).
-@@@
-
-@@project-info{ projectId="kinesis" }
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-kinesis_$scala.binary.version$
-  version=$project.version$
-  symbol2=PekkoVersion
-  value2=$akka.version$
-  group2=org.apache.pekko
-  artifact2=pekko-stream_$scala.binary.version$
-  version2=PekkoVersion
-  symbol3=PekkoHttpVersion
-  value3=$akka-http.version$
-  group3=org.apache.pekko
-  artifact3=pekko-http_$scala.binary.version$
-  version3=PekkoHttpVersion
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="kinesis" }
-
-
-## Kinesis Data Streams
-
-### Create the Kinesis client
-
-Sources and Flows provided by this connector need a `KinesisAsyncClient` instance to consume messages from a shard.
-
-@@@ note
-The `KinesisAsyncClient` instance you supply is thread-safe and can be shared amongst multiple `GraphStages`.
-As a result, individual `GraphStages` will not automatically shut down the supplied client when they complete.
-It is recommended to shut the client instance down on actor system termination.
-@@@
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #init-client }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-### Kinesis as Source
-
-The `KinesisSource` creates one `GraphStage` per shard. Reading from a shard requires an instance of `ShardSettings`.
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #source-settings }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #source-settings }
-
-You have the choice of reading from a single shard, or reading from multiple shards. In the case of multiple shards the results of running a separate `GraphStage` for each shard will be merged together.
-
-@@@ warning
-The `GraphStage` associated with a shard will remain open until the graph is stopped, or a [GetRecords](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) result returns an empty shard iterator indicating that the shard has been closed. This means that if you wish to continue processing records after a merge or reshard, you will need to recreate the source with the results of a new [DescribeStream](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html) request.
-@@@
-
-For a single shard, simply provide that shard's settings.
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #source-single }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #source-single }
-
-You can merge multiple shards by providing a list of settings.
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #source-list }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #source-list }
-
-The constructed `Source` will return [Record](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_Record.html)
-objects by calling [GetRecords](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) at the specified interval and according to the downstream demand.
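-
-A minimal sketch of reading a single shard (stream and shard names are made up; the exact settings API may differ):
-
-```scala
-import akka.actor.ActorSystem
-import akka.stream.alpakka.kinesis.ShardSettings
-import akka.stream.alpakka.kinesis.scaladsl.KinesisSource
-import akka.stream.scaladsl.Sink
-import software.amazon.awssdk.services.kinesis.KinesisAsyncClient
-import software.amazon.awssdk.services.kinesis.model.ShardIteratorType
-
-implicit val system: ActorSystem = ActorSystem("kinesis-example")
-implicit val kinesisClient: KinesisAsyncClient = KinesisAsyncClient.create()
-
-// start reading the shard from its oldest available record
-val settings = ShardSettings("example-stream", "shardId-000000000000")
-  .withShardIteratorType(ShardIteratorType.TRIM_HORIZON)
-
-KinesisSource.basic(settings, kinesisClient)
-  .runWith(Sink.foreach(record => println(record.sequenceNumber())))
-```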
-
-### Kinesis Put via Flow or as Sink
-
-The
-@scala[@scaladoc[KinesisFlow](akka.stream.alpakka.kinesis.scaladsl.KinesisFlow$) (or @scaladoc[KinesisSink](akka.stream.alpakka.kinesis.scaladsl.KinesisSink$))]
-@java[@scaladoc[KinesisFlow](akka.stream.alpakka.kinesis.javadsl.KinesisFlow$) (or @scaladoc[KinesisSink](akka.stream.alpakka.kinesis.javadsl.KinesisSink$))]
-publishes messages into a Kinesis stream using their partition key and message body. It uses dynamically sized batches, can perform several requests in parallel, and retries failed records. These features are necessary to achieve the best possible write throughput to the stream. The Flow outputs the result of publishing each record.
-
-@@@ warning
-Batching has a drawback: message order cannot be guaranteed, as some records within a single batch may fail to be published. That also means that the Flow output may not preserve the input order.
-
-More information can be found in the [AWS documentation](https://docs.aws.amazon.com/streams/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) and the [AWS API reference](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html).
-@@@
-
-In order to correlate the results with the original message, an optional user context object of arbitrary type can be associated with every message and will be returned with the corresponding result. This allows keeping track of which messages have been successfully sent to Kinesis even if the message order gets mixed up. 
-
-
-Publishing to a Kinesis stream requires an instance of `KinesisFlowSettings`, although a default instance with sane values and a method that returns settings based on the stream shard number are also available:
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #flow-settings }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #flow-settings }
-
-@@@ warning
-Note that throughput settings `maxRecordsPerSecond` and `maxBytesPerSecond` are vital to minimize server errors (like `ProvisionedThroughputExceededException`) and retries, and thus achieve a higher publication rate.
-@@@
-
-The Flow/Sink can now be created.
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #flow-sink }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #flow-sink }
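-
-A minimal sketch of publishing a single record through the flow (stream name and payload are made up; implicit `KinesisAsyncClient` and `ActorSystem` as in the source sketch above):
-
-```scala
-import akka.stream.alpakka.kinesis.scaladsl.KinesisFlow
-import akka.stream.scaladsl.{Sink, Source}
-import software.amazon.awssdk.core.SdkBytes
-import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry
-
-val entry = PutRecordsRequestEntry.builder()
-  .partitionKey("partition-key-1")
-  .data(SdkBytes.fromUtf8String("payload"))
-  .build()
-
-// publishes with default KinesisFlowSettings and emits one result per record
-Source.single(entry)
-  .via(KinesisFlow("example-stream"))
-  .runWith(Sink.foreach(result => println(result.sequenceNumber())))
-```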
-
-@@@ warning
-As of version 2, the library will not retry failed requests: this is handled by the underlying `KinesisAsyncClient` (see [client configuration](https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.html#overrideConfiguration-software.amazon.awssdk.core.client.config.ClientOverrideConfiguration-)). This means that you may have to inspect individual responses to make sure they have been successful:
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisSnippets.scala) { #error-handling }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisSnippets.java) { #error-handling }
-@@@
-
-@@@ note
-The default behavior of `KinesisFlow` and `KinesisSink` is to batch according to the `KinesisFlowSettings` provided and to throw any error the Kinesis client throws. If special handling of batching, errors, or successful results is needed, the methods `KinesisFlow.batchingFlow` & `KinesisFlow.batchWritingFlow` can be used and combined in ways other than the default.
-@@@
-
-## AWS KCL Scheduler Source & checkpointer
-
-The KCL Source can read from several shards and rebalance automatically when other Schedulers are started or stopped. It also handles record sequence checkpoints.
-
-For more information about KCL please visit the [official documentation](https://docs.aws.amazon.com/streams/latest/dev/developing-consumers-with-kcl-v2.html).
-
-### Usage
-
-The KCL Scheduler Source needs to create and manage Scheduler instances in order to consume records from Kinesis Streams.
-
-In order to use it, you need to provide a Scheduler builder and the Source settings:
-
-Scala
-: @@snip (/kinesis/src/test/scala/docs/scaladsl/KclSnippets.scala) { #scheduler-settings }
-
-Java
-: @@snip (/kinesis/src/test/java/docs/javadsl/KclSnippets.java) { #scheduler-settings }
-
-Then the Source can be created as usual:
-
-Scala
-: @@snip (/kinesis/src/test/scala/docs/scaladsl/KclSnippets.scala) { #scheduler-source }
-
-Java
-: @@snip (/kinesis/src/test/java/docs/javadsl/KclSnippets.java) { #scheduler-source }
-
-### Committing records
-
-The KCL Scheduler Source publishes messages downstream that can be committed in order to mark consumer progress per shard. This can be done manually or by using the provided checkpointer Flow/Sink.
-
-In order to use the Flow/Sink you must provide additional checkpoint settings:
-
-Scala
-: @@snip (/kinesis/src/test/scala/docs/scaladsl/KclSnippets.scala) { #checkpoint }
-
-Java
-: @@snip (/kinesis/src/test/java/docs/javadsl/KclSnippets.java) { #checkpoint }
-
-Note that the checkpointer Flow may not maintain the input order of records from different shards.
-
-## Kinesis Firehose Streams
-
-### Create the Kinesis Firehose client
-
-Flows provided by this connector need a `FirehoseAsyncClient` instance to publish messages.
-
-@@@ note
-The `FirehoseAsyncClient` instance you supply is thread-safe and can be shared amongst multiple `GraphStages`.
-As a result, individual `GraphStages` will not automatically shut down the supplied client when they complete.
-It is recommended to shut the client instance down on actor system termination.
-@@@
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala) { #init-client }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisFirehoseSnippets.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-### Kinesis Firehose Put via Flow or as Sink
-
-The
-@scala[@scaladoc[KinesisFirehoseFlow](akka.stream.alpakka.kinesisfirehose.scaladsl.KinesisFirehoseFlow$) (or @scaladoc[KinesisFirehoseSink](akka.stream.alpakka.kinesisfirehose.scaladsl.KinesisFirehoseSink$))]
-@java[@scaladoc[KinesisFirehoseFlow](akka.stream.alpakka.kinesisfirehose.javadsl.KinesisFirehoseFlow$) (or @scaladoc[KinesisFirehoseSink](akka.stream.alpakka.kinesisfirehose.javadsl.KinesisFirehoseSink$))]
-publishes messages into a Kinesis Firehose stream using their message body. It uses dynamically sized batches and can perform several requests in parallel. These features are necessary to achieve the best possible write throughput to the stream. The Flow outputs the result of publishing each record.
-
-@@@ warning
-Batching has a drawback: message order cannot be guaranteed, as some records within a single batch may fail to be published. That also means that the Flow output may not preserve the input order.
-
-More information can be found in the [AWS API reference](https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecordBatch.html).
-@@@
-
-Publishing to a Kinesis Firehose stream requires an instance of `KinesisFirehoseFlowSettings`, although a default instance with sane values is available:
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala) { #flow-settings }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisFirehoseSnippets.java) { #flow-settings }
-
-@@@ warning
-Note that throughput settings `maxRecordsPerSecond` and `maxBytesPerSecond` are vital to minimize server errors (like `ProvisionedThroughputExceededException`) and retries, and thus achieve a higher publication rate.
-@@@
-
-The Flow/Sink can now be created.
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala) { #flow-sink }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisFirehoseSnippets.java) { #flow-sink }
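-
-A minimal sketch of publishing one record (delivery stream name is made up; an implicit `ActorSystem` is assumed in scope as in the earlier sketches):
-
-```scala
-import akka.stream.alpakka.kinesisfirehose.scaladsl.KinesisFirehoseFlow
-import akka.stream.scaladsl.{Sink, Source}
-import software.amazon.awssdk.core.SdkBytes
-import software.amazon.awssdk.services.firehose.FirehoseAsyncClient
-import software.amazon.awssdk.services.firehose.model.Record
-
-implicit val firehoseClient: FirehoseAsyncClient = FirehoseAsyncClient.create()
-
-val record = Record.builder().data(SdkBytes.fromUtf8String("payload")).build()
-
-// publishes with default settings and emits one result entry per record
-Source.single(record)
-  .via(KinesisFirehoseFlow("example-delivery-stream"))
-  .runWith(Sink.foreach(result => println(result.recordId())))
-```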
-
-@@@ warning
-As of version 2, the library will not retry failed requests. See @ref[AWS Retry Configuration](aws-shared-configuration.md) for how to configure retries for the @javadoc[FirehoseAsyncClient](software.amazon.awssdk.services.firehose.FirehoseAsyncClient).
-
-This means that you may have to inspect individual responses to make sure they have been successful:
-
-Scala
-: @@snip [snip](/kinesis/src/test/scala/docs/scaladsl/KinesisFirehoseSnippets.scala) { #error-handling }
-
-Java
-: @@snip [snip](/kinesis/src/test/java/docs/javadsl/KinesisFirehoseSnippets.java) { #error-handling }
-@@@
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/docs/src/main/paradox/sns.md b/docs/src/main/paradox/sns.md
deleted file mode 100644
index fd126fd3..00000000
--- a/docs/src/main/paradox/sns.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# AWS SNS
-
-The AWS SNS connector provides an Apache Pekko Stream Flow and Sink for push notifications through AWS SNS.
-
-For more information about AWS SNS please visit the [official documentation](https://docs.aws.amazon.com/sns/index.html).
-
-@@project-info{ projectId="sns" }
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-sns_$scala.binary.version$
-  version=$project.version$
-  symbol2=PekkoVersion
-  value2=$akka.version$
-  group2=org.apache.pekko
-  artifact2=pekko-stream_$scala.binary.version$
-  version2=PekkoVersion
-  symbol3=PekkoHttpVersion
-  value3=$akka-http.version$
-  group3=org.apache.pekko
-  artifact3=akka-http_$scala.binary.version$
-  version3=PekkoHttpVersion
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="sns" }
-
-
-## Setup
-
-This connector requires an @scala[implicit] @javadoc[SnsAsyncClient](software.amazon.awssdk.services.sns.SnsAsyncClient) instance to communicate with AWS SNS.
-
-It is your code's responsibility to call `close` to free any resources held by the client. In this example it will be called when the actor system is terminated.
-
-Scala
-: @@snip [snip](/sns/src/test/scala/akka/stream/alpakka/sns/IntegrationTestContext.scala) { #init-client }
-
-Java
-: @@snip [snip](/sns/src/test/java/docs/javadsl/SnsPublisherTest.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-We will also need an @apidoc[akka.actor.ActorSystem].
-
-Scala
-: @@snip [snip](/sns/src/test/scala/akka/stream/alpakka/sns/IntegrationTestContext.scala) { #init-system }
-
-Java
-: @@snip [snip](/sns/src/test/java/docs/javadsl/SnsPublisherTest.java) { #init-system }
-
-This is all preparation that we are going to need.
-
-## Publish messages to an SNS topic
-
-Now we can publish a message to any SNS topic we have access to by providing the topic ARN to the
-@apidoc[SnsPublisher$] Flow or Sink factory method.
-
-### Using a Flow
-
-Scala
-: @@snip [snip](/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala) { #use-flow }
-
-Java
-: @@snip [snip](/sns/src/test/java/docs/javadsl/SnsPublisherTest.java) { #use-flow }
-
-As you can see, this would publish the messages from the source to the specified AWS SNS topic.
-After a message has been successfully published, a
-@javadoc[PublishResponse](software.amazon.awssdk.services.sns.model.PublishResponse)
-will be pushed downstream.
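-
-A minimal sketch of such a flow (the topic ARN is made up; client and system are prepared as shown above):
-
-```scala
-import akka.actor.ActorSystem
-import akka.stream.alpakka.sns.scaladsl.SnsPublisher
-import akka.stream.scaladsl.{Sink, Source}
-import software.amazon.awssdk.services.sns.SnsAsyncClient
-
-implicit val system: ActorSystem = ActorSystem("sns-example")
-implicit val snsClient: SnsAsyncClient = SnsAsyncClient.create()
-
-val topicArn = "arn:aws:sns:eu-central-1:000000000000:example-topic" // hypothetical ARN
-
-// publish each message and print the message id from the response
-Source(List("hello", "world"))
-  .via(SnsPublisher.flow(topicArn))
-  .runWith(Sink.foreach(response => println(response.messageId())))
-```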
-
-### Using a Sink
-
-Scala
-: @@snip [snip](/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala) { #use-sink }
-
-Java
-: @@snip [snip](/sns/src/test/java/docs/javadsl/SnsPublisherTest.java) { #use-sink }
-
-As you can see, this would publish the messages from the source to the specified AWS SNS topic.
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/docs/src/main/paradox/sqs.md b/docs/src/main/paradox/sqs.md
deleted file mode 100644
index 244ef1d2..00000000
--- a/docs/src/main/paradox/sqs.md
+++ /dev/null
@@ -1,346 +0,0 @@
-# AWS SQS
-
-@@@ note { title="Amazon Simple Queue Service" }
-
-Amazon Simple Queue Service (Amazon SQS) offers a secure, durable, and available hosted queue that lets you integrate and decouple distributed software systems and components. Amazon SQS offers common constructs such as dead-letter queues and cost allocation tags. It provides a generic web services API that can be accessed from any programming language the AWS SDK supports. 
-
-For more information about AWS SQS please visit the [official documentation](https://docs.aws.amazon.com/sqs/index.html).
-
-@@@
-
-The AWS SQS connector provides Apache Pekko Stream sources and sinks for AWS SQS queues.
-
-@@project-info{ projectId="sqs" }
-
-
-## Artifacts
-
-@@dependency [sbt,Maven,Gradle] {
-  group=org.apache.pekko
-  artifact=pekko-connectors-sqs_$scala.binary.version$
-  version=$project.version$
-  symbol2=PekkoVersion
-  value2=$akka.version$
-  group2=org.apache.pekko
-  artifact2=pekko-stream_$scala.binary.version$
-  version2=PekkoVersion
-  symbol3=PekkoHttpVersion
-  value3=$akka-http.version$
-  group3=org.apache.pekko
-  artifact3=pekko-http_$scala.binary.version$
-  version3=PekkoHttpVersion
-}
-
-The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively.
-
-@@dependencies { projectId="sqs" }
-
-
-## Setup
-
-Prepare an @apidoc[akka.actor.ActorSystem].
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala) { #init-mat }
-
-Java
-: @@snip [snip](/sqs/src/test/java/akka/stream/alpakka/sqs/javadsl/BaseSqsTest.java) { #init-mat }
-
-
-This connector requires an @scala[implicit] @javadoc[SqsAsyncClient](software.amazon.awssdk.services.sqs.SqsAsyncClient) instance to communicate with AWS SQS.
-
-It is your code's responsibility to call `close` to free any resources held by the client. In this example it will be called when the actor system is terminated.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala) { #init-client }
-
-Java
-: @@snip [snip](/sqs/src/test/java/akka/stream/alpakka/sqs/javadsl/BaseSqsTest.java) { #init-client }
-
-The example above uses @extref:[Apache Pekko HTTP](akka-http:) as the default HTTP client implementation. For more details about the HTTP client, configuring request retrying and best practices for credentials, see @ref[AWS client configuration](aws-shared-configuration.md).
-
-## Read from an SQS queue
-
-The source created with @apidoc[SqsSource$] reads AWS Java SDK SQS `Message` objects from any SQS queue identified by its queue URL.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala) { #run }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsSourceTest.java) { #run }
-
-In this example we use `closeOnEmptyReceive` to let the stream complete when there are no more messages on the queue. In realistic scenarios, you should add a `KillSwitch` to the stream; see @extref:["Controlling stream completion with KillSwitch" in the Apache Pekko documentation](pekko:stream/stream-dynamic.html#controlling-stream-completion-with-killswitch).
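-
-A minimal sketch of draining a queue this way (the queue URL is made up):
-
-```scala
-import akka.actor.ActorSystem
-import akka.stream.alpakka.sqs.SqsSourceSettings
-import akka.stream.alpakka.sqs.scaladsl.SqsSource
-import akka.stream.scaladsl.Sink
-import software.amazon.awssdk.services.sqs.SqsAsyncClient
-
-implicit val system: ActorSystem = ActorSystem("sqs-example")
-implicit val sqsClient: SqsAsyncClient = SqsAsyncClient.create()
-
-val queueUrl = "https://sqs.eu-central-1.amazonaws.com/000000000000/example-queue" // hypothetical
-
-// completes once the queue returns an empty receive
-SqsSource(queueUrl, SqsSourceSettings().withCloseOnEmptyReceive(true))
-  .runWith(Sink.foreach(message => println(message.body())))
-```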
-
-
-### Source configuration
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala) { #SqsSourceSettings }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsSourceTest.java) { #SqsSourceSettings }
-
-
-Options:
-
- - `maxBatchSize` - the maximum number of messages to return per request (allowed values 1-10, see `MaxNumberOfMessages` in AWS docs). Default: 10
- - `maxBufferSize` - internal buffer size used by the `Source`. Default: 100 messages
- - `waitTimeSeconds` - the duration for which the call waits for a message to arrive in the queue before
-    returning (see `WaitTimeSeconds` in AWS docs). Default: 20 seconds  
- - `closeOnEmptyReceive` - If true, the source completes when no messages are available.
- 
-More details are available in the [AWS SQS Receive Message documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#API_ReceiveMessage_RequestParameters).
- 
-An `SqsSource` can either provide an infinite stream of messages (the default), or can
-drain its source queue until no further messages are available. The latter
-behaviour is enabled by setting the `closeOnEmptyReceive` flag on creation. If set, the
-`Source` will receive messages until it encounters an empty reply from the server. It 
-then continues to emit any remaining messages in its local buffer. The stage will complete
-once the last message has been sent downstream.
-
-Note that for short-polling (`waitTimeSeconds` of 0), SQS may respond with an empty 
-reply even if there are still messages in the queue. This behavior can be prevented by 
-switching to long-polling (by setting `waitTimeSeconds` to a nonzero value).
-
-Be aware that the `SqsSource` runs multiple requests to Amazon SQS in parallel. The maximum number of concurrent
-requests is limited by `parallelism = maxBufferSize / maxBatchSize`. E.g.: By default `maxBatchSize` is set to 10 and
-`maxBufferSize` is set to 100 so at the maximum, `SqsSource` will run 10 concurrent requests to Amazon SQS. 
-
-## Publish messages to an SQS queue
-
-Create a `String`-accepting sink, publishing to an SQS queue.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #run-string }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #run-string }
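-
-A minimal sketch (reusing the client, system and hypothetical `queueUrl` from the source example above):
-
-```scala
-import akka.stream.alpakka.sqs.scaladsl.SqsPublishSink
-import akka.stream.scaladsl.Source
-
-// each String becomes one SendMessageRequest to the queue
-Source(List("message-1", "message-2"))
-  .runWith(SqsPublishSink(queueUrl))
-```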
-
-
-Create a `SendMessageRequest`-accepting sink that publishes to an SQS queue.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #run-send-request }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #run-send-request }
-
-You can also build flow stages which publish messages to SQS queues, backpressure on queue responses, and then forward 
-@apidoc[SqsPublishResult] further down the stream.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #flow }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #flow }
-
-
-
-
-### Group messages and publish batches to an SQS queue
-
-Create a sink that forwards `String` messages to the SQS queue. The main difference from the previous use case
-is that it batches items, sends them as one request, and forwards an @apidoc[SqsPublishResultEntry]
-further down the stream for each item processed.
-
-Note: another option for sending a batch of messages to SQS is the `AmazonSQSBufferedAsyncClient`.
-This client buffers `SendMessageRequest`s under the hood and sends them as a batch instead of sending them one by one. However, beware that `AmazonSQSBufferedAsyncClient`
-does not support FIFO queues. See the [documentation for client-side buffering](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-client-side-buffering-request-batching.html).
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #group }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #group }
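-
-A minimal sketch of the grouping variant (same assumptions as the earlier SQS sketches; the settings API may differ):
-
-```scala
-import akka.stream.alpakka.sqs.SqsPublishGroupedSettings
-import akka.stream.alpakka.sqs.scaladsl.SqsPublishSink
-import akka.stream.scaladsl.Source
-
-// messages are collected into batches of up to 10 before being sent
-Source((1 to 100).map(i => s"message-$i"))
-  .runWith(SqsPublishSink.grouped(queueUrl, SqsPublishGroupedSettings().withMaxBatchSize(10)))
-```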
-
-
-### Grouping configuration
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #SqsPublishGroupedSettings }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #SqsPublishGroupedSettings }
-
-
-Options:
-
 - `maxBatchSize` - the maximum number of messages per batch sent to SQS. Default: 10.
 - `maxBatchWait` - the maximum duration the stage waits for `maxBatchSize` messages to arrive.
    At the end of this period it sends what it has collected,
    even if `maxBatchSize` is not reached. Default: 500 milliseconds
 - `concurrentRequests` - the number of batches sent to SQS concurrently.
-
-
-### Publish lists as batches to an SQS queue
-
-Create a sink, that publishes @scala[`Iterable[String]`]@java[`Iterable<String>`] to the SQS queue.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #batch-string }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #batch-string }
-
-Create a sink, that publishes @scala[`Iterable[SendMessageRequest]`]@java[`Iterable<SendMessageRequest>`] to the SQS queue.
-
-@@@ warning
-
-Be aware that the size of the batch must be less than or equal to 10 because Amazon SQS has a limit for batch requests.
-If the batch has more than 10 entries, the request will fail.
-
-@@@
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #batch-send-request }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #batch-send-request }
-
-
-### Batch configuration
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala) { #SqsPublishBatchSettings }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsPublishTest.java) { #SqsPublishBatchSettings }
-
-Options:
-
 - `concurrentRequests` - the number of batches sent to SQS concurrently.
-
-
-## Updating message statuses
-
-`SqsAckSink` and `SqsAckFlow` provide the possibility to acknowledge (delete), ignore, or postpone messages on an SQS queue.
-They accept @apidoc[MessageAction] sub-classes to select the action to be taken.
-
-For every message you may decide which action to take and push it together with the message back to the queue (see the sketch after this list):
-
- - `Delete` - delete message from the queue
- - `Ignore` - don't change that message, and let it reappear in the queue after the visibility timeout
- - `ChangeMessageVisibility(visibilityTimeout)` - can be used to postpone a message, or make
- the message immediately visible to other consumers. See [official documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
-for more details.
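-
-A minimal sketch of choosing an action per message (the decision logic and message-body conventions are made up):
-
-```scala
-import akka.stream.alpakka.sqs.MessageAction
-import software.amazon.awssdk.services.sqs.model.Message
-
-def decide(message: Message): MessageAction =
-  if (message.body().contains("done")) MessageAction.Delete(message) // acknowledge
-  else if (message.body().contains("retry-later"))
-    MessageAction.ChangeMessageVisibility(message, 60) // postpone by 60 seconds
-  else MessageAction.Ignore(message) // reappears after the visibility timeout
-```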
-
-
-### Acknowledge (delete) messages
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #ack }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #ack }
-
-
-### Ignore messages
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #ignore }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #ignore }
-
-
-### Change Visibility Timeout of messages
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #requeue }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #requeue }
-
-
-### Update message status in a flow
-
-The `SqsAckFlow` forwards a @apidoc[SqsAckResult] sub-class down the stream:
-
-- `DeleteResult` to acknowledge message deletion
-- `ChangeMessageVisibilityResult` to acknowledge message visibility change
- In case of the `Ignore` action, nothing is performed on the SQS queue, thus no `SqsAckResult` is forwarded.
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #flow-ack }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #flow-ack }
-
-
-
-### SqsAck configuration
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #SqsAckSettings }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #SqsAckSettings }
-
-
-Options:
-
 - `maxInFlight` - maximum number of messages being processed by the SQS client at the same time. Default: 10
-
-
-### Updating message statuses in batches with grouping
-
-`SqsAckFlow.grouped` batches actions by their type and forwards a @apidoc[SqsAckResultEntry] 
-sub-class for each item processed:
-
-- `DeleteResultEntry` to acknowledge message deletion
-- `ChangeMessageVisibilityResultEntry` to acknowledge message visibility change
- In case of the `Ignore` action, nothing is performed on the SQS queue, thus no `SqsAckResultEntry` is forwarded.
-
-Acknowledge (delete) messages:
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #batch-ack }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #batch-ack }
-
-Ignore messages:
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #batch-ignore }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #batch-ignore }
-
-Change Visibility Timeout of messages:
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #batch-requeue }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #batch-requeue }
-
-
-### Acknowledge grouping configuration
-
-Scala
-: @@snip [snip](/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala) { #SqsAckGroupedSettings }
-
-Java
-: @@snip [snip](/sqs/src/test/java/docs/javadsl/SqsAckTest.java) { #SqsAckGroupedSettings }
-
-
-Options:
-
 - `maxBatchSize` - the maximum number of messages per batch sent to SQS. Default: 10.
 - `maxBatchWait` - the maximum duration the stage waits for `maxBatchSize` messages to arrive.
    At the end of this period it sends what it has collected,
    even if `maxBatchSize` is not reached. Default: 500 milliseconds
 - `concurrentRequests` - the number of batches sent to SQS concurrently.
-
-
-## Integration testing
-
-For integration testing without touching Amazon SQS, Apache Pekko Connectors uses [ElasticMQ](https://github.com/softwaremill/elasticmq), 
-a queueing service that provides an AWS SQS-compatible API.
-
-@@@ index
-
-* [retry conf](aws-shared-configuration.md)
-
-@@@
diff --git a/project/Dependencies.scala b/project/Dependencies.scala
index ed5d6cb5..0ad0c24d 100644
--- a/project/Dependencies.scala
+++ b/project/Dependencies.scala
@@ -79,6 +79,7 @@ object Dependencies {
       "com.rabbitmq" % "amqp-client" % "5.14.2" // APLv2
     ) ++ Mockito)
 
+  /* see https://github.com/apache/incubator-pekko-connectors/issues/34
   val AwsLambda = Seq(
     libraryDependencies ++= Seq(
       "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion, // ApacheV2
@@ -89,6 +90,7 @@ object Dependencies {
 
         ExclusionRule("software.amazon.awssdk", "apache-client"),
         ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito)
+   */
 
   val AzureStorageQueue = Seq(
     libraryDependencies ++= Seq(
@@ -125,6 +127,7 @@ object Dependencies {
       "org.scalatest" %% "scalatest" % "3.2.11" % Test // ApacheV2
     ))
 
+  /*
   val DynamoDB = Seq(
     libraryDependencies ++= Seq(
       ("com.github.matsluni" %% "aws-spi-akka-http" % AwsSpiAkkaHttpVersion).excludeAll( // ApacheV2
@@ -136,6 +139,7 @@ object Dependencies {
         ExclusionRule("software.amazon.awssdk", "netty-nio-client")),
       "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion // ApacheV2
     ))
+   */
 
   val Elasticsearch = Seq(
     libraryDependencies ++= Seq(
@@ -304,6 +308,7 @@ object Dependencies {
       "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0" // MIT
     ) ++ JacksonDatabindDependencies)
 
+  /*
   val Kinesis = Seq(
     libraryDependencies ++= Seq(
       "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion, // ApacheV2
@@ -316,6 +321,7 @@ object Dependencies {
       _.excludeAll(
         ExclusionRule("software.amazon.awssdk", "apache-client"),
         ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito)
+   */
 
   val KuduVersion = "1.7.1"
   val Kudu = Seq(
@@ -395,6 +401,8 @@ object Dependencies {
       "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, // BSD 2-clause "Simplified" License
       "com.h2database" % "h2" % "2.1.210" % Test // Eclipse Public License 1.0
     ))
+
+  /*
   val Eventbridge = Seq(
     libraryDependencies ++= Seq(
       ("com.github.matsluni" %% "aws-spi-akka-http" % AwsSpiAkkaHttpVersion).excludeAll( // ApacheV2
@@ -418,6 +426,7 @@ object Dependencies {
         ExclusionRule("software.amazon.awssdk", "netty-nio-client")),
       "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion // ApacheV2
     ) ++ Mockito)
+   */
 
   val SolrjVersion = "7.7.3"
   val SolrVersionForDocs = "7_7"
@@ -431,6 +440,7 @@ object Dependencies {
     ),
     resolvers += ("restlet".at("https://maven.restlet.talend.com")))
 
+  /*
   val Sqs = Seq(
     libraryDependencies ++= Seq(
       ("com.github.matsluni" %% "aws-spi-akka-http" % AwsSpiAkkaHttpVersion).excludeAll( // ApacheV2
@@ -443,6 +453,7 @@ object Dependencies {
       "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion, // ApacheV2
       "org.mockito" % "mockito-inline" % mockitoVersion % Test // MIT
     ) ++ Mockito)
+   */
 
   val Sse = Seq(
     libraryDependencies ++= Seq(


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pekko.apache.org
For additional commands, e-mail: commits-help@pekko.apache.org