Posted to commits@pekko.apache.org by md...@apache.org on 2023/01/06 08:57:38 UTC
[incubator-pekko-connectors] 03/03: format source with scalafmt, #12
This is an automated email from the ASF dual-hosted git repository.
mdedetrich pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-pekko-connectors.git
commit 433daa5c5b0b2c3f85c6f1c7e62a4f4171856468
Author: Auto Format <nobody>
AuthorDate: Fri Jan 6 09:48:29 2023 +0100
format source with scalafmt, #12
---
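For orientation before the 756-file stat list and the diffs: the reformat boils down to three scalafmt conventions, shown here on a constructed before/after example. The Settings class below is made up for illustration; only the javax.net.ssl import line is taken from the first hunk.

  // before: unspaced import braces, dangling closing parenthesis,
  // continuation parameters aligned under the opening parenthesis
  import javax.net.ssl.{SSLContext, TrustManager}

  trait SettingsBase

  final class Settings private (
      val name: Option[String] = None,
      val retries: Int = 3
  ) extends SettingsBase {
    private def copy(name: Option[String] = name,
                     retries: Int = retries) =
      new Settings(name, retries)
  }

  // after: spaced import braces, closing parenthesis kept on the last
  // argument line, continuation parameters at a fixed indent
  import javax.net.ssl.{ SSLContext, TrustManager }

  trait SettingsBase

  final class Settings private (
      val name: Option[String] = None,
      val retries: Int = 3) extends SettingsBase {
    private def copy(name: Option[String] = name,
        retries: Int = retries) =
      new Settings(name, retries)
  }
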
.../alpakka/amqp/AmqpConnectionProvider.scala | 59 ++-
.../alpakka/amqp/AmqpConnectorSettings.scala | 58 +--
.../impl/AbstractAmqpAsyncFlowStageLogic.scala | 41 +-
.../amqp/impl/AbstractAmqpFlowStageLogic.scala | 19 +-
.../alpakka/amqp/impl/AmqpAsyncFlowStage.scala | 61 ++-
.../amqp/impl/AmqpAsyncUnorderedFlowStage.scala | 36 +-
.../alpakka/amqp/impl/AmqpConnectorLogic.scala | 13 +-
.../alpakka/amqp/impl/AmqpReplyToSinkStage.scala | 104 ++--
.../alpakka/amqp/impl/AmqpRpcFlowStage.scala | 326 ++++++-------
.../alpakka/amqp/impl/AmqpSimpleFlowStage.scala | 35 +-
.../stream/alpakka/amqp/impl/AmqpSourceStage.scala | 29 +-
.../stream/alpakka/amqp/javadsl/AmqpFlow.scala | 16 +-
.../alpakka/amqp/javadsl/AmqpFlowWithContext.scala | 8 +-
.../stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala | 11 +-
.../stream/alpakka/amqp/javadsl/AmqpSink.scala | 3 +-
.../stream/alpakka/amqp/javadsl/AmqpSource.scala | 2 +-
.../scala/akka/stream/alpakka/amqp/model.scala | 12 +-
.../stream/alpakka/amqp/scaladsl/AmqpFlow.scala | 25 +-
.../amqp/scaladsl/AmqpFlowWithContext.scala | 14 +-
.../stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala | 10 +-
.../stream/alpakka/amqp/scaladsl/AmqpSink.scala | 2 +-
.../stream/alpakka/amqp/scaladsl/AmqpSource.scala | 2 +-
.../stream/alpakka/amqp/AmqpProxyConnection.scala | 2 +-
.../scaladsl/AmqpConnectionProvidersSpec.scala | 8 +-
.../alpakka/amqp/scaladsl/AmqpConnectorsSpec.scala | 74 +--
.../alpakka/amqp/scaladsl/AmqpFlowSpec.scala | 22 +-
...AmqpGraphStageLogicConnectionShutdownSpec.scala | 5 +-
.../stream/alpakka/amqp/scaladsl/AmqpMocking.scala | 2 +-
.../test/scala/docs/scaladsl/AmqpDocsSpec.scala | 90 ++--
.../alpakka/avroparquet/impl/AvroParquetFlow.scala | 16 +-
.../avroparquet/impl/AvroParquetSource.scala | 7 +-
.../avroparquet/javadsl/AvroParquetSink.scala | 4 +-
.../avroparquet/scaladsl/AvroParquetSink.scala | 2 +-
.../scala/docs/scaladsl/AbstractAvroParquet.scala | 11 +-
.../scala/docs/scaladsl/AvroParquetFlowSpec.scala | 14 +-
.../scala/docs/scaladsl/AvroParquetSinkSpec.scala | 16 +-
.../docs/scaladsl/AvroParquetSourceSpec.scala | 16 +-
.../eventbridge/EventBridgePublishSettings.scala | 2 +-
.../eventbridge/javadsl/EventBridgePublisher.scala | 18 +-
.../scaladsl/EventBridgePublisher.scala | 25 +-
.../aws/eventbridge/DefaultTestContext.scala | 2 +-
.../eventbridge/EventBridgePublishMockSpec.scala | 7 +-
.../aws/eventbridge/IntegrationTestContext.scala | 15 +-
.../docs/scaladsl/EventBridgePublisherSpec.scala | 4 +-
.../alpakka/awslambda/javadsl/AwsLambdaFlow.scala | 2 +-
.../alpakka/awslambda/scaladsl/AwsLambdaFlow.scala | 5 +-
.../scala/docs/scaladsl/AwsLambdaFlowSpec.scala | 16 +-
.../src/test/scala/docs/scaladsl/Examples.scala | 16 +-
.../impl/AzureQueueSinkFunctions.scala | 16 +-
.../storagequeue/impl/AzureQueueSourceStage.scala | 11 +-
.../storagequeue/javadsl/AzureQueueSink.scala | 19 +-
.../storagequeue/javadsl/AzureQueueSource.scala | 2 +-
.../storagequeue/scaladsl/AzureQueueSink.scala | 18 +-
.../storagequeue/scaladsl/AzureQueueSource.scala | 5 +-
.../alpakka/azure/storagequeue/settings.scala | 11 +-
.../test/scala/docs/scaladsl/AzureQueueSpec.scala | 26 +-
build.sbt | 241 ++++-----
.../cassandra/AkkaDiscoverySessionProvider.scala | 15 +-
.../cassandra/CassandraMetricsRegistry.scala | 2 +-
.../cassandra/CassandraSessionSettings.scala | 8 +-
.../alpakka/cassandra/CassandraWriteSettings.scala | 14 +-
.../alpakka/cassandra/CqlSessionProvider.scala | 10 +-
.../alpakka/cassandra/javadsl/CassandraFlow.scala | 34 +-
.../cassandra/javadsl/CassandraSession.scala | 47 +-
.../javadsl/CassandraSessionRegistry.scala | 4 +-
.../cassandra/javadsl/CassandraSource.scala | 2 +-
.../alpakka/cassandra/scaladsl/CassandraFlow.scala | 18 +-
.../cassandra/scaladsl/CassandraSession.scala | 26 +-
.../scaladsl/CassandraSessionRegistry.scala | 22 +-
.../cassandra/scaladsl/CassandraSource.scala | 2 +-
.../cassandra/scaladsl/CassandraLifecycle.scala | 14 +-
.../scaladsl/CassandraSessionPerformanceSpec.scala | 7 +-
.../cassandra/scaladsl/CassandraSpecBase.scala | 4 +-
.../scala/docs/javadsl/CassandraSessionSpec.scala | 14 +-
.../scala/docs/scaladsl/AkkaDiscoverySpec.scala | 2 +-
.../scala/docs/scaladsl/CassandraFlowSpec.scala | 36 +-
.../scala/docs/scaladsl/CassandraSourceSpec.scala | 8 +-
.../couchbase/CouchbaseResponseException.scala | 3 +-
.../couchbase/CouchbaseSessionRegistry.scala | 11 +-
.../couchbase/impl/CouchbaseClusterRegistry.scala | 7 +-
.../couchbase/impl/CouchbaseSessionImpl.scala | 80 ++-
.../impl/CouchbaseSessionJavaAdapter.scala | 19 +-
.../alpakka/couchbase/impl/RxUtilities.scala | 6 +-
.../alpakka/couchbase/javadsl/CouchbaseFlow.scala | 38 +-
.../couchbase/javadsl/CouchbaseSession.scala | 28 +-
.../alpakka/couchbase/javadsl/CouchbaseSink.scala | 26 +-
.../couchbase/javadsl/CouchbaseSource.scala | 12 +-
.../akka/stream/alpakka/couchbase/model.scala | 53 +-
.../alpakka/couchbase/scaladsl/CouchbaseFlow.scala | 132 +++--
.../couchbase/scaladsl/CouchbaseSession.scala | 22 +-
.../alpakka/couchbase/scaladsl/CouchbaseSink.scala | 16 +-
.../couchbase/scaladsl/CouchbaseSource.scala | 12 +-
.../couchbase/scaladsl/DiscoverySupport.scala | 18 +-
.../couchbase/testing/CouchbaseSupport.scala | 18 +-
.../scala/docs/scaladsl/CouchbaseFlowSpec.scala | 167 +++----
.../scaladsl/CouchbaseSessionExamplesSpec.scala | 6 +-
.../scala/docs/scaladsl/CouchbaseSourceSpec.scala | 10 +-
.../test/scala/docs/scaladsl/DiscoverySpec.scala | 10 +-
.../stream/alpakka/csv/scaladsl/CsvBench.scala | 6 +-
.../stream/alpakka/csv/impl/CsvFormatter.scala | 12 +-
.../akka/stream/alpakka/csv/impl/CsvParser.scala | 27 +-
.../stream/alpakka/csv/impl/CsvParsingStage.scala | 25 +-
.../alpakka/csv/impl/CsvToMapJavaStage.scala | 54 +--
.../stream/alpakka/csv/impl/CsvToMapStage.scala | 43 +-
.../alpakka/csv/scaladsl/CsvFormatting.scala | 10 +-
.../stream/alpakka/csv/scaladsl/CsvParsing.scala | 9 +-
.../alpakka/csv/scaladsl/CsvQuotingStyle.scala | 2 +-
.../stream/alpakka/csv/scaladsl/CsvToMap.scala | 67 ++-
.../akka/stream/alpakka/csv/CsvParserSpec.scala | 20 +-
.../scala/docs/scaladsl/CsvFormattingSpec.scala | 9 +-
.../test/scala/docs/scaladsl/CsvParsingSpec.scala | 37 +-
csv/src/test/scala/docs/scaladsl/CsvSpec.scala | 2 +-
.../test/scala/docs/scaladsl/CsvToMapSpec.scala | 107 ++--
.../alpakka/eip/scaladsl/PassThroughExamples.scala | 24 +-
.../akka/stream/alpakka/dynamodb/DynamoDbOp.scala | 6 +-
.../stream/alpakka/dynamodb/javadsl/DynamoDb.scala | 29 +-
.../alpakka/dynamodb/scaladsl/DynamoDb.scala | 34 +-
.../akka/stream/alpakka/dynamodb/ItemSpec.scala | 4 +-
.../akka/stream/alpakka/dynamodb/TableSpec.scala | 4 +-
.../akka/stream/alpakka/dynamodb/TestOps.scala | 48 +-
.../src/test/scala/docs/scaladsl/ExampleSpec.scala | 24 +-
.../src/test/scala/docs/scaladsl/RetrySpec.scala | 8 +-
.../ElasticsearchConnectionSettings.scala | 30 +-
.../ElasticsearchSourceSettings.scala | 49 +-
.../elasticsearch/ElasticsearchWriteSettings.scala | 38 +-
.../elasticsearch/OpensearchSourceSettings.scala | 49 +-
.../elasticsearch/OpensearchWriteSettings.scala | 30 +-
.../stream/alpakka/elasticsearch/ReadResult.scala | 4 +-
.../alpakka/elasticsearch/SourceSettingsBase.scala | 22 +-
.../alpakka/elasticsearch/WriteMessage.scala | 52 +-
.../alpakka/elasticsearch/WriteSettingsBase.scala | 13 +-
.../elasticsearch/impl/ElasticsearchApi.scala | 10 +-
.../impl/ElasticsearchSimpleFlowStage.scala | 35 +-
.../impl/ElasticsearchSourceStage.scala | 61 +--
.../elasticsearch/impl/NDJsonProtocol.scala | 2 +-
.../alpakka/elasticsearch/impl/RestBulkApi.scala | 16 +-
.../alpakka/elasticsearch/impl/RestBulkApiV5.scala | 21 +-
.../alpakka/elasticsearch/impl/RestBulkApiV7.scala | 18 +-
.../elasticsearch/javadsl/ElasticsearchFlow.scala | 35 +-
.../elasticsearch/javadsl/ElasticsearchSink.scala | 5 +-
.../javadsl/ElasticsearchSource.scala | 64 ++-
.../elasticsearch/scaladsl/ElasticsearchFlow.scala | 67 ++-
.../elasticsearch/scaladsl/ElasticsearchSink.scala | 7 +-
.../scaladsl/ElasticsearchSource.scala | 40 +-
.../elasticsearch/testkit/MessageFactory.scala | 26 +-
.../impl/ElasticsearchSimpleFlowStageTest.scala | 23 +-
.../impl/ElasticsearchSourcStageTest.scala | 8 +-
.../scaladsl/ElasticsearchConnectorBehaviour.scala | 228 ++++-----
.../scala/docs/scaladsl/ElasticsearchSpec.scala | 10 +-
.../docs/scaladsl/ElasticsearchSpecBase.scala | 10 +-
.../docs/scaladsl/ElasticsearchSpecUtils.scala | 23 +-
.../scala/docs/scaladsl/ElasticsearchV5Spec.scala | 179 +++----
.../scala/docs/scaladsl/ElasticsearchV7Spec.scala | 135 ++----
.../scaladsl/OpensearchConnectorBehaviour.scala | 228 ++++-----
.../scala/docs/scaladsl/OpensearchV1Spec.scala | 169 +++----
.../file/impl/archive/EnsureByteStreamSize.scala | 18 +-
.../file/impl/archive/TarArchiveEntry.scala | 27 +-
.../file/impl/archive/TarArchiveManager.scala | 2 +-
.../alpakka/file/impl/archive/TarReaderStage.scala | 29 +-
.../alpakka/file/impl/archive/ZipArchiveFlow.scala | 17 +-
.../file/impl/archive/ZipArchiveManager.scala | 2 +-
.../file/impl/archive/ZipReaderSource.scala | 16 +-
.../akka/stream/alpakka/file/javadsl/Archive.scala | 12 +-
.../alpakka/file/javadsl/LogRotatorSink.scala | 27 +-
.../scala/akka/stream/alpakka/file/model.scala | 63 ++-
.../stream/alpakka/file/scaladsl/Archive.scala | 14 +-
.../stream/alpakka/file/scaladsl/Directory.scala | 8 +-
.../file/scaladsl/DirectoryChangesSource.scala | 7 +-
.../alpakka/file/scaladsl/FileTailSource.scala | 19 +-
.../alpakka/file/scaladsl/LogRotatorSink.scala | 49 +-
.../file/impl/archive/TarArchiveEntrySpec.scala | 2 +-
.../file/impl/archive/ZipArchiveFlowTest.scala | 2 +-
.../src/test/scala/docs/scaladsl/ArchiveSpec.scala | 19 +-
.../test/scala/docs/scaladsl/DirectorySpec.scala | 10 +-
.../test/scala/docs/scaladsl/ExecutableUtils.scala | 8 +-
.../docs/scaladsl/FileTailSourceExtrasSpec.scala | 20 +-
.../scala/docs/scaladsl/FileTailSourceSpec.scala | 3 +-
.../scala/docs/scaladsl/LogRotatorSinkSpec.scala | 59 +--
.../test/scala/docs/scaladsl/TarArchiveSpec.scala | 43 +-
.../alpakka/ftp/impl/CommonFtpOperations.scala | 11 +-
.../alpakka/ftp/impl/FtpBrowserGraphStage.scala | 5 +-
.../impl/FtpDirectoryOperationsGraphStage.scala | 5 +-
.../stream/alpakka/ftp/impl/FtpGraphStage.scala | 2 +-
.../alpakka/ftp/impl/FtpGraphStageLogic.scala | 5 +-
.../stream/alpakka/ftp/impl/FtpIOGraphStage.scala | 41 +-
.../akka/stream/alpakka/ftp/impl/FtpLike.scala | 4 +-
.../stream/alpakka/ftp/impl/FtpOperations.scala | 13 +-
.../stream/alpakka/ftp/impl/FtpSourceFactory.scala | 58 +--
.../stream/alpakka/ftp/impl/FtpsOperations.scala | 10 +-
.../stream/alpakka/ftp/impl/SftpOperations.scala | 25 +-
.../akka/stream/alpakka/ftp/javadsl/FtpApi.scala | 164 +++----
.../main/scala/akka/stream/alpakka/ftp/model.scala | 91 ++--
.../akka/stream/alpakka/ftp/scaladsl/FtpApi.scala | 92 ++--
.../akka/stream/alpakka/ftp/BaseFtpSpec.scala | 16 +-
.../akka/stream/alpakka/ftp/BaseFtpsSpec.scala | 16 +-
.../akka/stream/alpakka/ftp/BaseSftpSpec.scala | 16 +-
.../scala/akka/stream/alpakka/ftp/BaseSpec.scala | 17 +-
.../stream/alpakka/ftp/CommonFtpStageSpec.scala | 56 +--
.../test/scala/docs/scaladsl/FtpExamplesSpec.scala | 16 +-
.../test/scala/docs/scaladsl/scalaExamples.scala | 44 +-
.../akka/stream/alpakka/geode/GeodeSettings.scala | 14 +-
.../stream/alpakka/geode/impl/GeodeCache.scala | 4 +-
.../alpakka/geode/impl/GeodeCapabilities.scala | 2 +-
.../geode/impl/pdx/DelegatingPdxSerializer.scala | 5 +-
.../stream/alpakka/geode/impl/pdx/PdxDecoder.scala | 12 +-
.../stream/alpakka/geode/impl/pdx/PdxEncoder.scala | 8 +-
.../geode/impl/pdx/ShapelessPdxSerializer.scala | 7 +-
.../geode/impl/stage/GeodeCQueryGraphLogic.scala | 14 +-
.../impl/stage/GeodeContinuousSourceStage.scala | 61 ++-
.../geode/impl/stage/GeodeFiniteSourceStage.scala | 31 +-
.../alpakka/geode/impl/stage/GeodeFlowStage.scala | 24 +-
.../geode/impl/stage/GeodeQueryGraphLogic.scala | 4 +-
.../geode/impl/stage/GeodeSourceStageLogic.scala | 4 +-
.../akka/stream/alpakka/geode/scaladsl/Geode.scala | 30 +-
.../alpakka/geode/impl/pdx/PDXDecoderSpec.scala | 2 +-
.../alpakka/geode/impl/pdx/PDXEncodeSpec.scala | 2 +-
.../alpakka/geode/impl/pdx/PdxWriterMock.scala | 8 +-
.../test/scala/docs/scaladsl/GeodeBaseSpec.scala | 8 +-
.../docs/scaladsl/GeodeContinuousSourceSpec.scala | 14 +-
.../docs/scaladsl/GeodeFiniteSourceSpec.scala | 4 +-
.../test/scala/docs/scaladsl/GeodeFlowSpec.scala | 14 +-
.../test/scala/docs/scaladsl/GeodeSinkSpec.scala | 8 +-
geode/src/test/scala/docs/scaladsl/Model.scala | 2 +-
.../scala/docs/scaladsl/PersonPdxSerializer.scala | 2 +-
.../bigquery/storage/BigQueryRecordMapImpl.scala | 4 +-
.../bigquery/storage/BigQueryStorageSettings.scala | 5 +-
.../bigquery/storage/ProtobufConverters.scala | 6 +-
.../bigquery/storage/impl/AkkaGrpcSettings.scala | 8 +-
.../bigquery/storage/impl/ArrowSource.scala | 21 +-
.../bigquery/storage/impl/AvroDecoder.scala | 2 +-
.../bigquery/storage/impl/AvroSource.scala | 18 +-
.../bigquery/storage/impl/SDKClientSource.scala | 2 +-
.../storage/javadsl/BigQueryArrowStorage.scala | 82 ++--
.../storage/javadsl/BigQueryAvroStorage.scala | 86 ++--
.../bigquery/storage/javadsl/BigQueryStorage.scala | 65 ++-
.../storage/scaladsl/BigQueryArrowStorage.scala | 88 ++--
.../storage/scaladsl/BigQueryAvroStorage.scala | 88 ++--
.../storage/scaladsl/BigQueryStorage.scala | 36 +-
.../scaladsl/BigQueryStorageAttributes.scala | 3 +-
.../scaladsl/GrpcBigQueryStorageReader.scala | 2 +-
.../storage/javadsl/AvroByteStringDecoder.scala | 6 +-
.../bigquery/storage/BigQueryStorageSpecBase.scala | 7 +-
.../bigquery/storage/mock/BigQueryMockData.scala | 10 +-
.../bigquery/storage/mock/BigQueryMockServer.scala | 23 +-
.../storage/scaladsl/ArrowByteStringDecoder.scala | 19 +-
.../storage/scaladsl/AvroByteStringDecoder.scala | 6 +-
.../scaladsl/BigQueryArrowStorageSpec.scala | 7 +-
.../storage/scaladsl/BigQueryStorageSpec.scala | 30 +-
.../test/scala/docs/scaladsl/ExampleReader.scala | 37 +-
.../googlecloud/bigquery/BigQueryAttributes.scala | 6 +-
.../googlecloud/bigquery/BigQueryException.scala | 9 +-
.../alpakka/googlecloud/bigquery/BigQueryExt.scala | 8 +-
.../googlecloud/bigquery/javadsl/BigQuery.scala | 118 +++--
.../javadsl/jackson/BigQueryMarshallers.scala | 11 +-
.../bigquery/model/DatasetJsonProtocol.scala | 14 +-
.../bigquery/model/ErrorProtoJsonProtocol.scala | 6 +-
.../bigquery/model/JobJsonProtocol.scala | 35 +-
.../bigquery/model/QueryJsonProtocol.scala | 117 +++--
.../googlecloud/bigquery/model/StringEnum.scala | 4 +-
.../bigquery/model/TableDataJsonProtocol.scala | 31 +-
.../bigquery/model/TableJsonProtocol.scala | 61 ++-
.../bigquery/scaladsl/BigQueryDatasets.scala | 22 +-
.../bigquery/scaladsl/BigQueryJobs.scala | 34 +-
.../bigquery/scaladsl/BigQueryQueries.scala | 41 +-
.../bigquery/scaladsl/BigQueryRest.scala | 4 +-
.../bigquery/scaladsl/BigQueryTableData.scala | 34 +-
.../bigquery/scaladsl/BigQueryTables.scala | 19 +-
.../bigquery/scaladsl/schema/BasicSchemas.scala | 2 +-
.../bigquery/scaladsl/schema/JavaTimeSchemas.scala | 4 +-
.../scaladsl/schema/PrimitiveSchemaWriter.scala | 2 +-
.../bigquery/scaladsl/schema/ProductSchemas.scala | 4 +-
.../bigquery/scaladsl/schema/SchemaWriter.scala | 2 +-
.../bigquery/scaladsl/schema/StandardSchemas.scala | 2 +-
.../scaladsl/spray/BigQueryBasicFormats.scala | 46 +-
.../scaladsl/spray/BigQueryCollectionFormats.scala | 8 +-
.../scaladsl/spray/BigQueryJavaTimeFormats.scala | 14 +-
.../scaladsl/spray/BigQueryJsonFormat.scala | 2 +-
.../scaladsl/spray/BigQueryJsonProtocol.scala | 2 +-
.../scaladsl/spray/BigQueryProductFormats.scala | 5 +-
.../scaladsl/spray/BigQueryRestBasicFormats.scala | 12 +-
.../scaladsl/spray/BigQueryRestJsonProtocol.scala | 2 +-
.../scaladsl/spray/BigQueryStandardFormats.scala | 2 +-
.../googlecloud/bigquery/HoverflySupport.scala | 7 +-
.../alpakka/googlecloud/bigquery/e2e/A.scala | 13 +-
.../e2e/scaladsl/BigQueryEndToEndSpec.scala | 13 +-
.../bigquery/e2e/scaladsl/EndToEndHelper.scala | 17 +-
.../bigquery/scaladsl/BigQueryQueriesSpec.scala | 43 +-
.../scaladsl/schema/BigQuerySchemasSpec.scala | 11 +-
.../scaladsl/spray/BigQueryJsonProtocolSpec.scala | 6 +-
.../src/test/scala/docs/scaladsl/BigQueryDoc.scala | 50 +-
.../googlecloud/pubsub/grpc/PubSubSettings.scala | 24 +-
.../pubsub/grpc/impl/AkkaGrpcSettings.scala | 8 +-
.../pubsub/grpc/impl/DeprecatedCredentials.scala | 4 +-
.../pubsub/grpc/javadsl/GooglePubSub.scala | 21 +-
.../pubsub/grpc/javadsl/GrpcPublisher.scala | 2 +-
.../pubsub/grpc/javadsl/GrpcSubscriber.scala | 6 +-
.../pubsub/grpc/scaladsl/GooglePubSub.scala | 28 +-
.../pubsub/grpc/scaladsl/GrpcPublisher.scala | 4 +-
.../pubsub/grpc/scaladsl/GrpcSubscriber.scala | 4 +-
.../src/test/scala/docs/scaladsl/ExampleApp.scala | 14 +-
.../test/scala/docs/scaladsl/IntegrationSpec.scala | 42 +-
.../googlecloud/pubsub/impl/PubSubApi.scala | 44 +-
.../googlecloud/pubsub/javadsl/GooglePubSub.scala | 28 +-
.../stream/alpakka/googlecloud/pubsub/model.scala | 116 +++--
.../googlecloud/pubsub/scaladsl/GooglePubSub.scala | 39 +-
.../googlecloud/pubsub/GooglePubSubSpec.scala | 27 +-
.../googlecloud/pubsub/impl/PubSubApiSpec.scala | 133 ++---
.../test/scala/docs/scaladsl/ExampleUsage.scala | 54 +--
.../test/scala/docs/scaladsl/IntegrationSpec.scala | 6 +-
.../alpakka/googlecloud/storage/Bucket.scala | 21 +-
.../googlecloud/storage/CustomerEncryption.scala | 2 +-
.../alpakka/googlecloud/storage/FailedUpload.scala | 3 +-
.../alpakka/googlecloud/storage/GCSExt.scala | 2 +-
.../alpakka/googlecloud/storage/GCStorageExt.scala | 2 +-
.../googlecloud/storage/GCStorageSettings.scala | 26 +-
.../googlecloud/storage/ObjectAccessControls.scala | 147 +++---
.../googlecloud/storage/StorageObject.scala | 24 +-
.../googlecloud/storage/StorageSettings.scala | 4 +-
.../storage/impl/BucketListResult.scala | 12 +-
.../alpakka/googlecloud/storage/impl/Formats.scala | 102 ++--
.../googlecloud/storage/impl/GCStorageStream.scala | 81 ++--
.../googlecloud/storage/impl/RewriteResponse.scala | 3 +-
.../storage/impl/UploadPartResponse.scala | 4 +-
.../googlecloud/storage/javadsl/GCStorage.scala | 77 ++-
.../googlecloud/storage/scaladsl/GCStorage.scala | 57 ++-
.../alpakka/googlecloud/storage/settings.scala | 2 +-
.../alpakka/googlecloud/storage/GCSExtSpec.scala | 4 +-
.../googlecloud/storage/GCSSettingsSpec.scala | 4 +-
.../googlecloud/storage/GCStorageExtSpec.scala | 4 +-
.../storage/GCStorageSettingsSpec.scala | 4 +-
.../storage/WithMaterializerGlobal.scala | 4 +-
.../impl/GCStorageStreamIntegrationSpec.scala | 49 +-
.../storage/scaladsl/GCStorageWiremockBase.scala | 212 +++-----
.../scala/docs/scaladsl/GCStorageSinkSpec.scala | 21 +-
.../scala/docs/scaladsl/GCStorageSourceSpec.scala | 127 ++---
.../stream/alpakka/google/GoogleAttributes.scala | 4 +-
.../akka/stream/alpakka/google/GoogleExt.scala | 8 +-
.../stream/alpakka/google/GoogleSettings.scala | 66 ++-
.../stream/alpakka/google/PaginatedRequest.scala | 5 +-
.../stream/alpakka/google/ResumableUpload.scala | 34 +-
.../stream/alpakka/google/auth/AccessToken.scala | 2 +-
.../google/auth/ComputeEngineCredentials.scala | 4 +-
.../stream/alpakka/google/auth/Credentials.scala | 18 +-
.../google/auth/GoogleComputeMetadata.scala | 6 +-
.../stream/alpakka/google/auth/GoogleOAuth2.scala | 19 +-
.../google/auth/GoogleOAuth2Credentials.scala | 11 +-
.../google/auth/GoogleOAuth2Exception.scala | 11 +-
.../stream/alpakka/google/auth/NoCredentials.scala | 4 +-
.../alpakka/google/auth/OAuth2Credentials.scala | 20 +-
.../google/auth/ServiceAccountCredentials.scala | 24 +-
.../google/auth/UserAccessCredentials.scala | 36 +-
.../alpakka/google/auth/UserAccessMetadata.scala | 8 +-
.../google/http/ForwardProxyHttpsContext.scala | 6 +-
.../google/http/ForwardProxyPoolSettings.scala | 26 +-
.../stream/alpakka/google/http/GoogleHttp.scala | 119 +++--
.../akka/stream/alpakka/google/implicits.scala | 7 +-
.../stream/alpakka/google/javadsl/Google.scala | 19 +-
.../stream/alpakka/google/scaladsl/Google.scala | 7 +-
.../google/scaladsl/`X-Upload-Content-Type`.scala | 11 +-
.../stream/alpakka/google/util/AnnotateLast.scala | 2 +-
.../stream/alpakka/google/util/EitherFlow.scala | 14 +-
.../akka/stream/alpakka/google/util/Retry.scala | 18 +-
.../alpakka/google/GoogleHttpException.scala | 2 +-
.../stream/alpakka/google/GoogleSettingsSpec.scala | 6 +-
.../stream/alpakka/google/HoverflySupport.scala | 7 +-
.../alpakka/google/PaginatedRequestSpec.scala | 22 +-
.../alpakka/google/ResumableUploadSpec.scala | 16 +-
.../alpakka/google/auth/GoogleOAuth2Spec.scala | 8 +-
.../google/auth/OAuth2CredentialsSpec.scala | 8 +-
.../alpakka/google/http/GoogleHttpSpec.scala | 62 +--
.../test/scala/docs/scaladsl/GoogleCommonDoc.scala | 10 +-
.../firebase/fcm/FcmNotificationModels.scala | 15 +-
.../alpakka/google/firebase/fcm/FcmSettings.scala | 79 ++-
.../google/firebase/fcm/impl/FcmFlows.scala | 12 +-
.../google/firebase/fcm/impl/FcmJsonSupport.scala | 21 +-
.../google/firebase/fcm/impl/FcmSender.scala | 9 +-
.../google/firebase/fcm/javadsl/GoogleFcm.scala | 6 +-
.../google/firebase/fcm/scaladsl/GoogleFcm.scala | 6 +-
.../google/firebase/fcm/v1/impl/FcmFlows.scala | 2 +-
.../firebase/fcm/v1/impl/FcmJsonSupport.scala | 61 ++-
.../google/firebase/fcm/v1/impl/FcmSender.scala | 9 +-
.../google/firebase/fcm/v1/javadsl/GoogleFcm.scala | 4 +-
.../firebase/fcm/v1/models/AndroidConfig.scala | 9 +-
.../google/firebase/fcm/v1/models/ApnsConfig.scala | 3 +-
.../firebase/fcm/v1/models/FcmNotification.scala | 7 +-
.../firebase/fcm/v1/models/WebPushConfig.scala | 6 +-
.../firebase/fcm/v1/scaladsl/GoogleFcm.scala | 6 +-
.../firebase/fcm/v1/FcmNotificationSpec.scala | 2 +-
.../firebase/fcm/v1/impl/FcmSenderSpec.scala | 48 +-
.../src/test/scala/docs/scaladsl/FcmExamples.scala | 32 +-
.../akka/stream/alpakka/hbase/HTableSettings.scala | 24 +-
.../alpakka/hbase/impl/HBaseCapabilities.scala | 11 +-
.../stream/alpakka/hbase/impl/HBaseFlowStage.scala | 22 +-
.../alpakka/hbase/impl/HBaseSourceStage.scala | 12 +-
.../stream/alpakka/hbase/javadsl/HTableStage.scala | 8 +-
.../alpakka/hbase/scaladsl/HTableStage.scala | 8 +-
.../test/scala/docs/scaladsl/HBaseStageSpec.scala | 40 +-
.../stream/alpakka/hdfs/impl/HdfsFlowStage.scala | 20 +-
.../impl/strategy/DefaultRotationStrategy.scala | 6 +-
.../hdfs/impl/strategy/DefaultSyncStrategy.scala | 3 +-
.../hdfs/impl/writer/CompressedDataWriter.scala | 10 +-
.../alpakka/hdfs/impl/writer/DataWriter.scala | 5 +-
.../alpakka/hdfs/impl/writer/HdfsWriter.scala | 2 +-
.../alpakka/hdfs/impl/writer/SequenceWriter.scala | 33 +-
.../stream/alpakka/hdfs/javadsl/HdfsFlow.scala | 43 +-
.../stream/alpakka/hdfs/javadsl/HdfsSource.scala | 21 +-
.../scala/akka/stream/alpakka/hdfs/model.scala | 12 +-
.../stream/alpakka/hdfs/scaladsl/HdfsFlow.scala | 60 +--
.../stream/alpakka/hdfs/scaladsl/HdfsSource.scala | 17 +-
.../akka/stream/alpakka/hdfs/util/TestUtils.scala | 33 +-
.../test/scala/docs/scaladsl/HdfsReaderSpec.scala | 40 +-
.../test/scala/docs/scaladsl/HdfsWriterSpec.scala | 116 ++---
.../huawei/pushkit/ForwardProxyHttpsContext.scala | 8 +-
.../huawei/pushkit/ForwardProxyPoolSettings.scala | 10 +-
.../alpakka/huawei/pushkit/HmsSettingExt.scala | 8 +-
.../alpakka/huawei/pushkit/HmsSettings.scala | 49 +-
.../alpakka/huawei/pushkit/impl/HmsTokenApi.scala | 17 +-
.../alpakka/huawei/pushkit/impl/PushKitFlows.scala | 12 +-
.../huawei/pushkit/impl/PushKitJsonSupport.scala | 33 +-
.../huawei/pushkit/impl/PushKitSender.scala | 19 +-
.../huawei/pushkit/javadsl/HmsPushKit.scala | 4 +-
.../huawei/pushkit/models/AndroidConfig.scala | 104 ++--
.../alpakka/huawei/pushkit/models/ApnsConfig.scala | 4 +-
.../pushkit/models/PushKitNotification.scala | 18 +-
.../alpakka/huawei/pushkit/models/WebConfig.scala | 30 +-
.../huawei/pushkit/scaladsl/HmsPushKit.scala | 6 +-
.../huawei/pushkit/ConditionBuilderSpec.scala | 2 +-
.../huawei/pushkit/impl/HmsTokenApiSpec.scala | 41 +-
.../huawei/pushkit/impl/PushKitSenderSpec.scala | 54 +--
.../test/scala/docs/scaladsl/PushKitExamples.scala | 22 +-
.../alpakka/influxdb/InfluxDbReadSettings.scala | 6 +-
.../alpakka/influxdb/InfluxDbWriteMessage.scala | 18 +-
.../influxdb/impl/AlpakkaResultMapperHelper.scala | 36 +-
.../alpakka/influxdb/impl/InfluxDbFlowStage.scala | 44 +-
.../influxdb/impl/InfluxDbSourceStage.scala | 38 +-
.../alpakka/influxdb/javadsl/InfluxDbFlow.scala | 20 +-
.../alpakka/influxdb/javadsl/InfluxDbSink.scala | 16 +-
.../alpakka/influxdb/javadsl/InfluxDbSource.scala | 8 +-
.../alpakka/influxdb/scaladsl/InfluxDbFlow.scala | 20 +-
.../alpakka/influxdb/scaladsl/InfluxDbSink.scala | 8 +-
.../alpakka/influxdb/scaladsl/InfluxDbSource.scala | 8 +-
.../src/test/scala/docs/scaladsl/FlowSpec.scala | 35 +-
.../scala/docs/scaladsl/InfluxDbSourceSpec.scala | 10 +-
.../test/scala/docs/scaladsl/InfluxDbSpec.scala | 32 +-
.../stream/alpakka/ironmq/IronMqSettings.scala | 41 +-
.../scala/akka/stream/alpakka/ironmq/domain.scala | 2 +-
.../akka/stream/alpakka/ironmq/impl/Codec.scala | 2 +-
.../stream/alpakka/ironmq/impl/IronMqClient.scala | 62 ++-
.../alpakka/ironmq/impl/IronMqPullStage.scala | 10 +-
.../alpakka/ironmq/impl/IronMqPushStage.scala | 16 +-
.../alpakka/ironmq/javadsl/IronMqConsumer.scala | 2 +-
.../alpakka/ironmq/javadsl/IronMqProducer.scala | 26 +-
.../alpakka/ironmq/scaladsl/IronMqProducer.scala | 15 +-
.../akka/stream/alpakka/ironmq/IronMqSpec.scala | 4 +-
.../alpakka/ironmq/impl/IronMqClientForTests.scala | 2 +-
.../alpakka/ironmq/impl/IronMqClientSpec.scala | 2 +-
.../alpakka/ironmq/impl/IronMqPullStageSpec.scala | 4 +-
.../alpakka/ironmq/impl/IronMqPushStageSpec.scala | 2 +-
.../ironmq/scaladsl/IronMqConsumerSpec.scala | 6 +-
.../ironmq/scaladsl/IronMqProducerSpec.scala | 14 +-
.../test/scala/docs/scaladsl/IronMqDocsSpec.scala | 10 +-
.../akka/stream/alpakka/jms/AcknowledgeMode.scala | 15 +-
.../alpakka/jms/ConnectionRetrySettings.scala | 14 +-
.../akka/stream/alpakka/jms/Credentials.scala | 24 +-
.../akka/stream/alpakka/jms/Destinations.scala | 2 +-
.../scala/akka/stream/alpakka/jms/Envelopes.scala | 4 +-
.../stream/alpakka/jms/JmsBrowseSettings.scala | 20 +-
.../stream/alpakka/jms/JmsConsumerSettings.scala | 21 +-
.../akka/stream/alpakka/jms/JmsExceptions.scala | 15 +-
.../akka/stream/alpakka/jms/JmsMessages.scala | 279 +++++------
.../stream/alpakka/jms/JmsProducerSettings.scala | 23 +-
.../stream/alpakka/jms/SendRetrySettings.scala | 17 +-
.../alpakka/jms/impl/InternalConnectionState.scala | 6 +-
.../alpakka/jms/impl/JmsAckSourceStage.scala | 12 +-
.../stream/alpakka/jms/impl/JmsBrowseStage.scala | 6 +-
.../stream/alpakka/jms/impl/JmsConnector.scala | 56 +--
.../stream/alpakka/jms/impl/JmsConsumerStage.scala | 5 +-
.../alpakka/jms/impl/JmsMessageProducer.scala | 48 +-
.../stream/alpakka/jms/impl/JmsMessageReader.scala | 12 +-
.../stream/alpakka/jms/impl/JmsProducerStage.scala | 37 +-
.../stream/alpakka/jms/impl/JmsTxSourceStage.scala | 16 +-
.../akka/stream/alpakka/jms/impl/Sessions.scala | 27 +-
.../stream/alpakka/jms/impl/SourceStageLogic.scala | 23 +-
.../stream/alpakka/jms/javadsl/JmsConsumer.scala | 6 +-
.../stream/alpakka/jms/javadsl/JmsProducer.scala | 22 +-
.../alpakka/jms/scaladsl/JmsConnectorState.scala | 12 +-
.../stream/alpakka/jms/scaladsl/JmsConsumer.scala | 2 +-
.../stream/alpakka/jms/scaladsl/JmsProducer.scala | 22 +-
.../akka/stream/alpakka/jms/scaladsl/package.scala | 18 +-
.../alpakka/jms/JmsConnectionStatusSpec.scala | 36 +-
.../stream/alpakka/jms/JmsProducerRetrySpec.scala | 40 +-
.../scala/akka/stream/alpakka/jms/JmsSpec.scala | 22 +-
.../alpakka/jms/impl/JmsMessageProducerSpec.scala | 20 +-
.../alpakka/jms/impl/SoftReferenceCacheSpec.scala | 4 +-
.../jms/scaladsl/CachedConnectionFactory.scala | 2 +-
.../jms/scaladsl/JmsAckConnectorsSpec.scala | 101 ++--
.../scaladsl/JmsBufferedAckConnectorsSpec.scala | 96 ++--
.../scala/docs/scaladsl/JmsConnectorsSpec.scala | 359 ++++++--------
.../docs/scaladsl/JmsIbmmqConnectorsSpec.scala | 60 ++-
.../test/scala/docs/scaladsl/JmsSettingsSpec.scala | 22 +-
.../scala/docs/scaladsl/JmsTxConnectorsSpec.scala | 147 +++---
.../alpakka/json/impl/JsonStreamReader.scala | 13 +-
.../test/scala/docs/scaladsl/JsonReaderTest.scala | 2 +-
.../stream/alpakka/kinesis/CommittableRecord.scala | 11 +-
.../stream/alpakka/kinesis/KinesisErrors.scala | 2 +-
.../alpakka/kinesis/KinesisFlowSettings.scala | 18 +-
.../alpakka/kinesis/KinesisSchedulerSettings.scala | 6 +-
.../stream/alpakka/kinesis/ShardSettings.scala | 21 +-
.../kinesis/impl/KinesisSchedulerSourceStage.scala | 16 +-
.../alpakka/kinesis/impl/KinesisSourceStage.scala | 20 +-
.../alpakka/kinesis/impl/ShardProcessor.scala | 21 +-
.../alpakka/kinesis/javadsl/KinesisFlow.scala | 18 +-
.../kinesis/javadsl/KinesisSchedulerSource.scala | 19 +-
.../alpakka/kinesis/javadsl/KinesisSink.scala | 6 +-
.../alpakka/kinesis/javadsl/KinesisSource.scala | 4 +-
.../alpakka/kinesis/scaladsl/KinesisFlow.scala | 66 +--
.../kinesis/scaladsl/KinesisSchedulerSource.scala | 18 +-
.../alpakka/kinesis/scaladsl/KinesisSink.scala | 15 +-
.../alpakka/kinesis/scaladsl/KinesisSource.scala | 8 +-
.../KinesisFirehoseFlowSettings.scala | 18 +-
.../javadsl/KinesisFirehoseFlow.scala | 10 +-
.../javadsl/KinesisFirehoseSink.scala | 8 +-
.../scaladsl/KinesisFirehoseFlow.scala | 28 +-
.../scaladsl/KinesisFirehoseSink.scala | 3 +-
.../alpakka/kinesis/DefaultTestContext.scala | 9 +-
.../stream/alpakka/kinesis/KinesisFlowSpec.scala | 22 +-
.../akka/stream/alpakka/kinesis/KinesisMock.scala | 2 +-
.../kinesis/KinesisSchedulerSourceSpec.scala | 76 ++-
.../stream/alpakka/kinesis/KinesisSourceSpec.scala | 15 +-
.../scala/akka/stream/alpakka/kinesis/Valve.scala | 13 +-
.../kinesisfirehose/KinesisFirehoseFlowSpec.scala | 2 +-
.../kinesisfirehose/KinesisFirehoseMock.scala | 2 +-
.../src/test/scala/docs/scaladsl/KclSnippets.scala | 34 +-
.../docs/scaladsl/KinesisFirehoseSnippets.scala | 22 +-
.../test/scala/docs/scaladsl/KinesisSnippets.scala | 39 +-
.../akka/stream/alpakka/kudu/KuduClientExt.scala | 2 +-
.../stream/alpakka/kudu/KuduTableSettings.scala | 33 +-
.../alpakka/kudu/impl/KuduCapabilities.scala | 6 +-
.../stream/alpakka/kudu/impl/KuduFlowStage.scala | 16 +-
.../stream/alpakka/kudu/javadsl/KuduTable.scala | 4 +-
.../stream/alpakka/kudu/scaladsl/KuduTable.scala | 8 +-
.../test/scala/docs/scaladsl/KuduTableSpec.scala | 24 +-
.../stream/alpakka/mongodb/DocumentReplace.scala | 1 -
.../stream/alpakka/mongodb/DocumentUpdate.scala | 1 -
.../stream/alpakka/mongodb/javadsl/MongoFlow.scala | 29 +-
.../stream/alpakka/mongodb/javadsl/MongoSink.scala | 17 +-
.../alpakka/mongodb/scaladsl/MongoFlow.scala | 56 +--
.../alpakka/mongodb/scaladsl/MongoSink.scala | 22 +-
.../test/scala/docs/scaladsl/MongoSinkSpec.scala | 43 +-
.../test/scala/docs/scaladsl/MongoSourceSpec.scala | 6 +-
.../scala/akka/stream/alpakka/mqtt/MqttPerf.scala | 17 +-
.../stream/alpakka/mqtt/streaming/MqttPerf.scala | 18 +-
.../mqtt/streaming/MqttSessionSettings.scala | 64 ++-
.../mqtt/streaming/impl/BehaviorRunner.scala | 4 +-
.../alpakka/mqtt/streaming/impl/ClientState.scala | 228 ++++-----
.../mqtt/streaming/impl/MqttFrameStage.scala | 19 +-
.../mqtt/streaming/impl/QueueOfferState.scala | 8 +-
.../alpakka/mqtt/streaming/impl/RequestState.scala | 138 +++---
.../alpakka/mqtt/streaming/impl/ServerState.scala | 360 ++++++--------
.../alpakka/mqtt/streaming/javadsl/Mqtt.scala | 11 +-
.../akka/stream/alpakka/mqtt/streaming/model.scala | 175 ++++---
.../alpakka/mqtt/streaming/scaladsl/Mqtt.scala | 20 +-
.../mqtt/streaming/scaladsl/MqttSession.scala | 178 ++++---
.../mqtt/streaming/impl/MqttFrameStageSpec.scala | 2 +-
.../mqtt/streaming/impl/QueueOfferStateSpec.scala | 13 +-
.../mqtt/streaming/impl/RequestStateSpec.scala | 12 +-
.../scala/docs/scaladsl/MqttActorSystemsSpec.scala | 2 +-
.../test/scala/docs/scaladsl/MqttCodecSpec.scala | 43 +-
.../test/scala/docs/scaladsl/MqttFlowSpec.scala | 58 ++-
.../test/scala/docs/scaladsl/MqttSessionSpec.scala | 150 +++---
.../stream/alpakka/mqtt/impl/MqttFlowStage.scala | 88 ++--
.../alpakka/mqtt/impl/MqttFlowStageWithAck.scala | 58 +--
.../stream/alpakka/mqtt/javadsl/MqttFlow.scala | 12 +-
.../stream/alpakka/mqtt/javadsl/MqttSink.scala | 4 +-
.../stream/alpakka/mqtt/javadsl/MqttSource.scala | 8 +-
.../scala/akka/stream/alpakka/mqtt/model.scala | 24 +-
.../stream/alpakka/mqtt/scaladsl/MqttFlow.scala | 40 +-
.../stream/alpakka/mqtt/scaladsl/MqttSink.scala | 2 +-
.../stream/alpakka/mqtt/scaladsl/MqttSource.scala | 16 +-
.../scala/akka/stream/alpakka/mqtt/settings.scala | 84 ++--
.../test/scala/docs/scaladsl/MqttFlowSpec.scala | 28 +-
.../test/scala/docs/scaladsl/MqttSinkSpec.scala | 22 +-
.../test/scala/docs/scaladsl/MqttSourceSpec.scala | 118 ++---
.../test/scala/docs/scaladsl/MqttSpecBase.scala | 5 +-
.../alpakka/orientdb/OrientDbSourceSettings.scala | 15 +-
.../alpakka/orientdb/OrientDbWriteSettings.scala | 17 +-
.../alpakka/orientdb/impl/OrientDbFlowStage.scala | 9 +-
.../orientdb/impl/OrientDbSourceStage.scala | 19 +-
.../alpakka/orientdb/javadsl/OrientDbFlow.scala | 20 +-
.../alpakka/orientdb/javadsl/OrientDbSink.scala | 14 +-
.../alpakka/orientdb/javadsl/OrientDbSource.scala | 22 +-
.../alpakka/orientdb/scaladsl/OrientDbFlow.scala | 36 +-
.../alpakka/orientdb/scaladsl/OrientDbSink.scala | 10 +-
.../alpakka/orientdb/scaladsl/OrientDbSource.scala | 18 +-
.../test/scala/docs/scaladsl/OrientDbSpec.scala | 87 ++--
.../akka/stream/alpakka/pravega/PravegaEvent.scala | 2 +-
.../pravega/PravegaReaderGroupManager.scala | 12 +-
.../stream/alpakka/pravega/PravegaSettings.scala | 327 ++++++-------
.../alpakka/pravega/impl/PravegaCapabilities.scala | 2 +-
.../stream/alpakka/pravega/impl/PravegaFlow.scala | 27 +-
.../alpakka/pravega/impl/PravegaSource.scala | 30 +-
.../pravega/impl/PravegaTableReadFlow.scala | 20 +-
.../alpakka/pravega/impl/PravegaTableSource.scala | 26 +-
.../pravega/impl/PravegaTableWriteFlow.scala | 31 +-
.../alpakka/pravega/impl/PravegaWriter.scala | 3 +-
.../stream/alpakka/pravega/scaladsl/Pravega.scala | 8 +-
.../alpakka/pravega/scaladsl/PravegaTable.scala | 31 +-
.../stream/alpakka/pravega/PravegaBaseSpec.scala | 2 +-
.../stream/alpakka/pravega/PravegaGraphSpec.scala | 13 +-
.../alpakka/pravega/PravegaKVTableSpec.scala | 2 +-
.../pravega/PravegaStreamAndTableSpec.scala | 16 +-
.../scala/docs/scaladsl/PravegaReadWriteDocs.scala | 4 +-
.../scala/docs/scaladsl/PravegaSettingsSpec.scala | 23 +-
project/AutomaticModuleName.scala | 5 +-
project/Common.scala | 169 ++++---
project/Dependencies.scala | 534 +++++++++-----------
project/TestChanged.scala | 15 +-
.../reference/impl/ReferenceFlowStage.scala | 19 +-
.../reference/impl/ReferenceSourceStage.scala | 21 +-
.../impl/ReferenceWithResourceFlowStage.scala | 19 +-
.../alpakka/reference/javadsl/Reference.scala | 13 +-
.../akka/stream/alpakka/reference/model.scala | 14 +-
.../alpakka/reference/scaladsl/Reference.scala | 8 +-
.../akka/stream/alpakka/reference/settings.scala | 14 +-
.../alpakka/reference/testkit/MessageFactory.scala | 8 +-
.../test/scala/docs/scaladsl/ReferenceSpec.scala | 17 +-
.../scala/akka/stream/alpakka/s3/S3Exception.scala | 13 +-
.../main/scala/akka/stream/alpakka/s3/S3Ext.scala | 2 +-
.../scala/akka/stream/alpakka/s3/S3Headers.scala | 19 +-
.../main/scala/akka/stream/alpakka/s3/Utils.scala | 3 +-
.../alpakka/s3/headers/ServerSideEncryption.scala | 34 +-
.../scala/akka/stream/alpakka/s3/impl/Chunk.scala | 2 +-
.../akka/stream/alpakka/s3/impl/DiskBuffer.scala | 2 +-
.../akka/stream/alpakka/s3/impl/HttpRequests.scala | 122 ++---
.../akka/stream/alpakka/s3/impl/Marshalling.scala | 81 ++--
.../akka/stream/alpakka/s3/impl/MemoryBuffer.scala | 4 +-
.../stream/alpakka/s3/impl/MemoryWithContext.scala | 4 +-
.../akka/stream/alpakka/s3/impl/S3Stream.scala | 538 +++++++++------------
.../stream/alpakka/s3/impl/SplitAfterSize.scala | 2 +-
.../s3/impl/SplitAfterSizeWithContext.scala | 10 +-
.../alpakka/s3/impl/auth/CanonicalRequest.scala | 19 +-
.../akka/stream/alpakka/s3/impl/auth/Signer.scala | 26 +-
.../stream/alpakka/s3/impl/auth/SigningKey.scala | 10 +-
.../scala/akka/stream/alpakka/s3/javadsl/S3.scala | 434 ++++++++---------
.../main/scala/akka/stream/alpakka/s3/model.scala | 270 +++++------
.../scala/akka/stream/alpakka/s3/scaladsl/S3.scala | 219 ++++-----
.../scala/akka/stream/alpakka/s3/settings.scala | 100 ++--
.../akka/stream/alpakka/s3/MinioContainer.scala | 4 +-
.../scala/akka/stream/alpakka/s3/MinioS3Test.scala | 5 +-
.../akka/stream/alpakka/s3/S3SettingsSpec.scala | 37 +-
.../stream/alpakka/s3/impl/DiskBufferSpec.scala | 18 +-
.../stream/alpakka/s3/impl/HttpRequestsSpec.scala | 57 +--
.../stream/alpakka/s3/impl/MarshallingSpec.scala | 93 ++--
.../stream/alpakka/s3/impl/MemoryBufferSpec.scala | 9 +-
.../stream/alpakka/s3/impl/S3HeadersSpec.scala | 15 +-
.../akka/stream/alpakka/s3/impl/S3StreamSpec.scala | 56 +--
.../alpakka/s3/impl/SplitAfterSizeSpec.scala | 40 +-
.../s3/impl/auth/CanonicalRequestSpec.scala | 58 +--
.../stream/alpakka/s3/impl/auth/SignerSpec.scala | 34 +-
.../alpakka/s3/impl/auth/SigningKeySpec.scala | 10 +-
.../impl/auth/SplitAfterSizeWithContextSpec.scala | 19 +-
.../alpakka/s3/impl/auth/StreamUtilsSpec.scala | 10 +-
.../s3/scaladsl/S3ClientIntegrationSpec.scala | 2 +-
.../stream/alpakka/s3/scaladsl/S3ExtSpec.scala | 4 +-
.../alpakka/s3/scaladsl/S3IntegrationSpec.scala | 246 ++++------
.../s3/scaladsl/S3SlowMinioIntegrationSpec.scala | 9 +-
.../alpakka/s3/scaladsl/S3WireMockBase.scala | 222 +++------
s3/src/test/scala/docs/scaladsl/S3SinkSpec.scala | 120 ++---
s3/src/test/scala/docs/scaladsl/S3SourceSpec.scala | 58 ++-
.../recordio/impl/RecordIOFramingStage.scala | 10 +-
.../scala/docs/scaladsl/RecordIOFramingSpec.scala | 71 ++-
.../akka/stream/alpakka/slick/javadsl/Slick.scala | 67 ++-
.../stream/alpakka/slick/javadsl/package.scala | 3 +-
.../akka/stream/alpakka/slick/scaladsl/Slick.scala | 22 +-
.../stream/alpakka/slick/scaladsl/package.scala | 2 +-
.../src/test/scala/docs/scaladsl/DocSnippets.scala | 31 +-
slick/src/test/scala/docs/scaladsl/SlickSpec.scala | 56 +--
.../stream/alpakka/sns/javadsl/SnsPublisher.scala | 26 +-
.../stream/alpakka/sns/scaladsl/SnsPublisher.scala | 25 +-
.../stream/alpakka/sns/DefaultTestContext.scala | 2 +-
.../alpakka/sns/IntegrationTestContext.scala | 14 +-
.../stream/alpakka/sns/SnsPublishMockingSpec.scala | 6 +-
.../scala/docs/scaladsl/SnsPublisherSpec.scala | 26 +-
.../akka/stream/alpakka/solr/SolrMessages.scala | 42 +-
.../stream/alpakka/solr/SolrUpdateSettings.scala | 9 +-
.../stream/alpakka/solr/impl/SolrFlowStage.scala | 44 +-
.../stream/alpakka/solr/impl/SolrSourceStage.scala | 4 +-
.../stream/alpakka/solr/javadsl/SolrFlow.scala | 61 +--
.../stream/alpakka/solr/javadsl/SolrSink.scala | 21 +-
.../stream/alpakka/solr/scaladsl/SolrFlow.scala | 65 +--
.../stream/alpakka/solr/scaladsl/SolrSink.scala | 16 +-
solr/src/test/scala/docs/scaladsl/SolrSpec.scala | 165 +++----
.../stream/alpakka/sqs/SqsAckBatchSettings.scala | 3 +-
.../stream/alpakka/sqs/SqsAckGroupedSettings.scala | 33 +-
.../akka/stream/alpakka/sqs/SqsAckSettings.scala | 12 +-
.../scala/akka/stream/alpakka/sqs/SqsModel.scala | 39 +-
.../alpakka/sqs/SqsPublishBatchSettings.scala | 3 +-
.../alpakka/sqs/SqsPublishGroupedSettings.scala | 21 +-
.../stream/alpakka/sqs/SqsSourceSettings.scala | 22 +-
.../alpakka/sqs/impl/BalancingMapAsync.scala | 41 +-
.../stream/alpakka/sqs/javadsl/SqsAckFlow.scala | 8 +-
.../stream/alpakka/sqs/javadsl/SqsAckSink.scala | 10 +-
.../alpakka/sqs/javadsl/SqsPublishFlow.scala | 14 +-
.../alpakka/sqs/javadsl/SqsPublishSink.scala | 27 +-
.../stream/alpakka/sqs/scaladsl/SqsAckFlow.scala | 41 +-
.../stream/alpakka/sqs/scaladsl/SqsAckSink.scala | 10 +-
.../alpakka/sqs/scaladsl/SqsPublishFlow.scala | 39 +-
.../alpakka/sqs/scaladsl/SqsPublishSink.scala | 36 +-
.../stream/alpakka/sqs/scaladsl/SqsSource.scala | 11 +-
.../alpakka/sqs/testkit/MessageFactory.scala | 17 +-
.../alpakka/sqs/scaladsl/DefaultTestContext.scala | 20 +-
.../sqs/scaladsl/MessageAttributeNameSpec.scala | 3 +-
.../alpakka/sqs/scaladsl/SqsPublishSinkSpec.scala | 62 +--
.../alpakka/sqs/scaladsl/SqsSourceMockSpec.scala | 27 +-
sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala | 69 ++-
.../test/scala/docs/scaladsl/SqsPublishSpec.scala | 57 ++-
.../test/scala/docs/scaladsl/SqsSourceSpec.scala | 30 +-
.../stream/alpakka/sse/javadsl/EventSource.scala | 28 +-
.../stream/alpakka/sse/scaladsl/EventSource.scala | 23 +-
.../test/scala/docs/scaladsl/EventSourceSpec.scala | 28 +-
.../stream/alpakka/testkit/CapturingAppender.scala | 7 +-
.../akka/stream/alpakka/testkit/LogbackUtil.scala | 9 +-
.../testkit/javadsl/LogCapturingJunit4.scala | 9 +-
.../alpakka/testkit/scaladsl/LogCapturing.scala | 6 +-
.../stream/alpakka/testkit/scaladsl/Repeated.scala | 2 +-
.../alpakka/text/impl/CharsetDecodingFlow.scala | 4 +-
.../stream/alpakka/text/impl/CharsetLogic.scala | 28 +-
.../alpakka/text/impl/CharsetTranscodingFlow.scala | 4 +-
.../stream/alpakka/text/javadsl/TextFlow.scala | 2 +-
.../stream/alpakka/text/scaladsl/TextFlow.scala | 2 +-
.../text/scaladsl/CharsetCodingFlowsSpec.scala | 10 +-
.../docs/scaladsl/CharsetCodingFlowsDoc.scala | 4 +-
.../akka/stream/alpakka/udp/impl/UdpBind.scala | 27 +-
.../akka/stream/alpakka/udp/impl/UdpSend.scala | 20 +-
.../akka/stream/alpakka/udp/javadsl/Udp.scala | 20 +-
.../akka/stream/alpakka/udp/scaladsl/Udp.scala | 24 +-
udp/src/test/scala/docs/scaladsl/UdpSpec.scala | 4 +-
.../impl/UnixDomainSocketImpl.scala | 102 ++--
.../javadsl/UnixDomainSocket.scala | 22 +-
.../scaladsl/UnixDomainSocket.scala | 21 +-
.../scala/docs/scaladsl/UnixDomainSocketSpec.scala | 27 +-
.../akka/stream/alpakka/xml/impl/Coalesce.scala | 10 +-
.../alpakka/xml/impl/StreamingXmlParser.scala | 27 +-
.../alpakka/xml/impl/StreamingXmlWriter.scala | 6 +-
.../akka/stream/alpakka/xml/impl/Subslice.scala | 6 +-
.../akka/stream/alpakka/xml/impl/Subtree.scala | 6 +-
.../stream/alpakka/xml/javadsl/XmlParsing.scala | 6 +-
.../stream/alpakka/xml/javadsl/XmlWriting.scala | 4 +-
.../main/scala/akka/stream/alpakka/xml/model.scala | 46 +-
.../stream/alpakka/xml/scaladsl/XmlParsing.scala | 2 +-
.../stream/alpakka/xml/scaladsl/XmlWriting.scala | 2 +-
.../test/scala/docs/scaladsl/XmlCoalesceSpec.scala | 14 +-
.../scala/docs/scaladsl/XmlProcessingSpec.scala | 92 ++--
.../test/scala/docs/scaladsl/XmlSubsliceSpec.scala | 22 +-
.../test/scala/docs/scaladsl/XmlSubtreeSpec.scala | 22 +-
.../test/scala/docs/scaladsl/XmlWritingSpec.scala | 47 +-
756 files changed, 10194 insertions(+), 13296 deletions(-)
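A .scalafmt.conf consistent with the hunks that follow would look roughly like this. The option names are standard scalafmt settings, but the values are inferred from the diff rather than copied from the repository's checked-in configuration, which is authoritative:

  # inferred settings; see .scalafmt.conf in the repository root for the real ones
  spaces.inImportCurlyBraces = true   # import a.{ B, C } rather than a.{B, C}
  danglingParentheses.preset = false  # ')' stays on the last argument line
  align.openParenDefnSite = false     # fixed indent for continued definition parameters
  align.openParenCallSite = false     # likewise for call-site arguments
  indent.defnSite = 4                 # assumed indent step
  maxColumn = 120                     # assumed line length
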
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala
index c2758232..33fa6cd1 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala
@@ -8,8 +8,8 @@ import java.util.ConcurrentModificationException
import java.util.concurrent.atomic.AtomicReference
import akka.annotation.DoNotInherit
-import com.rabbitmq.client.{Address, Connection, ConnectionFactory, ExceptionHandler}
-import javax.net.ssl.{SSLContext, TrustManager}
+import com.rabbitmq.client.{ Address, Connection, ConnectionFactory, ExceptionHandler }
+import javax.net.ssl.{ SSLContext, TrustManager }
import scala.annotation.tailrec
import scala.collection.immutable
@@ -72,8 +72,7 @@ final class AmqpDetailsConnectionProvider private (
val automaticRecoveryEnabled: Boolean = false,
val topologyRecoveryEnabled: Boolean = false,
val exceptionHandler: Option[ExceptionHandler] = None,
- val connectionName: Option[String] = None
-) extends AmqpConnectionProvider {
+ val connectionName: Option[String] = None) extends AmqpConnectionProvider {
def withHostAndPort(host: String, port: Int): AmqpDetailsConnectionProvider =
copy(hostAndPortList = immutable.Seq(host -> port))
@@ -152,18 +151,18 @@ final class AmqpDetailsConnectionProvider private (
}
private def copy(hostAndPortList: immutable.Seq[(String, Int)] = hostAndPortList,
- credentials: Option[AmqpCredentials] = credentials,
- virtualHost: Option[String] = virtualHost,
- sslConfiguration: Option[AmqpSSLConfiguration] = sslConfiguration,
- requestedHeartbeat: Option[Int] = requestedHeartbeat,
- connectionTimeout: Option[Int] = connectionTimeout,
- handshakeTimeout: Option[Int] = handshakeTimeout,
- shutdownTimeout: Option[Int] = shutdownTimeout,
- networkRecoveryInterval: Option[Int] = networkRecoveryInterval,
- automaticRecoveryEnabled: Boolean = automaticRecoveryEnabled,
- topologyRecoveryEnabled: Boolean = topologyRecoveryEnabled,
- exceptionHandler: Option[ExceptionHandler] = exceptionHandler,
- connectionName: Option[String] = connectionName): AmqpDetailsConnectionProvider =
+ credentials: Option[AmqpCredentials] = credentials,
+ virtualHost: Option[String] = virtualHost,
+ sslConfiguration: Option[AmqpSSLConfiguration] = sslConfiguration,
+ requestedHeartbeat: Option[Int] = requestedHeartbeat,
+ connectionTimeout: Option[Int] = connectionTimeout,
+ handshakeTimeout: Option[Int] = handshakeTimeout,
+ shutdownTimeout: Option[Int] = shutdownTimeout,
+ networkRecoveryInterval: Option[Int] = networkRecoveryInterval,
+ automaticRecoveryEnabled: Boolean = automaticRecoveryEnabled,
+ topologyRecoveryEnabled: Boolean = topologyRecoveryEnabled,
+ exceptionHandler: Option[ExceptionHandler] = exceptionHandler,
+ connectionName: Option[String] = connectionName): AmqpDetailsConnectionProvider =
new AmqpDetailsConnectionProvider(
hostAndPortList,
credentials,
@@ -177,8 +176,7 @@ final class AmqpDetailsConnectionProvider private (
automaticRecoveryEnabled,
topologyRecoveryEnabled,
exceptionHandler,
- connectionName
- )
+ connectionName)
override def toString: String =
"AmqpDetailsConnectionProvider(" +
@@ -233,8 +231,8 @@ object AmqpCredentials {
}
final class AmqpSSLConfiguration private (val protocol: Option[String] = None,
- val trustManager: Option[TrustManager] = None,
- val context: Option[SSLContext] = None) {
+ val trustManager: Option[TrustManager] = None,
+ val context: Option[SSLContext] = None) {
if (protocol.isDefined && context.isDefined) {
throw new IllegalArgumentException("Protocol and context can't be defined in the same AmqpSSLConfiguration.")
}
@@ -249,8 +247,8 @@ final class AmqpSSLConfiguration private (val protocol: Option[String] = None,
copy(context = context)
private def copy(protocol: Option[String] = protocol,
- trustManager: Option[TrustManager] = trustManager,
- context: Option[SSLContext] = context): AmqpSSLConfiguration =
+ trustManager: Option[TrustManager] = trustManager,
+ context: Option[SSLContext] = context): AmqpSSLConfiguration =
new AmqpSSLConfiguration(protocol, trustManager, context)
override def toString: String =
@@ -302,8 +300,8 @@ object AmqpSSLConfiguration {
* If empty, it defaults to the host and port in the underlying factory.
*/
final class AmqpConnectionFactoryConnectionProvider private (val factory: ConnectionFactory,
- private val hostAndPorts: immutable.Seq[(String, Int)] =
- Nil)
+ private val hostAndPorts: immutable.Seq[(String, Int)] =
+ Nil)
extends AmqpConnectionProvider {
/**
@@ -326,8 +324,7 @@ final class AmqpConnectionFactoryConnectionProvider private (val factory: Connec
* Java API
*/
def withHostsAndPorts(
- hostAndPorts: java.util.List[akka.japi.Pair[String, Int]]
- ): AmqpConnectionFactoryConnectionProvider =
+ hostAndPorts: java.util.List[akka.japi.Pair[String, Int]]): AmqpConnectionFactoryConnectionProvider =
copy(hostAndPorts = hostAndPorts.asScala.map(_.toScala).toIndexedSeq)
override def get: Connection = {
@@ -358,7 +355,7 @@ object AmqpConnectionFactoryConnectionProvider {
}
final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionProvider,
- val automaticRelease: Boolean = true)
+ val automaticRelease: Boolean = true)
extends AmqpConnectionProvider {
import akka.stream.alpakka.amqp.AmqpCachedConnectionProvider._
@@ -375,8 +372,7 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr
val connection = provider.get
if (!state.compareAndSet(Connecting, Connected(connection, 1)))
throw new ConcurrentModificationException(
- "Unexpected concurrent modification while creating the connection."
- )
+ "Unexpected concurrent modification while creating the connection.")
connection
} catch {
case e: ConcurrentModificationException => throw e
@@ -394,7 +390,7 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr
@tailrec
override def release(connection: Connection): Unit = state.get match {
- case Empty => throw new IllegalStateException("There is no connection to release.")
+ case Empty => throw new IllegalStateException("There is no connection to release.")
case Connecting => release(connection)
case c @ Connected(cachedConnection, clients) =>
if (cachedConnection != connection)
@@ -405,8 +401,7 @@ final class AmqpCachedConnectionProvider private (val provider: AmqpConnectionPr
provider.release(connection)
if (!state.compareAndSet(Closing, Empty))
throw new ConcurrentModificationException(
- "Unexpected concurrent modification while closing the connection."
- )
+ "Unexpected concurrent modification while closing the connection.")
}
} else {
if (!state.compareAndSet(c, Connected(cachedConnection, clients - 1))) release(connection)
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala
index 95223fb9..f608d5a1 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala
@@ -30,8 +30,7 @@ final class NamedQueueSourceSettings private (
val exclusive: Boolean = false,
val ackRequired: Boolean = true,
val consumerTag: String = "default",
- val arguments: Map[String, AnyRef] = Map.empty
-) extends AmqpSourceSettings {
+ val arguments: Map[String, AnyRef] = Map.empty) extends AmqpSourceSettings {
def withDeclaration(declaration: Declaration): NamedQueueSourceSettings =
copy(declarations = immutable.Seq(declaration))
@@ -71,11 +70,11 @@ final class NamedQueueSourceSettings private (
copy(arguments = arguments.asScala.toMap)
private def copy(declarations: immutable.Seq[Declaration] = declarations,
- noLocal: Boolean = noLocal,
- exclusive: Boolean = exclusive,
- ackRequired: Boolean = ackRequired,
- consumerTag: String = consumerTag,
- arguments: Map[String, AnyRef] = arguments) =
+ noLocal: Boolean = noLocal,
+ exclusive: Boolean = exclusive,
+ ackRequired: Boolean = ackRequired,
+ consumerTag: String = consumerTag,
+ arguments: Map[String, AnyRef] = arguments) =
new NamedQueueSourceSettings(
connectionProvider,
queue,
@@ -84,8 +83,7 @@ final class NamedQueueSourceSettings private (
exclusive = exclusive,
ackRequired = ackRequired,
consumerTag = consumerTag,
- arguments = arguments
- )
+ arguments = arguments)
override def toString: String =
"NamedQueueSourceSettings(" +
@@ -115,8 +113,7 @@ final class TemporaryQueueSourceSettings private (
val connectionProvider: AmqpConnectionProvider,
val exchange: String,
val declarations: immutable.Seq[Declaration] = Nil,
- val routingKey: Option[String] = None
-) extends AmqpSourceSettings {
+ val routingKey: Option[String] = None) extends AmqpSourceSettings {
def withDeclaration(declaration: Declaration): TemporaryQueueSourceSettings =
copy(declarations = immutable.Seq(declaration))
@@ -157,8 +154,7 @@ object TemporaryQueueSourceSettings {
final class AmqpReplyToSinkSettings private (
val connectionProvider: AmqpConnectionProvider,
- val failIfReplyToMissing: Boolean = true
-) extends AmqpConnectorSettings {
+ val failIfReplyToMissing: Boolean = true) extends AmqpConnectorSettings {
override final val declarations = Nil
def withFailIfReplyToMissing(failIfReplyToMissing: Boolean): AmqpReplyToSinkSettings =
@@ -191,8 +187,7 @@ final class AmqpWriteSettings private (
val routingKey: Option[String] = None,
val declarations: immutable.Seq[Declaration] = Nil,
val bufferSize: Int = 10,
- val confirmationTimeout: FiniteDuration = 100.millis
-) extends AmqpConnectorSettings {
+ val confirmationTimeout: FiniteDuration = 100.millis) extends AmqpConnectorSettings {
def withExchange(exchange: String): AmqpWriteSettings =
copy(exchange = Some(exchange))
@@ -225,11 +220,11 @@ final class AmqpWriteSettings private (
copy(confirmationTimeout = confirmationTimeout.asScala)
private def copy(connectionProvider: AmqpConnectionProvider = connectionProvider,
- exchange: Option[String] = exchange,
- routingKey: Option[String] = routingKey,
- declarations: immutable.Seq[Declaration] = declarations,
- bufferSize: Int = bufferSize,
- confirmationTimeout: FiniteDuration = confirmationTimeout) =
+ exchange: Option[String] = exchange,
+ routingKey: Option[String] = routingKey,
+ declarations: immutable.Seq[Declaration] = declarations,
+ bufferSize: Int = bufferSize,
+ confirmationTimeout: FiniteDuration = confirmationTimeout) =
new AmqpWriteSettings(connectionProvider, exchange, routingKey, declarations, bufferSize, confirmationTimeout)
override def toString: String =
@@ -261,8 +256,7 @@ final class QueueDeclaration private (
val durable: Boolean = false,
val exclusive: Boolean = false,
val autoDelete: Boolean = false,
- val arguments: Map[String, AnyRef] = Map.empty
-) extends Declaration {
+ val arguments: Map[String, AnyRef] = Map.empty) extends Declaration {
def withDurable(durable: Boolean): QueueDeclaration =
copy(durable = durable)
@@ -283,10 +277,10 @@ final class QueueDeclaration private (
copy(arguments = arguments.asScala.toMap)
private def copy(name: String = name,
- durable: Boolean = durable,
- exclusive: Boolean = exclusive,
- autoDelete: Boolean = autoDelete,
- arguments: Map[String, AnyRef] = arguments) =
+ durable: Boolean = durable,
+ exclusive: Boolean = exclusive,
+ autoDelete: Boolean = autoDelete,
+ arguments: Map[String, AnyRef] = arguments) =
new QueueDeclaration(name, durable, exclusive, autoDelete, arguments)
override def toString: String =
@@ -312,8 +306,7 @@ final class BindingDeclaration private (
val queue: String,
val exchange: String,
val routingKey: Option[String] = None,
- val arguments: Map[String, AnyRef] = Map.empty
-) extends Declaration {
+ val arguments: Map[String, AnyRef] = Map.empty) extends Declaration {
def withRoutingKey(routingKey: String): BindingDeclaration = copy(routingKey = Some(routingKey))
@@ -355,8 +348,7 @@ final class ExchangeDeclaration private (
val durable: Boolean = false,
val autoDelete: Boolean = false,
val internal: Boolean = false,
- val arguments: Map[String, AnyRef] = Map.empty
-) extends Declaration {
+ val arguments: Map[String, AnyRef] = Map.empty) extends Declaration {
def withDurable(durable: Boolean): ExchangeDeclaration = copy(durable = durable)
@@ -374,9 +366,9 @@ final class ExchangeDeclaration private (
copy(arguments = arguments.asScala.toMap)
private def copy(durable: Boolean = durable,
- autoDelete: Boolean = autoDelete,
- internal: Boolean = internal,
- arguments: Map[String, AnyRef] = arguments) =
+ autoDelete: Boolean = autoDelete,
+ internal: Boolean = internal,
+ arguments: Map[String, AnyRef] = arguments) =
new ExchangeDeclaration(name, exchangeType, durable, autoDelete, internal, arguments)
override def toString: String =
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala
index f703f853..87e2894b 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpAsyncFlowStageLogic.scala
@@ -8,7 +8,7 @@ import akka.Done
import akka.annotation.InternalApi
import akka.stream._
import akka.stream.alpakka.amqp.impl.AbstractAmqpAsyncFlowStageLogic.DeliveryTag
-import akka.stream.alpakka.amqp.{AmqpWriteSettings, WriteMessage, WriteResult}
+import akka.stream.alpakka.amqp.{ AmqpWriteSettings, WriteMessage, WriteResult }
import akka.stream.stage._
import com.rabbitmq.client.ConfirmCallback
@@ -21,8 +21,7 @@ import scala.concurrent.Promise
@InternalApi private final case class AwaitingMessage[T](
tag: DeliveryTag,
passThrough: T,
- ready: Boolean = false
-)
+ ready: Boolean = false)
/**
* Internal API.
@@ -39,8 +38,7 @@ import scala.concurrent.Promise
@InternalApi private abstract class AbstractAmqpAsyncFlowStageLogic[T](
override val settings: AmqpWriteSettings,
streamCompletion: Promise[Done],
- shape: FlowShape[(WriteMessage, T), (WriteResult, T)]
-) extends TimerGraphStageLogic(shape)
+ shape: FlowShape[(WriteMessage, T), (WriteResult, T)]) extends TimerGraphStageLogic(shape)
with AmqpConnectorLogic
with StageLogging {
@@ -77,8 +75,7 @@ import scala.concurrent.Promise
dequeued.foreach(m => cancelTimer(m.tag))
pushOrEnqueueResults(
- dequeued.map(m => (WriteResult.confirmed, m.passThrough))
- )
+ dequeued.map(m => (WriteResult.confirmed, m.passThrough)))
}
private def onRejection(tag: DeliveryTag, multiple: Boolean): Unit = {
@@ -89,21 +86,18 @@ import scala.concurrent.Promise
dequeued.foreach(m => cancelTimer(m.tag))
pushOrEnqueueResults(
- dequeued.map(m => (WriteResult.rejected, m.passThrough))
- )
+ dequeued.map(m => (WriteResult.rejected, m.passThrough)))
}
private def pushOrEnqueueResults(results: Iterable[(WriteResult, T)]): Unit = {
- results.foreach(
- result =>
- if (isAvailable(out) && exitQueue.isEmpty) {
- log.debug("Pushing {} downstream.", result)
- push(out, result)
- } else {
- log.debug("Message {} queued for downstream push.", result)
- exitQueue.enqueue(result)
- }
- )
+ results.foreach(result =>
+ if (isAvailable(out) && exitQueue.isEmpty) {
+ log.debug("Pushing {} downstream.", result)
+ push(out, result)
+ } else {
+ log.debug("Message {} queued for downstream push.", result)
+ exitQueue.enqueue(result)
+ })
if (isFinished) closeStage()
}
@@ -166,13 +160,11 @@ import scala.concurrent.Promise
message.mandatory,
message.immediate,
message.properties.orNull,
- message.bytes.toArray
- )
+ message.bytes.toArray)
tag
}
- }
- )
+ })
setHandler(
out,
@@ -187,8 +179,7 @@ import scala.concurrent.Promise
if (isFinished) closeStage()
else if (!hasBeenPulled(in)) tryPull(in)
}
- }
- )
+ })
override protected def onTimer(timerKey: Any): Unit =
timerKey match {
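
The stage logic above is driven by RabbitMQ publisher confirms (hence the ConfirmCallback import). A minimal sketch of the raw client API it builds on, assuming an already-open Channel:

    import com.rabbitmq.client.{ Channel, ConfirmCallback }

    def enableConfirms(channel: Channel): Unit = {
      channel.confirmSelect() // switch the channel into confirm mode
      // with multiple = true, every delivery tag up to and including `tag` is covered
      val onAck: ConfirmCallback = (tag, multiple) => println(s"confirmed $tag (multiple=$multiple)")
      val onNack: ConfirmCallback = (tag, multiple) => println(s"rejected $tag (multiple=$multiple)")
      channel.addConfirmListener(onAck, onNack)
    }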
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpFlowStageLogic.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpFlowStageLogic.scala
index 474451ad..cb15d008 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpFlowStageLogic.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AbstractAmqpFlowStageLogic.scala
@@ -7,8 +7,8 @@ package akka.stream.alpakka.amqp.impl
import akka.Done
import akka.annotation.InternalApi
import akka.stream._
-import akka.stream.alpakka.amqp.{AmqpWriteSettings, WriteMessage, WriteResult}
-import akka.stream.stage.{GraphStageLogic, InHandler, OutHandler, StageLogging}
+import akka.stream.alpakka.amqp.{ AmqpWriteSettings, WriteMessage, WriteResult }
+import akka.stream.stage.{ GraphStageLogic, InHandler, OutHandler, StageLogging }
import scala.concurrent.Promise
@@ -20,8 +20,7 @@ import scala.concurrent.Promise
@InternalApi private abstract class AbstractAmqpFlowStageLogic[T](
override val settings: AmqpWriteSettings,
streamCompletion: Promise[Done],
- shape: FlowShape[(WriteMessage, T), (WriteResult, T)]
-) extends GraphStageLogic(shape)
+ shape: FlowShape[(WriteMessage, T), (WriteResult, T)]) extends GraphStageLogic(shape)
with AmqpConnectorLogic
with StageLogging {
@@ -47,15 +46,15 @@ import scala.concurrent.Promise
val (message, passThrough) = grab(in)
publish(message, passThrough)
}
- }
- )
+ })
protected def publish(message: WriteMessage, passThrough: T): Unit
- setHandler(out, new OutHandler {
- override def onPull(): Unit =
- if (!hasBeenPulled(in)) tryPull(in)
- })
+ setHandler(out,
+ new OutHandler {
+ override def onPull(): Unit =
+ if (!hasBeenPulled(in)) tryPull(in)
+ })
override def postStop(): Unit = {
streamCompletion.tryFailure(new RuntimeException("Stage stopped unexpectedly."))
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncFlowStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncFlowStage.scala
index f54ae4ce..6f91a0a5 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncFlowStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncFlowStage.scala
@@ -8,12 +8,12 @@ import akka.Done
import akka.annotation.InternalApi
import akka.event.Logging
import akka.stream.alpakka.amqp.impl.AbstractAmqpAsyncFlowStageLogic.DeliveryTag
-import akka.stream.alpakka.amqp.{AmqpWriteSettings, WriteMessage, WriteResult}
-import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue}
+import akka.stream.alpakka.amqp.{ AmqpWriteSettings, WriteMessage, WriteResult }
+import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue }
import akka.stream._
import scala.collection.immutable.TreeMap
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* Internal API.
@@ -27,8 +27,8 @@ import scala.concurrent.{Future, Promise}
* this delivery tag can be safely dequeued.
*/
@InternalApi private[amqp] final class AmqpAsyncFlowStage[T](
- settings: AmqpWriteSettings
-) extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] {
+ settings: AmqpWriteSettings)
+ extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] {
val in: Inlet[(WriteMessage, T)] = Inlet(Logging.simpleName(this) + ".in")
val out: Outlet[(WriteResult, T)] = Outlet(Logging.simpleName(this) + ".out")
@@ -42,41 +42,40 @@ import scala.concurrent.{Future, Promise}
val streamCompletion = Promise[Done]()
(new AbstractAmqpAsyncFlowStageLogic(settings, streamCompletion, shape) {
- private var buffer = TreeMap[DeliveryTag, AwaitingMessage[T]]()
+ private var buffer = TreeMap[DeliveryTag, AwaitingMessage[T]]()
- override def enqueueMessage(tag: DeliveryTag, passThrough: T): Unit =
- buffer += (tag -> AwaitingMessage(tag, passThrough))
+ override def enqueueMessage(tag: DeliveryTag, passThrough: T): Unit =
+ buffer += (tag -> AwaitingMessage(tag, passThrough))
- override def dequeueAwaitingMessages(tag: DeliveryTag, multiple: Boolean): Iterable[AwaitingMessage[T]] =
- if (multiple) {
- dequeueWhile((t, _) => t <= tag)
- } else {
- setReady(tag)
- if (isAtHead(tag)) {
- dequeueWhile((_, message) => message.ready)
+ override def dequeueAwaitingMessages(tag: DeliveryTag, multiple: Boolean): Iterable[AwaitingMessage[T]] =
+ if (multiple) {
+ dequeueWhile((t, _) => t <= tag)
} else {
- Seq.empty
+ setReady(tag)
+ if (isAtHead(tag)) {
+ dequeueWhile((_, message) => message.ready)
+ } else {
+ Seq.empty
+ }
}
- }
- private def dequeueWhile(
- predicate: (DeliveryTag, AwaitingMessage[T]) => Boolean
- ): Iterable[AwaitingMessage[T]] = {
- val dequeued = buffer.takeWhile { case (k, v) => predicate(k, v) }
- buffer --= dequeued.keys
- dequeued.values
- }
+ private def dequeueWhile(
+ predicate: (DeliveryTag, AwaitingMessage[T]) => Boolean): Iterable[AwaitingMessage[T]] = {
+ val dequeued = buffer.takeWhile { case (k, v) => predicate(k, v) }
+ buffer --= dequeued.keys
+ dequeued.values
+ }
- private def isAtHead(tag: DeliveryTag): Boolean =
- buffer.headOption.exists { case (tag, _) => tag == tag }
+ private def isAtHead(tag: DeliveryTag): Boolean =
+        buffer.headOption.exists { case (headTag, _) => headTag == tag }
- private def setReady(tag: DeliveryTag): Unit =
- buffer.get(tag).foreach(message => buffer += (tag -> message.copy(ready = true)))
+ private def setReady(tag: DeliveryTag): Unit =
+ buffer.get(tag).foreach(message => buffer += (tag -> message.copy(ready = true)))
- override def messagesAwaitingDelivery: Int = buffer.size
+ override def messagesAwaitingDelivery: Int = buffer.size
- override def noAwaitingMessages: Boolean = buffer.isEmpty
+ override def noAwaitingMessages: Boolean = buffer.isEmpty
- }, streamCompletion.future)
+ }, streamCompletion.future)
}
}
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncUnorderedFlowStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncUnorderedFlowStage.scala
index 34e7da2b..d718871e 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncUnorderedFlowStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpAsyncUnorderedFlowStage.scala
@@ -8,12 +8,12 @@ import akka.Done
import akka.annotation.InternalApi
import akka.event.Logging
import akka.stream.alpakka.amqp.impl.AbstractAmqpAsyncFlowStageLogic.DeliveryTag
-import akka.stream.alpakka.amqp.{AmqpWriteSettings, WriteMessage, WriteResult}
-import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue}
+import akka.stream.alpakka.amqp.{ AmqpWriteSettings, WriteMessage, WriteResult }
+import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue }
import akka.stream._
import scala.collection.mutable
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* Internal API.
@@ -24,8 +24,8 @@ import scala.concurrent.{Future, Promise}
* given delivery tag, which means that all messages up to (and including) this delivery tag can be safely dequeued.
*/
@InternalApi private[amqp] final class AmqpAsyncUnorderedFlowStage[T](
- settings: AmqpWriteSettings
-) extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] {
+ settings: AmqpWriteSettings)
+ extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] {
private val in: Inlet[(WriteMessage, T)] = Inlet(Logging.simpleName(this) + ".in")
private val out: Outlet[(WriteResult, T)] = Outlet(Logging.simpleName(this) + ".out")
@@ -39,23 +39,23 @@ import scala.concurrent.{Future, Promise}
val streamCompletion = Promise[Done]()
(new AbstractAmqpAsyncFlowStageLogic(settings, streamCompletion, shape) {
- private val buffer = mutable.Queue.empty[AwaitingMessage[T]]
+ private val buffer = mutable.Queue.empty[AwaitingMessage[T]]
- override def enqueueMessage(tag: DeliveryTag, passThrough: T): Unit =
- buffer += AwaitingMessage(tag, passThrough)
+ override def enqueueMessage(tag: DeliveryTag, passThrough: T): Unit =
+ buffer += AwaitingMessage(tag, passThrough)
- override def dequeueAwaitingMessages(tag: DeliveryTag, multiple: Boolean): Iterable[AwaitingMessage[T]] =
- if (multiple)
- buffer.dequeueAll(_.tag <= tag)
- else
- buffer
- .dequeueFirst(_.tag == tag)
- .fold(Seq.empty[AwaitingMessage[T]])(Seq(_))
+ override def dequeueAwaitingMessages(tag: DeliveryTag, multiple: Boolean): Iterable[AwaitingMessage[T]] =
+ if (multiple)
+ buffer.dequeueAll(_.tag <= tag)
+ else
+ buffer
+ .dequeueFirst(_.tag == tag)
+ .fold(Seq.empty[AwaitingMessage[T]])(Seq(_))
- override def messagesAwaitingDelivery: Int = buffer.length
+ override def messagesAwaitingDelivery: Int = buffer.length
- override def noAwaitingMessages: Boolean = buffer.isEmpty
+ override def noAwaitingMessages: Boolean = buffer.isEmpty
- }, streamCompletion.future)
+ }, streamCompletion.future)
}
}
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala
index bf93f56a..5729ead7 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala
@@ -4,8 +4,8 @@
package akka.stream.alpakka.amqp.impl
-import akka.stream.alpakka.amqp.{AmqpConnectorSettings, BindingDeclaration, ExchangeDeclaration, QueueDeclaration}
-import akka.stream.stage.{AsyncCallback, GraphStageLogic}
+import akka.stream.alpakka.amqp.{ AmqpConnectorSettings, BindingDeclaration, ExchangeDeclaration, QueueDeclaration }
+import akka.stream.stage.{ AsyncCallback, GraphStageLogic }
import com.rabbitmq.client._
import scala.util.control.NonFatal
@@ -41,16 +41,14 @@ private trait AmqpConnectorLogic { this: GraphStageLogic =>
d.durable,
d.exclusive,
d.autoDelete,
- d.arguments.asJava
- )
+ d.arguments.asJava)
case d: BindingDeclaration =>
channel.queueBind(
d.queue,
d.exchange,
d.routingKey.getOrElse(""),
- d.arguments.asJava
- )
+ d.arguments.asJava)
case d: ExchangeDeclaration =>
channel.exchangeDeclare(
@@ -59,8 +57,7 @@ private trait AmqpConnectorLogic { this: GraphStageLogic =>
d.durable,
d.autoDelete,
d.internal,
- d.arguments.asJava
- )
+ d.arguments.asJava)
}
whenConnected()
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpReplyToSinkStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpReplyToSinkStage.scala
index e230c75e..52f0fa3f 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpReplyToSinkStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpReplyToSinkStage.scala
@@ -6,11 +6,11 @@ package akka.stream.alpakka.amqp.impl
import akka.Done
import akka.annotation.InternalApi
-import akka.stream.alpakka.amqp.{AmqpReplyToSinkSettings, WriteMessage}
-import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue, InHandler}
-import akka.stream.{ActorAttributes, Attributes, Inlet, SinkShape}
+import akka.stream.alpakka.amqp.{ AmqpReplyToSinkSettings, WriteMessage }
+import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, InHandler }
+import akka.stream.{ ActorAttributes, Attributes, Inlet, SinkShape }
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* Connects to an AMQP server upon materialization and sends write messages to the server.
@@ -31,58 +31,56 @@ private[amqp] final class AmqpReplyToSinkStage(settings: AmqpReplyToSinkSettings
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
val streamCompletion = Promise[Done]()
(new GraphStageLogic(shape) with AmqpConnectorLogic {
- override val settings = stage.settings
-
- override def whenConnected(): Unit = pull(in)
-
- override def postStop(): Unit = {
- streamCompletion.tryFailure(new RuntimeException("stage stopped unexpectedly"))
- super.postStop()
- }
-
- override def onFailure(ex: Throwable): Unit = {
- streamCompletion.tryFailure(ex)
- super.onFailure(ex)
- }
-
- setHandler(
- in,
- new InHandler {
-
- override def onUpstreamFailure(ex: Throwable): Unit = {
- streamCompletion.failure(ex)
- super.onUpstreamFailure(ex)
- }
-
- override def onUpstreamFinish(): Unit = {
- streamCompletion.success(Done)
- super.onUpstreamFinish()
- }
-
- override def onPush(): Unit = {
- val elem = grab(in)
-
- val replyTo = elem.properties.flatMap(properties => Option(properties.getReplyTo))
-
- if (replyTo.isDefined) {
- channel.basicPublish(
- elem.routingKey.getOrElse(""),
- replyTo.get,
- elem.mandatory,
- elem.immediate,
- elem.properties.orNull,
- elem.bytes.toArray
- )
- } else if (settings.failIfReplyToMissing) {
- onFailure(new RuntimeException("Reply-to header was not set"))
- }
+ override val settings = stage.settings
+
+ override def whenConnected(): Unit = pull(in)
+
+ override def postStop(): Unit = {
+ streamCompletion.tryFailure(new RuntimeException("stage stopped unexpectedly"))
+ super.postStop()
+ }
- tryPull(in)
- }
+ override def onFailure(ex: Throwable): Unit = {
+ streamCompletion.tryFailure(ex)
+ super.onFailure(ex)
}
- )
- }, streamCompletion.future)
+ setHandler(
+ in,
+ new InHandler {
+
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ streamCompletion.failure(ex)
+ super.onUpstreamFailure(ex)
+ }
+
+ override def onUpstreamFinish(): Unit = {
+ streamCompletion.success(Done)
+ super.onUpstreamFinish()
+ }
+
+ override def onPush(): Unit = {
+ val elem = grab(in)
+
+ val replyTo = elem.properties.flatMap(properties => Option(properties.getReplyTo))
+
+ if (replyTo.isDefined) {
+ channel.basicPublish(
+ elem.routingKey.getOrElse(""),
+ replyTo.get,
+ elem.mandatory,
+ elem.immediate,
+ elem.properties.orNull,
+ elem.bytes.toArray)
+ } else if (settings.failIfReplyToMissing) {
+ onFailure(new RuntimeException("Reply-to header was not set"))
+ }
+
+ tryPull(in)
+ }
+ })
+
+ }, streamCompletion.future)
}
override def toString: String = "AmqpReplyToSink"
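
For context, this sink publishes each element to the replyTo address carried in its properties, so the requesting side must set that address up front. A minimal sketch of a request message (queue name illustrative):

    import akka.stream.alpakka.amqp.WriteMessage
    import akka.util.ByteString
    import com.rabbitmq.client.AMQP.BasicProperties

    val props = new BasicProperties.Builder().replyTo("replies.queue").build()
    val request = WriteMessage(ByteString("ping")).withProperties(props)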
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpRpcFlowStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpRpcFlowStage.scala
index 01576960..c46e4a61 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpRpcFlowStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpRpcFlowStage.scala
@@ -17,7 +17,7 @@ import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client._
import scala.collection.mutable
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
import scala.util.Success
/**
@@ -43,191 +43,185 @@ private[amqp] final class AmqpRpcFlowStage(settings: AmqpWriteSettings, bufferSi
val streamCompletion = Promise[String]()
(new GraphStageLogic(shape) with AmqpConnectorLogic {
- override val settings = stage.settings
- private val exchange = settings.exchange.getOrElse("")
- private val routingKey = settings.routingKey.getOrElse("")
- private val queue = mutable.Queue[CommittableReadResult]()
- private var queueName: String = _
- private var unackedMessages = 0
- private var outstandingMessages = 0
-
- override def whenConnected(): Unit = {
-
- pull(in)
-
- channel.basicQos(bufferSize)
- val consumerCallback = getAsyncCallback(handleDelivery)
-
- val commitCallback = getAsyncCallback[AckArguments] {
- case AckArguments(deliveryTag, multiple, promise) => {
- try {
- channel.basicAck(deliveryTag, multiple)
- unackedMessages -= 1
- if (unackedMessages == 0 && (isClosed(out) || (isClosed(in) && queue.isEmpty && outstandingMessages == 0)))
- completeStage()
- promise.complete(Success(Done))
- } catch {
- case e: Throwable => promise.failure(e)
+ override val settings = stage.settings
+ private val exchange = settings.exchange.getOrElse("")
+ private val routingKey = settings.routingKey.getOrElse("")
+ private val queue = mutable.Queue[CommittableReadResult]()
+ private var queueName: String = _
+ private var unackedMessages = 0
+ private var outstandingMessages = 0
+
+ override def whenConnected(): Unit = {
+
+ pull(in)
+
+ channel.basicQos(bufferSize)
+ val consumerCallback = getAsyncCallback(handleDelivery)
+
+ val commitCallback = getAsyncCallback[AckArguments] {
+ case AckArguments(deliveryTag, multiple, promise) => {
+ try {
+ channel.basicAck(deliveryTag, multiple)
+ unackedMessages -= 1
+ if (unackedMessages == 0 && (isClosed(out) || (isClosed(
+ in) && queue.isEmpty && outstandingMessages == 0)))
+ completeStage()
+ promise.complete(Success(Done))
+ } catch {
+ case e: Throwable => promise.failure(e)
+ }
}
}
- }
- val nackCallback = getAsyncCallback[NackArguments] {
- case NackArguments(deliveryTag, multiple, requeue, promise) => {
- try {
- channel.basicNack(deliveryTag, multiple, requeue)
- unackedMessages -= 1
- if (unackedMessages == 0 && (isClosed(out) || (isClosed(in) && queue.isEmpty && outstandingMessages == 0)))
- completeStage()
- promise.complete(Success(Done))
- } catch {
- case e: Throwable => promise.failure(e)
+ val nackCallback = getAsyncCallback[NackArguments] {
+ case NackArguments(deliveryTag, multiple, requeue, promise) => {
+ try {
+ channel.basicNack(deliveryTag, multiple, requeue)
+ unackedMessages -= 1
+ if (unackedMessages == 0 && (isClosed(out) || (isClosed(
+ in) && queue.isEmpty && outstandingMessages == 0)))
+ completeStage()
+ promise.complete(Success(Done))
+ } catch {
+ case e: Throwable => promise.failure(e)
+ }
}
}
- }
- val amqpSourceConsumer = new DefaultConsumer(channel) {
- override def handleDelivery(consumerTag: String,
- envelope: Envelope,
- properties: BasicProperties,
- body: Array[Byte]): Unit =
- consumerCallback.invoke(
- new CommittableReadResult {
- override val message = ReadResult(ByteString(body), envelope, properties)
-
- override def ack(multiple: Boolean): Future[Done] = {
- val promise = Promise[Done]()
- commitCallback.invoke(AckArguments(message.envelope.getDeliveryTag, multiple, promise))
- promise.future
- }
+ val amqpSourceConsumer = new DefaultConsumer(channel) {
+ override def handleDelivery(consumerTag: String,
+ envelope: Envelope,
+ properties: BasicProperties,
+ body: Array[Byte]): Unit =
+ consumerCallback.invoke(
+ new CommittableReadResult {
+ override val message = ReadResult(ByteString(body), envelope, properties)
+
+ override def ack(multiple: Boolean): Future[Done] = {
+ val promise = Promise[Done]()
+ commitCallback.invoke(AckArguments(message.envelope.getDeliveryTag, multiple, promise))
+ promise.future
+ }
+
+ override def nack(multiple: Boolean, requeue: Boolean): Future[Done] = {
+ val promise = Promise[Done]()
+ nackCallback.invoke(NackArguments(message.envelope.getDeliveryTag, multiple, requeue, promise))
+ promise.future
+ }
+ })
+
+ override def handleCancel(consumerTag: String): Unit =
+        // non-consumer-initiated cancel, which happens for example when the queue has been deleted.
+ shutdownCallback.invoke(
+ new RuntimeException(s"Consumer $queueName with consumerTag $consumerTag shut down unexpectedly"))
+
+ override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException): Unit =
+ // "Called when either the channel or the underlying connection has been shut down."
+ shutdownCallback.invoke(
+ new RuntimeException(s"Consumer $queueName with consumerTag $consumerTag shut down unexpectedly", sig))
+ }
- override def nack(multiple: Boolean, requeue: Boolean): Future[Done] = {
- val promise = Promise[Done]()
- nackCallback.invoke(NackArguments(message.envelope.getDeliveryTag, multiple, requeue, promise))
- promise.future
- }
- }
- )
-
- override def handleCancel(consumerTag: String): Unit =
- // non consumer initiated cancel, for example happens when the queue has been deleted.
- shutdownCallback.invoke(
- new RuntimeException(s"Consumer $queueName with consumerTag $consumerTag shut down unexpectedly")
- )
-
- override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException): Unit =
- // "Called when either the channel or the underlying connection has been shut down."
- shutdownCallback.invoke(
- new RuntimeException(s"Consumer $queueName with consumerTag $consumerTag shut down unexpectedly", sig)
- )
+ // Create an exclusive queue with a randomly generated name for use as the replyTo portion of RPC
+ queueName = channel
+ .queueDeclare(
+ "",
+ false,
+ true,
+ true,
+ Collections.emptyMap())
+ .getQueue
+
+ channel.basicConsume(
+ queueName,
+ amqpSourceConsumer)
+ streamCompletion.success(queueName)
}
- // Create an exclusive queue with a randomly generated name for use as the replyTo portion of RPC
- queueName = channel
- .queueDeclare(
- "",
- false,
- true,
- true,
- Collections.emptyMap()
- )
- .getQueue
-
- channel.basicConsume(
- queueName,
- amqpSourceConsumer
- )
- streamCompletion.success(queueName)
- }
-
- def handleDelivery(message: CommittableReadResult): Unit =
- if (isAvailable(out)) {
- pushMessage(message)
- } else if (queue.size + 1 > bufferSize) {
- onFailure(new RuntimeException(s"Reached maximum buffer size $bufferSize"))
- } else {
- queue.enqueue(message)
- }
+ def handleDelivery(message: CommittableReadResult): Unit =
+ if (isAvailable(out)) {
+ pushMessage(message)
+ } else if (queue.size + 1 > bufferSize) {
+ onFailure(new RuntimeException(s"Reached maximum buffer size $bufferSize"))
+ } else {
+ queue.enqueue(message)
+ }
+
+ setHandler(
+ out,
+ new OutHandler {
+ override def onPull(): Unit =
+ if (queue.nonEmpty) {
+ pushMessage(queue.dequeue())
+ }
- setHandler(
- out,
- new OutHandler {
- override def onPull(): Unit =
- if (queue.nonEmpty) {
- pushMessage(queue.dequeue())
+ override def onDownstreamFinish(cause: Throwable): Unit = {
+ setKeepGoing(true)
+ if (unackedMessages == 0) super.onDownstreamFinish(cause)
}
+ })
- override def onDownstreamFinish(cause: Throwable): Unit = {
- setKeepGoing(true)
- if (unackedMessages == 0) super.onDownstreamFinish(cause)
- }
+ def pushMessage(message: CommittableReadResult): Unit = {
+ push(out, message)
+ unackedMessages += 1
+ outstandingMessages -= 1
}
- )
-
- def pushMessage(message: CommittableReadResult): Unit = {
- push(out, message)
- unackedMessages += 1
- outstandingMessages -= 1
- }
-
- setHandler(
- in,
- new InHandler {
- // We don't want to finish since we're still waiting
- // on incoming messages from rabbit. However, if we
- // haven't processed a message yet, we do want to complete
- // so that we don't hang.
- override def onUpstreamFinish(): Unit = {
- setKeepGoing(true)
- if (queue.isEmpty && outstandingMessages == 0 && unackedMessages == 0) super.onUpstreamFinish()
- }
- override def onUpstreamFailure(ex: Throwable): Unit = {
- setKeepGoing(true)
- if (queue.isEmpty && outstandingMessages == 0 && unackedMessages == 0)
- super.onUpstreamFailure(ex)
- }
+ setHandler(
+ in,
+ new InHandler {
+ // We don't want to finish since we're still waiting
+ // on incoming messages from rabbit. However, if we
+ // haven't processed a message yet, we do want to complete
+ // so that we don't hang.
+ override def onUpstreamFinish(): Unit = {
+ setKeepGoing(true)
+ if (queue.isEmpty && outstandingMessages == 0 && unackedMessages == 0) super.onUpstreamFinish()
+ }
- override def onPush(): Unit = {
- val elem = grab(in)
- val props = elem.properties.getOrElse(new BasicProperties()).builder.replyTo(queueName).build()
- channel.basicPublish(
- exchange,
- elem.routingKey.getOrElse(routingKey),
- elem.mandatory,
- elem.immediate,
- props,
- elem.bytes.toArray
- )
-
- val expectedResponses: Int = {
- val headers = props.getHeaders
- if (headers == null) {
- responsesPerMessage
- } else {
- val r = headers.get("expectedReplies")
- if (r != null) {
- r.asInstanceOf[Int]
- } else {
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ setKeepGoing(true)
+ if (queue.isEmpty && outstandingMessages == 0 && unackedMessages == 0)
+ super.onUpstreamFailure(ex)
+ }
+
+ override def onPush(): Unit = {
+ val elem = grab(in)
+ val props = elem.properties.getOrElse(new BasicProperties()).builder.replyTo(queueName).build()
+ channel.basicPublish(
+ exchange,
+ elem.routingKey.getOrElse(routingKey),
+ elem.mandatory,
+ elem.immediate,
+ props,
+ elem.bytes.toArray)
+
+ val expectedResponses: Int = {
+ val headers = props.getHeaders
+ if (headers == null) {
responsesPerMessage
+ } else {
+ val r = headers.get("expectedReplies")
+ if (r != null) {
+ r.asInstanceOf[Int]
+ } else {
+ responsesPerMessage
+ }
}
}
+
+ outstandingMessages += expectedResponses
+ pull(in)
}
+ })
+ override def postStop(): Unit = {
+ streamCompletion.tryFailure(new RuntimeException("stage stopped unexpectedly"))
+ super.postStop()
+ }
- outstandingMessages += expectedResponses
- pull(in)
- }
+ override def onFailure(ex: Throwable): Unit = {
+ streamCompletion.tryFailure(ex)
+ super.onFailure(ex)
}
- )
- override def postStop(): Unit = {
- streamCompletion.tryFailure(new RuntimeException("stage stopped unexpectedly"))
- super.postStop()
- }
-
- override def onFailure(ex: Throwable): Unit = {
- streamCompletion.tryFailure(ex)
- super.onFailure(ex)
- }
- }, streamCompletion.future)
+ }, streamCompletion.future)
}
override def toString: String = "AmqpRpcFlow"
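
As the onPush handler above shows, the stage honours an `expectedReplies` message header that overrides the per-stream responsesPerMessage default. A minimal sketch of setting it to 3 on an outgoing message:

    import java.util.Collections
    import akka.stream.alpakka.amqp.WriteMessage
    import akka.util.ByteString
    import com.rabbitmq.client.AMQP.BasicProperties

    val props = new BasicProperties.Builder()
      .headers(Collections.singletonMap[String, AnyRef]("expectedReplies", Int.box(3)))
      .build()
    val msg = WriteMessage(ByteString("ping")).withProperties(props)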
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSimpleFlowStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSimpleFlowStage.scala
index 0d3a855f..149e0808 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSimpleFlowStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSimpleFlowStage.scala
@@ -7,11 +7,11 @@ package akka.stream.alpakka.amqp.impl
import akka.Done
import akka.annotation.InternalApi
import akka.event.Logging
-import akka.stream.{ActorAttributes, Attributes, FlowShape, Inlet, Outlet}
-import akka.stream.alpakka.amqp.{AmqpWriteSettings, WriteMessage, WriteResult}
-import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue}
+import akka.stream.{ ActorAttributes, Attributes, FlowShape, Inlet, Outlet }
+import akka.stream.alpakka.amqp.{ AmqpWriteSettings, WriteMessage, WriteResult }
+import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue }
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* Internal API.
@@ -36,19 +36,18 @@ import scala.concurrent.{Future, Promise}
override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
val streamCompletion = Promise[Done]()
(new AbstractAmqpFlowStageLogic[T](settings, streamCompletion, shape) {
- override def publish(message: WriteMessage, passThrough: T): Unit = {
- log.debug("Publishing message {}.", message)
-
- channel.basicPublish(
- settings.exchange.getOrElse(""),
- message.routingKey.orElse(settings.routingKey).getOrElse(""),
- message.mandatory,
- message.immediate,
- message.properties.orNull,
- message.bytes.toArray
- )
- push(out, (WriteResult.confirmed, passThrough))
- }
- }, streamCompletion.future)
+ override def publish(message: WriteMessage, passThrough: T): Unit = {
+ log.debug("Publishing message {}.", message)
+
+ channel.basicPublish(
+ settings.exchange.getOrElse(""),
+ message.routingKey.orElse(settings.routingKey).getOrElse(""),
+ message.mandatory,
+ message.immediate,
+ message.properties.orNull,
+ message.bytes.toArray)
+ push(out, (WriteResult.confirmed, passThrough))
+ }
+ }, streamCompletion.future)
}
}
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala
index bb01248d..ab7c4754 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala
@@ -9,14 +9,14 @@ import akka.annotation.InternalApi
import akka.stream.alpakka.amqp._
import akka.stream.alpakka.amqp.impl.AmqpSourceStage.AutoAckedReadResult
import akka.stream.alpakka.amqp.scaladsl.CommittableReadResult
-import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler, StageLogging}
-import akka.stream.{Attributes, Outlet, SourceShape}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging }
+import akka.stream.{ Attributes, Outlet, SourceShape }
import akka.util.ByteString
import com.rabbitmq.client.AMQP.BasicProperties
-import com.rabbitmq.client.{DefaultConsumer, Envelope, ShutdownSignalException}
+import com.rabbitmq.client.{ DefaultConsumer, Envelope, ShutdownSignalException }
import scala.collection.mutable
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
import scala.util.Success
private final case class AckArguments(deliveryTag: Long, multiple: Boolean, promise: Promise[Done])
@@ -80,9 +80,9 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi
val amqpSourceConsumer = new DefaultConsumer(channel) {
override def handleDelivery(consumerTag: String,
- envelope: Envelope,
- properties: BasicProperties,
- body: Array[Byte]): Unit = {
+ envelope: Envelope,
+ properties: BasicProperties,
+ body: Array[Byte]): Unit = {
val message = if (ackRequired) {
new CommittableReadResult {
@@ -107,14 +107,12 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi
override def handleCancel(consumerTag: String): Unit =
// non-consumer-initiated cancel, which happens for example when the queue has been deleted.
shutdownCallback.invoke(
- new RuntimeException(s"Consumer with consumerTag $consumerTag shut down unexpectedly")
- )
+ new RuntimeException(s"Consumer with consumerTag $consumerTag shut down unexpectedly"))
override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException): Unit =
// "Called when either the channel or the underlying connection has been shut down."
shutdownCallback.invoke(
- new RuntimeException(s"Consumer with consumerTag $consumerTag shut down unexpectedly", sig)
- )
+ new RuntimeException(s"Consumer with consumerTag $consumerTag shut down unexpectedly", sig))
}
def setupNamedQueue(settings: NamedQueueSourceSettings): Unit =
@@ -125,8 +123,7 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi
settings.noLocal,
settings.exclusive,
settings.arguments.asJava,
- amqpSourceConsumer
- )
+ amqpSourceConsumer)
def setupTemporaryQueue(settings: TemporaryQueueSourceSettings): Unit = {
// this is a weird case that required dynamic declaration, the queue name is not known
@@ -135,8 +132,7 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi
channel.queueBind(queueName, settings.exchange, settings.routingKey.getOrElse(""))
channel.basicConsume(
queueName,
- amqpSourceConsumer
- )
+ amqpSourceConsumer)
}
settings match {
@@ -171,8 +167,7 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi
setKeepGoing(true)
log.debug("Awaiting {} acks before finishing.", unackedMessages)
}
- }
- )
+ })
def pushMessage(message: CommittableReadResult): Unit = {
push(out, message)
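
The committable results emitted here are normally acked one by one downstream. A minimal sketch (local broker and queue name are illustrative; a running ActorSystem and execution context are assumed in scope):

    import akka.stream.alpakka.amqp.{ AmqpLocalConnectionProvider, NamedQueueSourceSettings }
    import akka.stream.alpakka.amqp.scaladsl.AmqpSource

    val source = AmqpSource.committableSource(
      NamedQueueSourceSettings(AmqpLocalConnectionProvider, "app.queue"),
      bufferSize = 10)
    // downstream: .mapAsync(1)(cm => cm.ack().map(_ => cm.message))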
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala
index 07e7d073..8c61ede6 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala
@@ -27,8 +27,7 @@ object AmqpFlow {
* @param settings `bufferSize` and `confirmationTimeout` properties are ignored by this connector
*/
def create(
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
+ settings: AmqpWriteSettings): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpFlow(settings).mapMaterializedValue(f => f.toJava).asJava
/**
@@ -50,8 +49,7 @@ object AmqpFlow {
* supposed to be used with other AMQP brokers.
*/
def createWithConfirm(
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
+ settings: AmqpWriteSettings): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpFlow
.withConfirm(settings = settings)
.mapMaterializedValue(_.toJava)
@@ -76,8 +74,7 @@ object AmqpFlow {
* supposed to be used with other AMQP brokers.
*/
def createWithConfirmUnordered(
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
+ settings: AmqpWriteSettings): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpFlow
.withConfirmUnordered(settings)
.mapMaterializedValue(_.toJava)
@@ -93,15 +90,14 @@ object AmqpFlow {
* supposed to be used with other AMQP brokers.
*/
def createWithConfirmAndPassThroughUnordered[T](
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.Flow[Pair[WriteMessage, T], Pair[WriteResult, T], CompletionStage[Done]] =
+ settings: AmqpWriteSettings)
+ : akka.stream.javadsl.Flow[Pair[WriteMessage, T], Pair[WriteResult, T], CompletionStage[Done]] =
akka.stream.scaladsl
.Flow[Pair[WriteMessage, T]]
.map((p: Pair[WriteMessage, T]) => p.toScala)
.viaMat(
akka.stream.alpakka.amqp.scaladsl.AmqpFlow
- .withConfirmAndPassThroughUnordered[T](settings = settings)
- )(Keep.right)
+ .withConfirmAndPassThroughUnordered[T](settings = settings))(Keep.right)
.map { case (writeResult, passThrough) => Pair(writeResult, passThrough) }
.mapMaterializedValue(_.toJava)
.asJava
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala
index 943e5f99..53535496 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala
@@ -19,8 +19,8 @@ object AmqpFlowWithContext {
* @see [[AmqpFlow.create]]
*/
def create[T](
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] =
+ settings: AmqpWriteSettings)
+ : akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpFlowWithContext
.apply(settings)
.mapMaterializedValue(_.toJava)
@@ -36,8 +36,8 @@ object AmqpFlowWithContext {
* supposed to be used with other AMQP brokers.
*/
def createWithConfirm[T](
- settings: AmqpWriteSettings
- ): akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] =
+ settings: AmqpWriteSettings)
+ : akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpFlowWithContext
.withConfirm(settings)
.mapMaterializedValue(_.toJava)
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala
index 91f25271..9f28e57d 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala
@@ -24,7 +24,7 @@ object AmqpRpcFlow {
* @param repliesPerMessage The number of responses that should be expected for each message placed on the queue.
*/
def createSimple(settings: AmqpWriteSettings,
- repliesPerMessage: Int): Flow[ByteString, ByteString, CompletionStage[String]] =
+ repliesPerMessage: Int): Flow[ByteString, ByteString, CompletionStage[String]] =
akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow
.simple(settings, repliesPerMessage)
.mapMaterializedValue(f => f.toJava)
@@ -36,7 +36,7 @@ object AmqpRpcFlow {
* before its read result is emitted downstream.
*/
def atMostOnceFlow(settings: AmqpWriteSettings,
- bufferSize: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] =
+ bufferSize: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] =
akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow
.atMostOnceFlow(settings, bufferSize)
.mapMaterializedValue(f => f.toJava)
@@ -48,8 +48,8 @@ object AmqpRpcFlow {
* before its read result is emitted downstream.
*/
def atMostOnceFlow(settings: AmqpWriteSettings,
- bufferSize: Int,
- repliesPerMessage: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] =
+ bufferSize: Int,
+ repliesPerMessage: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] =
akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow
.atMostOnceFlow(settings, bufferSize, repliesPerMessage)
.mapMaterializedValue(f => f.toJava)
@@ -69,8 +69,7 @@ object AmqpRpcFlow {
def committableFlow(
settings: AmqpWriteSettings,
bufferSize: Int,
- repliesPerMessage: Int = 1
- ): Flow[WriteMessage, CommittableReadResult, CompletionStage[String]] =
+ repliesPerMessage: Int = 1): Flow[WriteMessage, CommittableReadResult, CompletionStage[String]] =
akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow
.committableFlow(settings, bufferSize, repliesPerMessage)
.mapMaterializedValue(f => f.toJava)
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala
index 0e75fefe..52db94f8 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala
@@ -41,8 +41,7 @@ object AmqpSink {
* either normally or because of an amqp failure.
*/
def createReplyTo(
- settings: AmqpReplyToSinkSettings
- ): akka.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] =
+ settings: AmqpReplyToSinkSettings): akka.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] =
akka.stream.alpakka.amqp.scaladsl.AmqpSink.replyTo(settings).mapMaterializedValue(f => f.toJava).asJava
}
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSource.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSource.scala
index 30accf60..ec7d15a1 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSource.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSource.scala
@@ -5,7 +5,7 @@
package akka.stream.alpakka.amqp.javadsl
import akka.NotUsed
-import akka.stream.alpakka.amqp.{AmqpSourceSettings, ReadResult}
+import akka.stream.alpakka.amqp.{ AmqpSourceSettings, ReadResult }
import akka.stream.javadsl.Source
object AmqpSource {
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/model.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/model.scala
index b86a2594..b66e6f36 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/model.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/model.scala
@@ -13,8 +13,7 @@ import com.rabbitmq.client.Envelope
final class ReadResult private (
val bytes: ByteString,
val envelope: Envelope,
- val properties: BasicProperties
-) {
+ val properties: BasicProperties) {
override def toString: String =
s"ReadResult(bytes=$bytes, envelope=$envelope, properties=$properties)"
}
@@ -35,8 +34,7 @@ final class WriteMessage private (
val immediate: Boolean,
val mandatory: Boolean,
val properties: Option[BasicProperties] = None,
- val routingKey: Option[String] = None
-) {
+ val routingKey: Option[String] = None) {
def withImmediate(value: Boolean): WriteMessage =
if (value == immediate) this
@@ -53,9 +51,9 @@ final class WriteMessage private (
copy(routingKey = Some(routingKey))
private def copy(immediate: Boolean = immediate,
- mandatory: Boolean = mandatory,
- properties: Option[BasicProperties] = properties,
- routingKey: Option[String] = routingKey) =
+ mandatory: Boolean = mandatory,
+ properties: Option[BasicProperties] = properties,
+ routingKey: Option[String] = routingKey) =
new WriteMessage(bytes, immediate, mandatory, properties, routingKey)
override def toString: String =
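
As with the declarations, WriteMessage is immutable: the private copy backs the public `with*` setters, and withImmediate even skips the copy when the value is unchanged. A short sketch:

    import akka.stream.alpakka.amqp.WriteMessage
    import akka.util.ByteString

    val base = WriteMessage(ByteString("payload"))
    val routed = base.withRoutingKey("audit") // new instance; base is untouched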
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlow.scala
index 035e0ad1..6fb7235a 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlow.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlow.scala
@@ -5,8 +5,8 @@
package akka.stream.alpakka.amqp.scaladsl
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{Flow, Keep}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Flow, Keep }
+import akka.{ Done, NotUsed }
import scala.concurrent.Future
@@ -24,11 +24,9 @@ object AmqpFlow {
* @param settings `bufferSize` and `confirmationTimeout` properties are ignored by this connector
*/
def apply(
- settings: AmqpWriteSettings
- ): Flow[WriteMessage, WriteResult, Future[Done]] =
+ settings: AmqpWriteSettings): Flow[WriteMessage, WriteResult, Future[Done]] =
asFlowWithoutContext(
- Flow.fromGraph(new impl.AmqpSimpleFlowStage(settings))
- )
+ Flow.fromGraph(new impl.AmqpSimpleFlowStage(settings)))
/**
* Creates an `AmqpFlow` that accepts `WriteMessage` elements and emits `WriteResult`.
@@ -45,11 +43,9 @@ object AmqpFlow {
* either normally or because of an amqp failure.
*/
def withConfirm(
- settings: AmqpWriteSettings
- ): Flow[WriteMessage, WriteResult, Future[Done]] =
+ settings: AmqpWriteSettings): Flow[WriteMessage, WriteResult, Future[Done]] =
asFlowWithoutContext(
- Flow.fromGraph(new impl.AmqpAsyncFlowStage(settings))
- )
+ Flow.fromGraph(new impl.AmqpAsyncFlowStage(settings)))
/**
* Creates an `AmqpFlow` that accepts `WriteMessage` elements and emits `WriteResult`.
@@ -70,11 +66,9 @@ object AmqpFlow {
* supposed to be used with other AMQP brokers.
*/
def withConfirmUnordered(
- settings: AmqpWriteSettings
- ): Flow[WriteMessage, WriteResult, Future[Done]] =
+ settings: AmqpWriteSettings): Flow[WriteMessage, WriteResult, Future[Done]] =
asFlowWithoutContext(
- Flow.fromGraph(new impl.AmqpAsyncUnorderedFlowStage(settings))
- )
+ Flow.fromGraph(new impl.AmqpAsyncUnorderedFlowStage(settings)))
/**
* Variant of `AmqpFlow.withConfirmUnordered` with additional support for pass-through elements.
@@ -86,8 +80,7 @@ object AmqpFlow {
* supposed to be used with other AMQP brokers.
*/
def withConfirmAndPassThroughUnordered[T](
- settings: AmqpWriteSettings
- ): Flow[(WriteMessage, T), (WriteResult, T), Future[Done]] =
+ settings: AmqpWriteSettings): Flow[(WriteMessage, T), (WriteResult, T), Future[Done]] =
Flow.fromGraph(new impl.AmqpAsyncUnorderedFlowStage(settings))
private def asFlowWithoutContext(flow: Flow[(WriteMessage, NotUsed), (WriteResult, NotUsed), Future[Done]]) =
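
Taken together, a minimal sketch of wiring the confirming variant into a stream (local broker and queue name are illustrative; an implicit ActorSystem is assumed in scope):

    import akka.stream.alpakka.amqp.{ AmqpLocalConnectionProvider, AmqpWriteSettings, QueueDeclaration, WriteMessage }
    import akka.stream.alpakka.amqp.scaladsl.AmqpFlow
    import akka.stream.scaladsl.{ Sink, Source }
    import akka.util.ByteString

    val writeSettings = AmqpWriteSettings(AmqpLocalConnectionProvider)
      .withRoutingKey("app.queue")
      .withDeclaration(QueueDeclaration("app.queue"))

    val done = Source(List("a", "b", "c"))
      .map(s => WriteMessage(ByteString(s)))
      .via(AmqpFlow.withConfirm(writeSettings))
      .runWith(Sink.ignore) // each WriteResult reflects broker confirmation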
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowWithContext.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowWithContext.scala
index e9ff77cc..358845c5 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowWithContext.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowWithContext.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.amqp.scaladsl
import akka.Done
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{Flow, FlowWithContext}
+import akka.stream.scaladsl.{ Flow, FlowWithContext }
import scala.concurrent.Future
@@ -18,11 +18,9 @@ object AmqpFlowWithContext {
* @see [[AmqpFlow.apply]]
*/
def apply[T](
- settings: AmqpWriteSettings
- ): FlowWithContext[WriteMessage, T, WriteResult, T, Future[Done]] =
+ settings: AmqpWriteSettings): FlowWithContext[WriteMessage, T, WriteResult, T, Future[Done]] =
FlowWithContext.fromTuples(
- Flow.fromGraph(new impl.AmqpSimpleFlowStage[T](settings))
- )
+ Flow.fromGraph(new impl.AmqpSimpleFlowStage[T](settings)))
/**
* Creates a contextual variant of corresponding [[AmqpFlow]].
@@ -34,9 +32,7 @@ object AmqpFlowWithContext {
* supposed to be used with other AMQP brokers.
*/
def withConfirm[T](
- settings: AmqpWriteSettings
- ): FlowWithContext[WriteMessage, T, WriteResult, T, Future[Done]] =
+ settings: AmqpWriteSettings): FlowWithContext[WriteMessage, T, WriteResult, T, Future[Done]] =
FlowWithContext.fromTuples(
- Flow.fromGraph(new impl.AmqpAsyncFlowStage(settings))
- )
+ Flow.fromGraph(new impl.AmqpAsyncFlowStage(settings)))
}
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala
index f6636037..cb711f12 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.amqp.scaladsl
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{Flow, Keep}
+import akka.stream.scaladsl.{ Flow, Keep }
import akka.util.ByteString
import scala.concurrent.Future
@@ -35,8 +35,8 @@ object AmqpRpcFlow {
* before it is emitted downstream.
*/
def atMostOnceFlow(settings: AmqpWriteSettings,
- bufferSize: Int,
- repliesPerMessage: Int = 1): Flow[WriteMessage, ReadResult, Future[String]] =
+ bufferSize: Int,
+ repliesPerMessage: Int = 1): Flow[WriteMessage, ReadResult, Future[String]] =
committableFlow(settings, bufferSize, repliesPerMessage)
.mapAsync(1) { cm =>
cm.ack().map(_ => cm.message)(ExecutionContexts.parasitic)
@@ -54,8 +54,8 @@ object AmqpRpcFlow {
* Compared to auto-commit, this gives exact control over when a message is considered consumed.
*/
def committableFlow(settings: AmqpWriteSettings,
- bufferSize: Int,
- repliesPerMessage: Int = 1): Flow[WriteMessage, CommittableReadResult, Future[String]] =
+ bufferSize: Int,
+ repliesPerMessage: Int = 1): Flow[WriteMessage, CommittableReadResult, Future[String]] =
Flow.fromGraph(new impl.AmqpRpcFlowStage(settings, bufferSize, repliesPerMessage))
}
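
A minimal sketch of the committable variant, acking each reply only after it has been processed (writeSettings as in the flow sketch above; an execution context is assumed):

    import akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow
    import scala.concurrent.ExecutionContext.Implicits.global

    val rpcFlow = AmqpRpcFlow.committableFlow(writeSettings, bufferSize = 10)
    // downstream: .mapAsync(1)(cm => cm.ack().map(_ => cm.message))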
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSink.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSink.scala
index 9a58df1f..5f874325 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSink.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSink.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.amqp.scaladsl
import akka.Done
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{Keep, Sink}
+import akka.stream.scaladsl.{ Keep, Sink }
import akka.util.ByteString
import scala.concurrent.Future
diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala
index 302127f1..967ecc86 100644
--- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala
+++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala
@@ -7,7 +7,7 @@ package akka.stream.alpakka.amqp.scaladsl
import akka.NotUsed
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.amqp.impl
-import akka.stream.alpakka.amqp.{AmqpSourceSettings, ReadResult}
+import akka.stream.alpakka.amqp.{ AmqpSourceSettings, ReadResult }
import akka.stream.scaladsl.Source
object AmqpSource {
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpProxyConnection.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpProxyConnection.scala
index abedac8b..93e876f8 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpProxyConnection.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpProxyConnection.scala
@@ -59,7 +59,7 @@ class AmqpProxyConnection(protected val delegate: Connection) extends Connection
override def addBlockedListener(blockedListener: BlockedListener): Unit = delegate.addBlockedListener(blockedListener)
override def addBlockedListener(blockedCallback: BlockedCallback,
- unblockedCallback: UnblockedCallback): BlockedListener =
+ unblockedCallback: UnblockedCallback): BlockedListener =
delegate.addBlockedListener(blockedCallback, unblockedCallback)
override def removeBlockedListener(blockedListener: BlockedListener): Boolean =
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectionProvidersSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectionProvidersSpec.scala
index 00a042de..509ec1a6 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectionProvidersSpec.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectionProvidersSpec.scala
@@ -15,7 +15,7 @@ class AmqpConnectionProvidersSpec extends AmqpSpec {
val connectionProvider = AmqpLocalConnectionProvider
val connection1 = connectionProvider.get
val connection2 = connectionProvider.get
- connection1 should not equal connection2
+ (connection1 should not).equal(connection2)
connectionProvider.release(connection1)
connectionProvider.release(connection2)
}
@@ -31,7 +31,7 @@ class AmqpConnectionProvidersSpec extends AmqpSpec {
val connectionProvider = AmqpUriConnectionProvider("amqp://localhost:5672")
val connection1 = connectionProvider.get
val connection2 = connectionProvider.get
- connection1 should not equal connection2
+ (connection1 should not).equal(connection2)
connectionProvider.release(connection1)
connectionProvider.release(connection2)
}
@@ -47,7 +47,7 @@ class AmqpConnectionProvidersSpec extends AmqpSpec {
val connectionProvider = AmqpDetailsConnectionProvider("localhost", 5672)
val connection1 = connectionProvider.get
val connection2 = connectionProvider.get
- connection1 should not equal connection2
+ (connection1 should not).equal(connection2)
connectionProvider.release(connection1)
connectionProvider.release(connection2)
}
@@ -65,7 +65,7 @@ class AmqpConnectionProvidersSpec extends AmqpSpec {
AmqpConnectionFactoryConnectionProvider(connectionFactory).withHostAndPort("localhost", 5672)
val connection1 = connectionProvider.get
val connection2 = connectionProvider.get
- connection1 should not equal connection2
+ (connection1 should not).equal(connection2)
connectionProvider.release(connection1)
connectionProvider.release(connection2)
}
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectorsSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectorsSpec.scala
index a7b508a4..72933fe6 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectorsSpec.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpConnectorsSpec.scala
@@ -9,10 +9,10 @@ import java.net.ConnectException
import akka.Done
import akka.stream._
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{GraphDSL, Keep, Merge, Sink, Source}
+import akka.stream.scaladsl.{ GraphDSL, Keep, Merge, Sink, Source }
import akka.stream.testkit.scaladsl.TestSink
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.testkit.{TestPublisher, TestSubscriber}
+import akka.stream.testkit.{ TestPublisher, TestSubscriber }
import akka.util.ByteString
import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client.AuthenticationFailureException
@@ -41,8 +41,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
val result = Source(input).map(s => ByteString(s)).runWith(amqpSink)
@@ -61,8 +60,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
val result = Source(input).map(s => ByteString(s)).runWith(amqpSink)
@@ -77,13 +75,11 @@ class AmqpConnectorsSpec extends AmqpSpec {
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
.withDeclaration(queueDeclaration),
- 2
- )
+ 2)
val amqpSource = AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName),
- bufferSize = 1
- )
+ bufferSize = 1)
val input = Vector("one", "two", "three", "four", "five")
val (rpcQueueF, probe) =
@@ -91,16 +87,14 @@ class AmqpConnectorsSpec extends AmqpSpec {
rpcQueueF.futureValue
val amqpSink = AmqpSink.replyTo(
- AmqpReplyToSinkSettings(connectionProvider)
- )
+ AmqpReplyToSinkSettings(connectionProvider))
val sourceToSink = amqpSource
.viaMat(KillSwitches.single)(Keep.right)
.mapConcat { b =>
List(
WriteMessage(b.bytes.concat(ByteString("a"))).withProperties(b.properties),
- WriteMessage(b.bytes.concat(ByteString("aa"))).withProperties(b.properties)
- )
+ WriteMessage(b.bytes.concat(ByteString("aa"))).withProperties(b.properties))
}
.to(amqpSink)
.run()
@@ -139,8 +133,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
Source
.single(outgoingMessage)
.toMat(AmqpSink.replyTo(AmqpReplyToSinkSettings(connectionProvider).withFailIfReplyToMissing(true)))(
- Keep.right
- )
+ Keep.right)
.run()
.futureValue
}
@@ -157,8 +150,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
Source
.single(outgoingMessageWithEmptyReplyTo)
.toMat(AmqpSink.replyTo(AmqpReplyToSinkSettings(connectionProvider).withFailIfReplyToMissing(false)))(
- Keep.right
- )
+ Keep.right)
.run()
.futureValue shouldBe Done
@@ -170,8 +162,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink)
@@ -185,9 +176,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName)
.withDeclaration(queueDeclaration),
- bufferSize = 1
- )
- )
+ bufferSize = 1))
source.out ~> merge.in(n)
}
@@ -204,14 +193,12 @@ class AmqpConnectorsSpec extends AmqpSpec {
val queueDeclaration = QueueDeclaration(queueName)
val amqpSource = AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName).withDeclaration(queueDeclaration),
- bufferSize = 2
- )
+ bufferSize = 2)
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val publisher = TestPublisher.probe[ByteString]()
val subscriber = TestSubscriber.probe[ReadResult]()
@@ -260,13 +247,11 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionSettings)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val amqpSource = AmqpSource.committableSource(
NamedQueueSourceSettings(connectionSettings, queueName).withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink).futureValue shouldEqual Done
@@ -287,15 +272,13 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink).futureValue shouldEqual Done
val amqpSource = AmqpSource.committableSource(
NamedQueueSourceSettings(connectionProvider, queueName).withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
val result1 = amqpSource
.mapAsync(1)(cm => cm.nack(requeue = false).map(_ => cm))
@@ -326,8 +309,7 @@ class AmqpConnectorsSpec extends AmqpSpec {
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
.withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
val (rpcQueueF, probe) =
Source(input)
.map(s => ByteString(s))
@@ -339,13 +321,11 @@ class AmqpConnectorsSpec extends AmqpSpec {
rpcQueueF.futureValue
val amqpSink = AmqpSink.replyTo(
- AmqpReplyToSinkSettings(connectionProvider)
- )
+ AmqpReplyToSinkSettings(connectionProvider))
val amqpSource = AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName),
- bufferSize = 1
- )
+ bufferSize = 1)
val sourceToSink = amqpSource
.viaMat(KillSwitches.single)(Keep.right)
.map(b => WriteMessage(b.bytes).withProperties(b.properties))
@@ -368,14 +348,12 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink(
AmqpWriteSettings(connectionProvider)
.withExchange(exchangeName)
- .withDeclarations(immutable.Seq(exchangeDeclaration, queueDeclaration, bindingDeclaration))
- )
+ .withDeclarations(immutable.Seq(exchangeDeclaration, queueDeclaration, bindingDeclaration)))
val amqpSource = AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName)
.withDeclarations(immutable.Seq(exchangeDeclaration, queueDeclaration, bindingDeclaration)),
- bufferSize = 10
- )
+ bufferSize = 10)
val input = Vector("one", "two", "three", "four", "five")
val routingKeys = input.map(s => getRoutingKey(s))
@@ -403,16 +381,14 @@ class AmqpConnectorsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val amqpSource = AmqpSource
.committableSource(
NamedQueueSourceSettings(connectionProvider, queueName)
.withAckRequired(false)
.withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink).futureValue shouldEqual Done
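Every hunk in this file applies the same scalafmt rule: a closing parenthesis that used to sit on its own line is folded onto the last argument of the call. Taking one fragment from the hunks above, the two layouts side by side (the commit's .scalafmt.conf is not shown in this diff; a setting such as danglingParentheses.callSite = false would produce the second form):

    // before formatting: dangling closing parenthesis on its own line
    val amqpSource = AmqpSource.atMostOnceSource(
      NamedQueueSourceSettings(connectionProvider, queueName),
      bufferSize = 1
    )

    // after formatting: parenthesis folded onto the final argument
    val amqpSource = AmqpSource.atMostOnceSource(
      NamedQueueSourceSettings(connectionProvider, queueName),
      bufferSize = 1)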
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowSpec.scala
index 157010c6..d84d9992 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowSpec.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpFlowSpec.scala
@@ -6,15 +6,15 @@ package akka.stream.alpakka.amqp.scaladsl
import akka.Done
import akka.stream.alpakka.amqp._
-import akka.stream.scaladsl.{Flow, FlowWithContext, Keep, Sink, Source}
+import akka.stream.scaladsl.{ Flow, FlowWithContext, Keep, Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.testkit.scaladsl.{TestSink, TestSource}
+import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.util.ByteString
import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client.ConfirmCallback
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
-import org.mockito.{ArgumentCaptor, Mockito}
+import org.mockito.{ ArgumentCaptor, Mockito }
import org.scalatest.BeforeAndAfterEach
import scala.concurrent.Future
@@ -91,8 +91,7 @@ class AmqpFlowSpec extends AmqpSpec with AmqpMocking with BeforeAndAfterEach {
val channelError = new RuntimeException("channel error")
when(
- connectionMock.createChannel()
- ).thenThrow(channelError)
+ connectionMock.createChannel()).thenThrow(channelError)
val completion =
Source
@@ -175,8 +174,7 @@ class AmqpFlowSpec extends AmqpSpec with AmqpMocking with BeforeAndAfterEach {
(WriteResult.rejected, input(3)),
(WriteResult.rejected, input(4)),
(WriteResult.rejected, input(5)),
- (WriteResult.rejected, input(6))
- )
+ (WriteResult.rejected, input(6)))
messages should contain theSameElementsInOrderAs expectedResult
completion.futureValue shouldBe an[Done]
@@ -262,8 +260,7 @@ class AmqpFlowSpec extends AmqpSpec with AmqpMocking with BeforeAndAfterEach {
(WriteResult.rejected, input(6)),
(WriteResult.rejected, input(3)),
(WriteResult.rejected, input(4)),
- (WriteResult.rejected, input(5))
- )
+ (WriteResult.rejected, input(5)))
messages should contain theSameElementsInOrderAs expectedResult
completion.futureValue shouldBe an[Done]
@@ -327,8 +324,8 @@ class AmqpFlowSpec extends AmqpSpec with AmqpMocking with BeforeAndAfterEach {
when(
channelMock
- .basicPublish(any[String], any[String], any[Boolean], any[Boolean], any[BasicProperties], any[Array[Byte]])
- ).thenThrow(publicationError)
+ .basicPublish(any[String], any[String], any[Boolean], any[Boolean], any[BasicProperties],
+ any[Array[Byte]])).thenThrow(publicationError)
val completion =
Source
@@ -419,8 +416,7 @@ class AmqpFlowSpec extends AmqpSpec with AmqpMocking with BeforeAndAfterEach {
WriteResult.confirmed,
WriteResult.confirmed,
WriteResult.rejected,
- WriteResult.rejected
- )
+ WriteResult.rejected)
messages should contain theSameElementsAs expectedResult
completion.futureValue shouldBe an[Done]
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
index a57fe40c..65eb63dc 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
@@ -19,7 +19,7 @@ import akka.stream.alpakka.amqp.{
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import akka.stream.scaladsl.Source
import akka.util.ByteString
-import com.rabbitmq.client.{AddressResolver, Connection, ConnectionFactory, ShutdownListener}
+import com.rabbitmq.client.{ AddressResolver, Connection, ConnectionFactory, ShutdownListener }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterEach
@@ -82,8 +82,7 @@ class AmqpGraphStageLogicConnectionShutdownSpec
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(reusableConnectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four")
diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpMocking.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpMocking.scala
index 2e1d3a93..6138ff9f 100644
--- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpMocking.scala
+++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpMocking.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.amqp.scaladsl
-import com.rabbitmq.client.{Address, Channel, ConfirmCallback, ConfirmListener, Connection, ConnectionFactory}
+import com.rabbitmq.client.{ Address, Channel, ConfirmCallback, ConfirmListener, Connection, ConnectionFactory }
import org.mockito.ArgumentMatchers._
import org.scalatestplus.mockito.MockitoSugar
import org.mockito.Mockito.when
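The only change in this file is import-selector spacing: scalafmt adds a space inside the braces of multi-selector imports, as in the rewritten com.rabbitmq.client import above. A minimal sketch of the rewrite (presumably driven by spaces.inImportCurlyBraces = true, an assumption since the configuration is not part of this commit):

    import com.rabbitmq.client.{Address, Channel, Connection}    // before
    import com.rabbitmq.client.{ Address, Channel, Connection }  // after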
diff --git a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
index 8447cd05..b0d7d644 100644
--- a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
+++ b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
@@ -4,18 +4,18 @@
package docs.scaladsl
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import akka.stream.KillSwitches
import akka.stream.alpakka.amqp._
-import akka.stream.alpakka.amqp.scaladsl.{AmqpFlow, AmqpRpcFlow, AmqpSink, AmqpSource, CommittableReadResult}
-import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
+import akka.stream.alpakka.amqp.scaladsl.{ AmqpFlow, AmqpRpcFlow, AmqpSink, AmqpSource, CommittableReadResult }
+import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.stream.testkit.scaladsl.TestSink
import akka.util.ByteString
import scala.concurrent.duration._
-import scala.concurrent.{Await, Future, Promise}
+import scala.concurrent.{ Await, Future, Promise }
import scala.collection.immutable
/**
@@ -38,41 +38,39 @@ class AmqpDocsSpec extends AmqpSpec {
val connectionProvider =
AmqpDetailsConnectionProvider("invalid", 5673).withHostsAndPorts(immutable.Seq("localhost" -> 5672))
- //#queue-declaration
+ // #queue-declaration
val queueName = "amqp-conn-it-spec-simple-queue-" + System.currentTimeMillis()
val queueDeclaration = QueueDeclaration(queueName)
- //#queue-declaration
+ // #queue-declaration
- //#create-sink
+ // #create-sink
val amqpSink: Sink[ByteString, Future[Done]] =
AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
val writing: Future[Done] =
Source(input)
.map(s => ByteString(s))
.runWith(amqpSink)
- //#create-sink
+ // #create-sink
writing.futureValue shouldEqual Done
- //#create-source
+ // #create-source
val amqpSource: Source[ReadResult, NotUsed] =
AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName)
.withDeclaration(queueDeclaration)
.withAckRequired(false),
- bufferSize = 10
- )
+ bufferSize = 10)
val result: Future[immutable.Seq[ReadResult]] =
amqpSource
.take(input.size)
.runWith(Sink.seq)
- //#create-source
+ // #create-source
result.futureValue.map(_.bytes.utf8String) shouldEqual input
}
@@ -84,27 +82,24 @@ class AmqpDocsSpec extends AmqpSpec {
val amqpSource = AmqpSource.atMostOnceSource(
NamedQueueSourceSettings(connectionProvider, queueName),
- bufferSize = 1
- )
+ bufferSize = 1)
val input = Vector("one", "two", "three", "four", "five")
- //#create-rpc-flow
+ // #create-rpc-flow
val amqpRpcFlow = AmqpRpcFlow.simple(
- AmqpWriteSettings(connectionProvider).withRoutingKey(queueName).withDeclaration(queueDeclaration)
- )
+ AmqpWriteSettings(connectionProvider).withRoutingKey(queueName).withDeclaration(queueDeclaration))
val (rpcQueueF: Future[String], probe: TestSubscriber.Probe[ByteString]) = Source(input)
.map(s => ByteString(s))
.viaMat(amqpRpcFlow)(Keep.right)
.toMat(TestSink.probe)(Keep.both)
.run()
- //#create-rpc-flow
+ // #create-rpc-flow
rpcQueueF.futureValue
val amqpSink = AmqpSink.replyTo(
- AmqpReplyToSinkSettings(connectionProvider)
- )
+ AmqpReplyToSinkSettings(connectionProvider))
val sourceToSink = amqpSource
.viaMat(KillSwitches.single)(Keep.right)
@@ -121,20 +116,19 @@ class AmqpDocsSpec extends AmqpSpec {
// and then one queue for each source which subscribes to the
// exchange - all this is described by the declarations
- //#exchange-declaration
+ // #exchange-declaration
val exchangeName = "amqp-conn-it-spec-pub-sub-" + System.currentTimeMillis()
val exchangeDeclaration = ExchangeDeclaration(exchangeName, "fanout")
- //#exchange-declaration
+ // #exchange-declaration
- //#create-exchange-sink
+ // #create-exchange-sink
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withExchange(exchangeName)
- .withDeclaration(exchangeDeclaration)
- )
- //#create-exchange-sink
+ .withDeclaration(exchangeDeclaration))
+ // #create-exchange-sink
- //#create-exchange-source
+ // #create-exchange-source
val fanoutSize = 4
val mergedSources = (0 until fanoutSize).foldLeft(Source.empty[(Int, String)]) {
@@ -144,14 +138,11 @@ class AmqpDocsSpec extends AmqpSpec {
.atMostOnceSource(
TemporaryQueueSourceSettings(
connectionProvider,
- exchangeName
- ).withDeclaration(exchangeDeclaration),
- bufferSize = 1
- )
- .map(msg => (fanoutBranch, msg.bytes.utf8String))
- )
+ exchangeName).withDeclaration(exchangeDeclaration),
+ bufferSize = 1)
+ .map(msg => (fanoutBranch, msg.bytes.utf8String)))
}
- //#create-exchange-source
+ // #create-exchange-source
val completion = Promise[Done]()
val mergingFlow = mergedSources
@@ -164,8 +155,7 @@ class AmqpDocsSpec extends AmqpSpec {
.run()
system.scheduler.scheduleOnce(5.seconds)(
- completion.tryFailure(new Error("Did not get at least one element from every fanout branch"))
- )
+ completion.tryFailure(new Error("Did not get at least one element from every fanout branch")))
val dataSender = Source
.repeat("stuff")
@@ -186,25 +176,23 @@ class AmqpDocsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink).futureValue shouldEqual Done
- //#create-source-withoutautoack
+ // #create-source-withoutautoack
val amqpSource = AmqpSource.committableSource(
NamedQueueSourceSettings(connectionProvider, queueName)
.withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
val result: Future[immutable.Seq[ReadResult]] = amqpSource
.mapAsync(1)(businessLogic)
.mapAsync(1)(cm => cm.ack().map(_ => cm.message))
.take(input.size)
.runWith(Sink.seq)
- //#create-source-withoutautoack
+ // #create-source-withoutautoack
result.futureValue.map(_.bytes.utf8String) shouldEqual input
}
@@ -217,25 +205,23 @@ class AmqpDocsSpec extends AmqpSpec {
val amqpSink = AmqpSink.simple(
AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
- .withDeclaration(queueDeclaration)
- )
+ .withDeclaration(queueDeclaration))
val input = Vector("one", "two", "three", "four", "five")
Source(input).map(s => ByteString(s)).runWith(amqpSink).futureValue shouldEqual Done
val amqpSource = AmqpSource.committableSource(
NamedQueueSourceSettings(connectionProvider, queueName).withDeclaration(queueDeclaration),
- bufferSize = 10
- )
+ bufferSize = 10)
- //#create-source-withoutautoack
+ // #create-source-withoutautoack
val nackedResults: Future[immutable.Seq[ReadResult]] = amqpSource
.mapAsync(1)(businessLogic)
.take(input.size)
.mapAsync(1)(cm => cm.nack(multiple = false, requeue = true).map(_ => cm.message))
.runWith(Sink.seq)
- //#create-source-withoutautoack
+ // #create-source-withoutautoack
Await.ready(nackedResults, 3.seconds)
@@ -251,7 +237,7 @@ class AmqpDocsSpec extends AmqpSpec {
val queueName = "amqp-conn-it-spec-flow-" + System.currentTimeMillis()
val queueDeclaration = QueueDeclaration(queueName)
- //#create-flow
+ // #create-flow
val settings = AmqpWriteSettings(connectionProvider)
.withRoutingKey(queueName)
.withDeclaration(queueDeclaration)
@@ -267,7 +253,7 @@ class AmqpDocsSpec extends AmqpSpec {
.map(message => WriteMessage(ByteString(message)))
.via(amqpFlow)
.runWith(Sink.seq)
- //#create-flow
+ // #create-flow
result.futureValue should contain theSameElementsAs input.map(_ => WriteResult.confirmed)
}
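The //#create-sink style comments rewritten throughout this spec are snippet markers used to cut code samples into the documentation. The formatter touches only the comment spacing, inserting a space after the // token; the markers are reformatted rather than exempted, so the snippet tooling evidently matches either spelling. One marker in both forms:

    //#create-flow    // before
    // #create-flow   // after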
diff --git a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetFlow.scala b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetFlow.scala
index 03e7cba9..901eaa2f 100644
--- a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetFlow.scala
+++ b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetFlow.scala
@@ -5,7 +5,7 @@
package akka.stream.alpakka.avroparquet.impl
import akka.annotation.InternalApi
import akka.stream._
-import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
import org.apache.avro.generic.GenericRecord
import org.apache.parquet.hadoop.ParquetWriter
@@ -30,7 +30,7 @@ private[avroparquet] class AvroParquetFlow[T <: GenericRecord](writer: ParquetWr
new InHandler {
override def onUpstreamFinish(): Unit =
- //super.onUpstreamFinish()
+ // super.onUpstreamFinish()
completeStage()
override def onUpstreamFailure(ex: Throwable): Unit = {
@@ -43,13 +43,13 @@ private[avroparquet] class AvroParquetFlow[T <: GenericRecord](writer: ParquetWr
writer.write(obtainedValue)
push(out, obtainedValue)
}
- }
- )
+ })
- setHandler(out, new OutHandler {
- override def onPull(): Unit =
- pull(in)
- })
+ setHandler(out,
+ new OutHandler {
+ override def onPull(): Unit =
+ pull(in)
+ })
override def postStop(): Unit = writer.close()
}
diff --git a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetSource.scala b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetSource.scala
index cc044412..331a13c0 100644
--- a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetSource.scala
+++ b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/impl/AvroParquetSource.scala
@@ -4,8 +4,8 @@
package akka.stream.alpakka.avroparquet.impl
import akka.annotation.InternalApi
-import akka.stream.{ActorAttributes, Attributes, Outlet, SourceShape}
-import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler}
+import akka.stream.{ ActorAttributes, Attributes, Outlet, SourceShape }
+import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }
import org.apache.avro.generic.GenericRecord
import org.apache.parquet.hadoop.ParquetReader
@@ -38,8 +38,7 @@ private[avroparquet] class AvroParquetSource[T <: GenericRecord](reader: Parquet
complete(out)
}(push(out, _))
}
- }
- )
+ })
override def postStop(): Unit = reader.close()
diff --git a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/javadsl/AvroParquetSink.scala b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/javadsl/AvroParquetSink.scala
index 46b4b5a5..8c594fd1 100644
--- a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/javadsl/AvroParquetSink.scala
+++ b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/javadsl/AvroParquetSink.scala
@@ -5,8 +5,8 @@
package akka.stream.alpakka.avroparquet.javadsl
import java.util.concurrent.CompletionStage
-import akka.stream.javadsl.{Flow, Keep, Sink}
-import akka.{Done, NotUsed}
+import akka.stream.javadsl.{ Flow, Keep, Sink }
+import akka.{ Done, NotUsed }
import org.apache.avro.generic.GenericRecord
import org.apache.parquet.hadoop.ParquetWriter
diff --git a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/scaladsl/AvroParquetSink.scala b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/scaladsl/AvroParquetSink.scala
index 1ab2cbb3..c7fde124 100644
--- a/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/scaladsl/AvroParquetSink.scala
+++ b/avroparquet/src/main/scala/akka/stream/alpakka/avroparquet/scaladsl/AvroParquetSink.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.avroparquet.scaladsl
import akka.Done
-import akka.stream.scaladsl.{Flow, Keep, Sink}
+import akka.stream.scaladsl.{ Flow, Keep, Sink }
import org.apache.avro.generic.GenericRecord
import org.apache.parquet.hadoop.ParquetWriter
diff --git a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala
index b55cf80e..593abc19 100644
--- a/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala
+++ b/avroparquet/src/test/scala/docs/scaladsl/AbstractAvroParquet.scala
@@ -9,14 +9,14 @@ import java.io.File
import akka.testkit.TestKit
import com.sksamuel.avro4s.RecordFormat
import org.apache.avro.Schema
-import org.apache.avro.generic.{GenericRecord, GenericRecordBuilder}
+import org.apache.avro.generic.{ GenericRecord, GenericRecordBuilder }
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
-import org.apache.parquet.avro.{AvroParquetReader, AvroParquetWriter, AvroReadSupport}
-import org.apache.parquet.hadoop.{ParquetReader, ParquetWriter}
+import org.apache.parquet.avro.{ AvroParquetReader, AvroParquetWriter, AvroReadSupport }
+import org.apache.parquet.hadoop.{ ParquetReader, ParquetWriter }
import org.apache.parquet.hadoop.util.HadoopInputFile
import org.scalacheck.Gen
-import org.scalatest.{BeforeAndAfterAll, Suite}
+import org.scalatest.{ BeforeAndAfterAll, Suite }
import scala.reflect.io.Directory
import scala.util.Random
@@ -27,8 +27,7 @@ trait AbstractAvroParquet extends BeforeAndAfterAll {
case class Document(id: String, body: String)
val schema: Schema = new Schema.Parser().parse(
- "{\"type\":\"record\",\"name\":\"Document\",\"fields\":[{\"name\":\"id\",\"type\":\"string\"},{\"name\":\"body\",\"type\":\"string\"}]}"
- )
+ "{\"type\":\"record\",\"name\":\"Document\",\"fields\":[{\"name\":\"id\",\"type\":\"string\"},{\"name\":\"body\",\"type\":\"string\"}]}")
val genDocument: Gen[Document] =
Gen.oneOf(Seq(Document(id = Gen.alphaStr.sample.get, body = Gen.alphaLowerStr.sample.get)))
diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala
index 7023280f..20c39ed7 100644
--- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala
+++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetFlowSpec.scala
@@ -7,7 +7,7 @@ package docs.scaladsl
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.alpakka.avroparquet.scaladsl.AvroParquetFlow
-import akka.stream.scaladsl.{Flow, Sink, Source}
+import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.testkit.TestKit
import com.sksamuel.avro4s.Record
@@ -29,7 +29,7 @@ class AvroParquetFlowSpec
"Parquet Flow" should {
"insert avro records in parquet from `GenericRecord`" in assertAllStagesStopped {
- //given
+ // given
val n: Int = 2
val file: String = genFinalFile.sample.get
// #init-flow
@@ -38,7 +38,7 @@ class AvroParquetFlowSpec
= genDocuments(n).sample.get.map(docToGenericRecord)
val writer: ParquetWriter[GenericRecord] = parquetWriter(file, conf, schema)
- //when
+ // when
// #init-flow
val source: Source[GenericRecord, NotUsed] = Source(records)
val avroParquet: Flow[GenericRecord, GenericRecord, NotUsed] = AvroParquetFlow(writer)
@@ -50,27 +50,27 @@ class AvroParquetFlowSpec
result.futureValue
- //then
+ // then
val parquetContent: List[GenericRecord] = fromParquet(file, conf)
parquetContent.length shouldEqual n
parquetContent should contain theSameElementsAs records
}
"insert avro records in parquet from a subtype of `GenericRecord`" in assertAllStagesStopped {
- //given
+ // given
val n: Int = 2
val file: String = genFinalFile.sample.get
val documents: List[Document] = genDocuments(n).sample.get
val avroDocuments: List[Record] = documents.map(format.to(_))
val writer: ParquetWriter[Record] = parquetWriter[Record](file, conf, schema)
- //when
+ // when
Source(avroDocuments)
.via(AvroParquetFlow[Record](writer))
.runWith(Sink.seq)
.futureValue
- //then
+ // then
val parquetContent: List[GenericRecord] = fromParquet(file, conf)
parquetContent.length shouldEqual n
parquetContent.map(format.from(_)) should contain theSameElementsAs documents
diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala
index 4b9eae51..8a2085a9 100644
--- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala
+++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSinkSpec.scala
@@ -4,13 +4,13 @@
package docs.scaladsl
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import akka.actor.ActorSystem
import akka.stream.alpakka.avroparquet.scaladsl.AvroParquetSink
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.testkit.TestKit
-import com.sksamuel.avro4s.{Record, RecordFormat}
+import com.sksamuel.avro4s.{ Record, RecordFormat }
import org.scalatest.concurrent.ScalaFutures
import org.apache.avro.generic.GenericRecord
import org.apache.parquet.hadoop.ParquetWriter
@@ -31,23 +31,23 @@ class AvroParquetSinkSpec
"Parquet Sink" should {
"create new parquet file from `GenericRecords`" in assertAllStagesStopped {
- //given
+ // given
val n: Int = 3
val file: String = genFinalFile.sample.get
val records: List[GenericRecord] = genDocuments(n).sample.get.map(docToGenericRecord)
Source(records).runWith(AvroParquetSink(parquetWriter(file, conf, schema))).futureValue
- //when
+ // when
val parquetContent: List[GenericRecord] = fromParquet(file, conf)
- //then
+ // then
parquetContent.length shouldEqual n
parquetContent should contain theSameElementsAs records
}
"create new parquet file from any subtype of `GenericRecord` " in assertAllStagesStopped {
- //given
+ // given
val n: Int = 3
val file: String = genFinalFile.sample.get
val documents: List[Document] = genDocuments(n).sample.get
@@ -60,10 +60,10 @@ class AvroParquetSinkSpec
// #init-sink
result.futureValue shouldBe Done
- //when
+ // when
val parquetContent: List[GenericRecord] = fromParquet(file, conf)
- //then
+ // then
parquetContent.length shouldEqual n
parquetContent.map(format.from(_)) should contain theSameElementsAs documents
}
diff --git a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala
index 15fe5bbe..e372e659 100644
--- a/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala
+++ b/avroparquet/src/test/scala/docs/scaladsl/AvroParquetSourceSpec.scala
@@ -6,8 +6,8 @@ package docs.scaladsl
import akka.NotUsed
import akka.actor.ActorSystem
-import akka.stream.alpakka.avroparquet.scaladsl.{AvroParquetSink, AvroParquetSource}
-import akka.stream.scaladsl.{Keep, Source}
+import akka.stream.alpakka.avroparquet.scaladsl.{ AvroParquetSink, AvroParquetSource }
+import akka.stream.scaladsl.{ Keep, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestKit
@@ -32,7 +32,7 @@ class AvroParquetSourceSpec
"AvroParquetSource" should {
"read from parquet file as a `GenericRecord` type" in assertAllStagesStopped {
- //given
+ // given
val n: Int = 4
val file: String = genFinalFile.sample.get
val records: List[GenericRecord] = genDocuments(n).sample.get.map(docToGenericRecord)
@@ -41,21 +41,21 @@ class AvroParquetSourceSpec
.run()
.futureValue
- //when
+ // when
val reader: ParquetReader[GenericRecord] = parquetReader(file, conf)
// #init-source
val source: Source[GenericRecord, NotUsed] = AvroParquetSource(reader)
// #init-source
val sink = source.runWith(TestSink.probe)
- //then
+ // then
val result: Seq[GenericRecord] = sink.toStrict(3.seconds)
result.length shouldEqual n
result should contain theSameElementsAs records
}
"read from parquet file as any subtype of `GenericRecord` " in assertAllStagesStopped {
- //given
+ // given
val n: Int = 4
val file: String = genFinalFile.sample.get
val documents: List[Document] = genDocuments(n).sample.get
@@ -65,14 +65,14 @@ class AvroParquetSourceSpec
.run()
.futureValue
- //when
+ // when
val reader: ParquetReader[GenericRecord] = parquetReader(file, conf)
// #init-source
val source: Source[GenericRecord, NotUsed] = AvroParquetSource(reader)
// #init-source
val sink = source.runWith(TestSink.probe)
- //then
+ // then
val result: Seq[GenericRecord] = sink.toStrict(3.seconds)
result.length shouldEqual n
result.map(format.from(_)) should contain theSameElementsAs documents
diff --git a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishSettings.scala b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishSettings.scala
index f92d2725..eee63cb6 100644
--- a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishSettings.scala
+++ b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishSettings.scala
@@ -12,7 +12,7 @@ package akka.stream.alpakka.aws.eventbridge
* put into the stream. Use concurrency 1 to retain control over failures.
*
* @param concurrency maps to parallelism in async stream operations
- * */
+ */
final class EventBridgePublishSettings private (val concurrency: Int) {
require(concurrency > 0)
diff --git a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/javadsl/EventBridgePublisher.scala b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/javadsl/EventBridgePublisher.scala
index 51ebb98d..8e360536 100644
--- a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/javadsl/EventBridgePublisher.scala
+++ b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/javadsl/EventBridgePublisher.scala
@@ -7,8 +7,8 @@ package akka.stream.alpakka.aws.eventbridge.javadsl
import java.util.concurrent.CompletionStage
import akka.stream.alpakka.aws.eventbridge.EventBridgePublishSettings
-import akka.stream.javadsl.{Flow, Keep, Sink}
-import akka.{Done, NotUsed}
+import akka.stream.javadsl.{ Flow, Keep, Sink }
+import akka.{ Done, NotUsed }
import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient
import software.amazon.awssdk.services.eventbridge.model._
@@ -25,7 +25,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def flow(settings: EventBridgePublishSettings,
- eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
+ eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher.flow(settings)(eventBridgeClient).asJava
/**
@@ -46,8 +46,7 @@ object EventBridgePublisher {
*/
def flowSeq(
settings: EventBridgePublishSettings,
- eventBridgeClient: EventBridgeAsyncClient
- ): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
+ eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher.flow(settings)(eventBridgeClient).asJava
/**
@@ -56,8 +55,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def flowSeq(
- eventBridgeClient: EventBridgeAsyncClient
- ): Flow[Seq[PutEventsRequestEntry], PutEventsResponse, NotUsed] =
+ eventBridgeClient: EventBridgeAsyncClient): Flow[Seq[PutEventsRequestEntry], PutEventsResponse, NotUsed] =
akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher
.flowSeq(EventBridgePublishSettings())(eventBridgeClient)
.asJava
@@ -69,7 +67,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def publishFlow(settings: EventBridgePublishSettings,
- eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
+ eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher.publishFlow(settings)(eventBridgeClient).asJava
/**
@@ -98,7 +96,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def sink(settings: EventBridgePublishSettings,
- eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequestEntry, CompletionStage[Done]] =
+ eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequestEntry, CompletionStage[Done]] =
flow(settings, eventBridgeClient)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
@@ -109,7 +107,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def publishSink(settings: EventBridgePublishSettings,
- eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequest, CompletionStage[Done]] =
+ eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequest, CompletionStage[Done]] =
publishFlow(settings, eventBridgeClient)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
diff --git a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala
index c619171a..c31e27d3 100644
--- a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala
+++ b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala
@@ -5,8 +5,8 @@
package akka.stream.alpakka.aws.eventbridge.scaladsl
import akka.stream.alpakka.aws.eventbridge.EventBridgePublishSettings
-import akka.stream.scaladsl.{Flow, Keep, Sink}
-import akka.{Done, NotUsed}
+import akka.stream.scaladsl.{ Flow, Keep, Sink }
+import akka.{ Done, NotUsed }
import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient
import software.amazon.awssdk.services.eventbridge.model._
@@ -26,8 +26,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def flow(settings: EventBridgePublishSettings = EventBridgePublishSettings())(
- implicit eventBridgeClient: EventBridgeAsyncClient
- ): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
+ implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequestEntry, PutEventsResponse, NotUsed] =
Flow
.fromFunction((message: PutEventsRequestEntry) => PutEventsRequest.builder().entries(message).build())
.via(publishFlow(settings))
@@ -39,8 +38,8 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def flowSeq(settings: EventBridgePublishSettings = EventBridgePublishSettings())(
- implicit eventBridgeClient: EventBridgeAsyncClient
- ): Flow[Seq[PutEventsRequestEntry], PutEventsResponse, NotUsed] =
+ implicit eventBridgeClient: EventBridgeAsyncClient)
+ : Flow[Seq[PutEventsRequestEntry], PutEventsResponse, NotUsed] =
Flow
.fromFunction((messages: Seq[PutEventsRequestEntry]) => PutEventsRequest.builder().entries(messages: _*).build())
.via(publishFlow(settings))
@@ -52,8 +51,8 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def publishFlow(
- settings: EventBridgePublishSettings
- )(implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
+ settings: EventBridgePublishSettings)(
+ implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
Flow[PutEventsRequest]
.mapAsync(settings.concurrency)(eventBridgeClient.putEvents(_).toScala)
@@ -63,8 +62,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def publishFlow()(
- implicit eventBridgeClient: EventBridgeAsyncClient
- ): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
+ implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] =
publishFlow(EventBridgePublishSettings())
/**
@@ -74,8 +72,7 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def sink(settings: EventBridgePublishSettings = EventBridgePublishSettings())(
- implicit eventBridgeClient: EventBridgeAsyncClient
- ): Sink[PutEventsRequestEntry, Future[Done]] =
+ implicit eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequestEntry, Future[Done]] =
flow(settings).toMat(Sink.ignore)(Keep.right)
/**
@@ -85,8 +82,8 @@ object EventBridgePublisher {
* @param eventBridgeClient [[software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient EventBridgeAsyncClient]] client for publishing
*/
def publishSink(
- settings: EventBridgePublishSettings = EventBridgePublishSettings()
- )(implicit eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequest, Future[Done]] =
+ settings: EventBridgePublishSettings = EventBridgePublishSettings())(
+ implicit eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequest, Future[Done]] =
publishFlow(settings).toMat(Sink.ignore)(Keep.right)
/**
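The definition-site changes in this file follow the same folding rule as the call sites: the closing parenthesis of a curried (implicit ...) parameter list is pulled onto the last parameter instead of standing alone before the result type. One signature from the hunks above in both layouts (assuming the analogous danglingParentheses.defnSite = false setting):

    // before
    def sink(settings: EventBridgePublishSettings = EventBridgePublishSettings())(
        implicit eventBridgeClient: EventBridgeAsyncClient
    ): Sink[PutEventsRequestEntry, Future[Done]] =
      flow(settings).toMat(Sink.ignore)(Keep.right)

    // after
    def sink(settings: EventBridgePublishSettings = EventBridgePublishSettings())(
        implicit eventBridgeClient: EventBridgeAsyncClient): Sink[PutEventsRequestEntry, Future[Done]] =
      flow(settings).toMat(Sink.ignore)(Keep.right)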
diff --git a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/DefaultTestContext.scala b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/DefaultTestContext.scala
index 338e8e3d..f1034b4f 100644
--- a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/DefaultTestContext.scala
+++ b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/DefaultTestContext.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.aws.eventbridge
import akka.actor.ActorSystem
import org.mockito.Mockito.reset
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
+import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, Suite }
import org.scalatestplus.mockito.MockitoSugar
import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient
diff --git a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishMockSpec.scala b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishMockSpec.scala
index addfa896..dd03c815 100644
--- a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishMockSpec.scala
+++ b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/EventBridgePublishMockSpec.scala
@@ -7,9 +7,9 @@ package akka.stream.alpakka.aws.eventbridge
import java.util.concurrent.CompletableFuture
import akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher
-import akka.stream.scaladsl.{Keep, Sink}
+import akka.stream.scaladsl.{ Keep, Sink }
import akka.stream.testkit.scaladsl.TestSource
-import org.mockito.ArgumentMatchers.{any, eq => meq}
+import org.mockito.ArgumentMatchers.{ any, eq => meq }
import org.mockito.Mockito._
import org.scalatest.flatspec._
import org.scalatest.matchers.must.Matchers
@@ -30,8 +30,7 @@ class EventBridgePublishMockSpec extends AnyFlatSpec with DefaultTestContext wit
PutEventsRequest
.builder()
.entries(
- details.map(detail => entryDetail(detail, eventBusName)): _*
- )
+ details.map(detail => entryDetail(detail, eventBusName)): _*)
.build()
private def resultResponse(eventId: String): PutEventsResponse = {
diff --git a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala
index f92c3d5e..8c80598c 100644
--- a/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala
+++ b/aws-event-bridge/src/test/scala/akka/stream/alpakka/aws/eventbridge/IntegrationTestContext.scala
@@ -9,7 +9,7 @@ import java.util.UUID
import akka.actor.ActorSystem
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
-import org.scalatest.{BeforeAndAfterAll, Suite}
+import org.scalatest.{ BeforeAndAfterAll, Suite }
import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient
import software.amazon.awssdk.services.eventbridge.model.CreateEventBusRequest
@@ -18,9 +18,9 @@ import scala.concurrent.duration.FiniteDuration
trait IntegrationTestContext extends BeforeAndAfterAll with ScalaFutures {
this: Suite =>
- //#init-system
+ // #init-system
implicit val system: ActorSystem = ActorSystem()
- //#init-system
+ // #init-system
def eventBusEndpoint: String = s"http://localhost:4587"
@@ -30,8 +30,7 @@ trait IntegrationTestContext extends BeforeAndAfterAll with ScalaFutures {
def createEventBus(): String =
eventBridgeClient
.createEventBus(
- CreateEventBusRequest.builder().name(s"alpakka-topic-${UUID.randomUUID().toString}").build()
- )
+ CreateEventBusRequest.builder().name(s"alpakka-topic-${UUID.randomUUID().toString}").build())
.get()
.eventBusArn()
@@ -43,11 +42,11 @@ trait IntegrationTestContext extends BeforeAndAfterAll with ScalaFutures {
override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system)
def createAsyncClient(endEndpoint: String): EventBridgeAsyncClient = {
- //#init-client
+ // #init-client
import java.net.URI
import com.github.matsluni.akkahttpspi.AkkaHttpClient
- import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+ import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient
@@ -61,7 +60,7 @@ trait IntegrationTestContext extends BeforeAndAfterAll with ScalaFutures {
.build()
system.registerOnTermination(awsEventBridgeClient.close())
- //#init-client
+ // #init-client
awsEventBridgeClient
}
diff --git a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
index d03f8075..ae06c56b 100644
--- a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
+++ b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
@@ -7,11 +7,11 @@ package docs.scaladsl
import akka.Done
import akka.stream.alpakka.aws.eventbridge.IntegrationTestContext
import akka.stream.alpakka.aws.eventbridge.scaladsl.EventBridgePublisher
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec._
import org.scalatest.matchers.should.Matchers
-import software.amazon.awssdk.services.eventbridge.model.{PutEventsRequest, PutEventsRequestEntry}
+import software.amazon.awssdk.services.eventbridge.model.{ PutEventsRequest, PutEventsRequestEntry }
import scala.concurrent.Future
import scala.concurrent.duration._
diff --git a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/javadsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/javadsl/AwsLambdaFlow.scala
index 800dd131..10805e0c 100644
--- a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/javadsl/AwsLambdaFlow.scala
+++ b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/javadsl/AwsLambdaFlow.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.awslambda.javadsl
import akka.NotUsed
import akka.stream.javadsl.Flow
-import software.amazon.awssdk.services.lambda.model.{InvokeRequest, InvokeResponse}
+import software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse }
import software.amazon.awssdk.services.lambda.LambdaAsyncClient
object AwsLambdaFlow {
diff --git a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala
index 19a509f5..30d3c63d 100644
--- a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala
+++ b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.awslambda.scaladsl
import akka.NotUsed
import akka.stream.scaladsl.Flow
-import software.amazon.awssdk.services.lambda.model.{InvokeRequest, InvokeResponse}
+import software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse }
import software.amazon.awssdk.services.lambda.LambdaAsyncClient
import scala.compat.java8.FutureConverters._
@@ -16,8 +16,7 @@ object AwsLambdaFlow {
* Scala API: creates a [[AwsLambdaFlowStage]] for a AWS Lambda function invocation using [[LambdaAsyncClient]]
*/
def apply(
- parallelism: Int
- )(implicit awsLambdaClient: LambdaAsyncClient): Flow[InvokeRequest, InvokeResponse, NotUsed] =
+ parallelism: Int)(implicit awsLambdaClient: LambdaAsyncClient): Flow[InvokeRequest, InvokeResponse, NotUsed] =
Flow[InvokeRequest].mapAsyncUnordered(parallelism)(awsLambdaClient.invoke(_).toScala)
}
diff --git a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
index b313bae8..68acd756 100644
--- a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
+++ b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
@@ -9,22 +9,22 @@ import java.util.concurrent.CompletableFuture
import akka.actor.ActorSystem
import akka.stream.alpakka.awslambda.scaladsl.AwsLambdaFlow
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
-import akka.stream.scaladsl.{Keep, Sink}
+import akka.stream.scaladsl.{ Keep, Sink }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestKit
-import org.mockito.ArgumentMatchers.{any => mockitoAny, eq => mockitoEq}
+import org.mockito.ArgumentMatchers.{ any => mockitoAny, eq => mockitoEq }
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
+import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatestplus.mockito.MockitoSugar
import software.amazon.awssdk.core.SdkBytes
import software.amazon.awssdk.services.lambda.LambdaAsyncClient
-import software.amazon.awssdk.services.lambda.model.{InvokeRequest, InvokeResponse}
+import software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse }
import scala.concurrent.Await
import scala.concurrent.duration._
@@ -63,8 +63,7 @@ class AwsLambdaFlowSpec
"call a single invoke request" in assertAllStagesStopped {
when(
- awsLambdaClient.invoke(mockitoEq(invokeRequest))
- ).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] {
+ awsLambdaClient.invoke(mockitoEq(invokeRequest))).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] {
override def answer(invocation: InvocationOnMock): CompletableFuture[InvokeResponse] =
CompletableFuture.completedFuture(invokeResponse)
})
@@ -81,8 +80,7 @@ class AwsLambdaFlowSpec
"call with exception" in assertAllStagesStopped {
when(
- awsLambdaClient.invoke(mockitoAny[InvokeRequest]())
- ).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] {
+ awsLambdaClient.invoke(mockitoAny[InvokeRequest]())).thenAnswer(new Answer[CompletableFuture[InvokeResponse]] {
override def answer(invocation: InvocationOnMock): CompletableFuture[InvokeResponse] = {
val exception = new RuntimeException("Error in lambda")
val future = new CompletableFuture[InvokeResponse]()
@@ -98,7 +96,7 @@ class AwsLambdaFlowSpec
val ex = Await.result(future.failed, 3.seconds)
ex shouldBe a[RuntimeException]
- ex.getMessage shouldBe ("Error in lambda")
+ ex.getMessage shouldBe "Error in lambda"
}
}
diff --git a/awslambda/src/test/scala/docs/scaladsl/Examples.scala b/awslambda/src/test/scala/docs/scaladsl/Examples.scala
index 6471ca0d..863cf664 100644
--- a/awslambda/src/test/scala/docs/scaladsl/Examples.scala
+++ b/awslambda/src/test/scala/docs/scaladsl/Examples.scala
@@ -5,21 +5,21 @@
package docs.scaladsl
import akka.stream.alpakka.awslambda.scaladsl.AwsLambdaFlow
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
import software.amazon.awssdk.services.lambda.LambdaAsyncClient
object Examples {
- //#init-sys
+ // #init-sys
import akka.actor.ActorSystem
implicit val system: ActorSystem = ActorSystem()
- //#init-sys
+ // #init-sys
def initClient(): Unit = {
- //#init-client
+ // #init-client
import com.github.matsluni.akkahttpspi.AkkaHttpClient
- import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+ import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.lambda.LambdaAsyncClient
// Don't encode credentials in your source code!
@@ -35,11 +35,11 @@ object Examples {
.build()
system.registerOnTermination(lambdaClient.close())
- //#init-client
+ // #init-client
}
def run()(implicit lambdaClient: LambdaAsyncClient): Unit = {
- //#run
+ // #run
import software.amazon.awssdk.core.SdkBytes
import software.amazon.awssdk.services.lambda.model.InvokeRequest
@@ -49,6 +49,6 @@ object Examples {
.payload(SdkBytes.fromUtf8String("test-payload"))
.build()
Source.single(request).via(AwsLambdaFlow(1)).runWith(Sink.seq)
- //#run
+ // #run
}
}
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSinkFunctions.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSinkFunctions.scala
index eb01878f..3ccaca3d 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSinkFunctions.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSinkFunctions.scala
@@ -6,31 +6,29 @@ package akka.stream.alpakka.azure.storagequeue.impl
import akka.annotation.InternalApi
import akka.stream.alpakka.azure.storagequeue.DeleteOrUpdateMessage
-import akka.stream.alpakka.azure.storagequeue.DeleteOrUpdateMessage.{Delete, UpdateVisibility}
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import akka.stream.alpakka.azure.storagequeue.DeleteOrUpdateMessage.{ Delete, UpdateVisibility }
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
/**
* INTERNAL API
*/
@InternalApi private[storagequeue] object AzureQueueSinkFunctions {
def addMessage(
- cloudQueue: () => CloudQueue
- )(msg: CloudQueueMessage, timeToLive: Int = 0, initialVisibilityTimeout: Int = 0): Unit =
+ cloudQueue: () => CloudQueue)(
+ msg: CloudQueueMessage, timeToLive: Int = 0, initialVisibilityTimeout: Int = 0): Unit =
cloudQueue().addMessage(msg, timeToLive, initialVisibilityTimeout, null, null)
def deleteMessage(
- cloudQueue: () => CloudQueue
- )(msg: CloudQueueMessage): Unit =
+ cloudQueue: () => CloudQueue)(msg: CloudQueueMessage): Unit =
cloudQueue().deleteMessage(msg)
def updateMessage(cloudQueue: () => CloudQueue)(msg: CloudQueueMessage, timeout: Int): Unit =
cloudQueue().updateMessage(msg, timeout)
def deleteOrUpdateMessage(
- cloudQueue: () => CloudQueue
- )(msg: CloudQueueMessage, op: DeleteOrUpdateMessage): Unit =
+ cloudQueue: () => CloudQueue)(msg: CloudQueueMessage, op: DeleteOrUpdateMessage): Unit =
op match {
- case _: Delete => deleteMessage(cloudQueue)(msg)
+ case _: Delete => deleteMessage(cloudQueue)(msg)
case m: UpdateVisibility => updateMessage(cloudQueue)(msg, m.timeout)
}
}
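Two things happen in the deleteOrUpdateMessage hunk above: the parameter-list parenthesis is folded as elsewhere, and the case arms of the match are re-padded so their => arrows line up (the rewritten Delete arm differs from the original only in whitespace). A sketch of the alignment; one of scalafmt's align presets pads the shorter pattern this way, though the exact align configuration is not visible in this commit:

    // before
    case _: Delete => deleteMessage(cloudQueue)(msg)
    case m: UpdateVisibility => updateMessage(cloudQueue)(msg, m.timeout)

    // after: arrows aligned across adjacent arms
    case _: Delete           => deleteMessage(cloudQueue)(msg)
    case m: UpdateVisibility => updateMessage(cloudQueue)(msg, m.timeout)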
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala
index b727f084..c22c1495 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala
@@ -8,9 +8,9 @@ import akka.NotUsed
import akka.annotation.InternalApi
import akka.stream.alpakka.azure.storagequeue.AzureQueueSourceSettings
import akka.stream.impl.Stages.DefaultAttributes.IODispatcher
-import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler, TimerGraphStageLogic}
-import akka.stream.{Attributes, Outlet, SourceShape}
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, TimerGraphStageLogic }
+import akka.stream.{ Attributes, Outlet, SourceShape }
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
import scala.collection.mutable.Queue
@@ -18,7 +18,7 @@ import scala.collection.mutable.Queue
* INTERNAL API
*/
@InternalApi private[storagequeue] final class AzureQueueSourceStage(cloudQueue: () => CloudQueue,
- settings: AzureQueueSourceSettings)
+ settings: AzureQueueSourceSettings)
extends GraphStage[SourceShape[CloudQueueMessage]] {
val out: Outlet[CloudQueueMessage] = Outlet("AzureCloudQueue.out")
override val shape: SourceShape[CloudQueueMessage] = SourceShape(out)
@@ -64,7 +64,6 @@ import scala.collection.mutable.Queue
} else {
retrieveMessages()
}
- }
- )
+ })
}
}
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala
index d7265adf..63ff38f2 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.azure.storagequeue.javadsl
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
import akka.stream.alpakka.azure.storagequeue.impl.AzureQueueSinkFunctions
import akka.stream.javadsl.Sink
import akka.Done
@@ -25,7 +25,7 @@ object AzureQueueSink {
* Internal API
*/
private[javadsl] def fromFunction[T](f: T => Unit): Sink[T, CompletionStage[Done]] = {
- import akka.stream.alpakka.azure.storagequeue.scaladsl.{AzureQueueSink => AzureQueueSinkScalaDSL}
+ import akka.stream.alpakka.azure.storagequeue.scaladsl.{ AzureQueueSink => AzureQueueSinkScalaDSL }
import scala.compat.java8.FutureConverters._
AzureQueueSinkScalaDSL.fromFunction(f).mapMaterializedValue(_.toJava).asJava
}
@@ -41,12 +41,10 @@ object AzureQueueWithTimeoutsSink {
* of a [[com.microsoft.azure.storage.queue.CloudQueueMessage]], a [[MessageWithTimeouts]].
*/
def create(cloudQueue: Supplier[CloudQueue]): Sink[MessageWithTimeouts, CompletionStage[Done]] =
- AzureQueueSink.fromFunction(
- { input: MessageWithTimeouts =>
- AzureQueueSinkFunctions
- .addMessage(() => cloudQueue.get)(input.message, input.timeToLive, input.initialVisibility)
- }
- )
+ AzureQueueSink.fromFunction { input: MessageWithTimeouts =>
+ AzureQueueSinkFunctions
+ .addMessage(() => cloudQueue.get)(input.message, input.timeToLive, input.initialVisibility)
+ }
}
object AzureQueueDeleteSink {
@@ -67,7 +65,6 @@ object AzureQueueDeleteOrUpdateSink {
* in an Azure Storage Queue.
*/
def create(cloudQueue: Supplier[CloudQueue]): Sink[MessageAndDeleteOrUpdate, CompletionStage[Done]] =
- AzureQueueSink.fromFunction[MessageAndDeleteOrUpdate](
- input => AzureQueueSinkFunctions.deleteOrUpdateMessage(() => cloudQueue.get)(input.message, input.op)
- )
+ AzureQueueSink.fromFunction[MessageAndDeleteOrUpdate](input =>
+ AzureQueueSinkFunctions.deleteOrUpdateMessage(() => cloudQueue.get)(input.message, input.op))
}
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSource.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSource.scala
index 4c3e3d22..56d530e4 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSource.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSource.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.azure.storagequeue.javadsl
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
import akka.stream.alpakka.azure.storagequeue.AzureQueueSourceSettings
import akka.stream.javadsl.Source
import akka.NotUsed
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSink.scala
index ffb76bb6..fb73743b 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSink.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSink.scala
@@ -4,9 +4,9 @@
package akka.stream.alpakka.azure.storagequeue.scaladsl
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
import akka.stream.alpakka.azure.storagequeue.impl.AzureQueueSinkFunctions
-import akka.stream.scaladsl.{Flow, Keep, Sink}
+import akka.stream.scaladsl.{ Flow, Keep, Sink }
import akka.Done
import scala.concurrent.Future
@@ -41,11 +41,8 @@ object AzureQueueWithTimeoutsSink {
 * with (CloudQueueMessage, timeToLive, initialVisibilityTimeout).
*/
def apply(
- cloudQueue: () => CloudQueue
- ): Sink[(CloudQueueMessage, Int, Int), Future[Done]] =
- AzureQueueSink.fromFunction(
- tup => AzureQueueSinkFunctions.addMessage(cloudQueue)(tup._1, tup._2, tup._3)
- )
+ cloudQueue: () => CloudQueue): Sink[(CloudQueueMessage, Int, Int), Future[Done]] =
+ AzureQueueSink.fromFunction(tup => AzureQueueSinkFunctions.addMessage(cloudQueue)(tup._1, tup._2, tup._3))
}
object AzureQueueDeleteSink {
@@ -64,9 +61,6 @@ object AzureQueueDeleteOrUpdateSink {
* in an Azure Storage Queue.
*/
def apply(
- cloudQueue: () => CloudQueue
- ): Sink[(CloudQueueMessage, DeleteOrUpdateMessage), Future[Done]] =
- AzureQueueSink.fromFunction(
- input => AzureQueueSinkFunctions.deleteOrUpdateMessage(cloudQueue)(input._1, input._2)
- )
+ cloudQueue: () => CloudQueue): Sink[(CloudQueueMessage, DeleteOrUpdateMessage), Future[Done]] =
+ AzureQueueSink.fromFunction(input => AzureQueueSinkFunctions.deleteOrUpdateMessage(cloudQueue)(input._1, input._2))
}
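
The reformatted scaladsl sinks keep their call shapes: AzureQueueWithTimeoutsSink consumes (CloudQueueMessage, timeToLive, initialVisibility) tuples, AzureQueueDeleteOrUpdateSink consumes (CloudQueueMessage, DeleteOrUpdateMessage) pairs. A minimal usage sketch, assuming a caller-supplied CloudQueue factory and an implicit ActorSystem (neither appears in this commit):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.azure.storagequeue.scaladsl.AzureQueueWithTimeoutsSink
    import akka.stream.scaladsl.Source
    import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }

    implicit val system: ActorSystem = ActorSystem()
    val queueFactory: () => CloudQueue = ??? // assumption: provided by the application

    // enqueue one message with a 60 s time-to-live, immediately visible
    Source
      .single((new CloudQueueMessage("payload"), 60, 0))
      .runWith(AzureQueueWithTimeoutsSink(queueFactory))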
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSource.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSource.scala
index 35a4d00f..54756788 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSource.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/scaladsl/AzureQueueSource.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.azure.storagequeue.scaladsl
-import com.microsoft.azure.storage.queue.{CloudQueue, CloudQueueMessage}
+import com.microsoft.azure.storage.queue.{ CloudQueue, CloudQueueMessage }
import akka.stream.alpakka.azure.storagequeue.AzureQueueSourceSettings
import akka.stream.scaladsl.Source
import akka.NotUsed
@@ -17,7 +17,6 @@ object AzureQueueSource {
*/
def apply(
cloudQueue: () => CloudQueue,
- settings: AzureQueueSourceSettings = AzureQueueSourceSettings()
- ): Source[CloudQueueMessage, NotUsed] =
+ settings: AzureQueueSourceSettings = AzureQueueSourceSettings()): Source[CloudQueueMessage, NotUsed] =
Source.fromGraph(new AzureQueueSourceStage(cloudQueue, settings))
}
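
On the consuming side the signature is unchanged apart from layout: AzureQueueSource(cloudQueue, settings) yields a Source[CloudQueueMessage, NotUsed]. A sketch, reusing the hypothetical queueFactory from above:

    import akka.stream.alpakka.azure.storagequeue.AzureQueueSourceSettings
    import akka.stream.alpakka.azure.storagequeue.scaladsl.AzureQueueSource
    import akka.stream.scaladsl.Sink

    AzureQueueSource(queueFactory, AzureQueueSourceSettings())
      .map(_.getMessageContentAsString)
      .runWith(Sink.foreach(println))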
diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala
index de65edbc..495a98a8 100644
--- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala
+++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala
@@ -4,13 +4,14 @@
package akka.stream.alpakka.azure.storagequeue
-import java.time.{Duration => JavaDuration}
+import java.time.{ Duration => JavaDuration }
import java.util.Optional
import scala.compat.java8.OptionConverters._
-import scala.concurrent.duration.{Duration, FiniteDuration}
+import scala.concurrent.duration.{ Duration, FiniteDuration }
-/** Settings for AzureQueueSource
+/**
+ * Settings for AzureQueueSource
*
 * @param initialVisibilityTimeout Specifies for how many seconds a message stays invisible after it has been dequeued.
* See parameter of the same name in [[com.microsoft.azure.storage.queue.CloudQueue$.retrieveMessages]].
@@ -22,14 +23,12 @@ import scala.concurrent.duration.{Duration, FiniteDuration}
final class AzureQueueSourceSettings private (
val initialVisibilityTimeout: Int,
val batchSize: Int,
- val retrieveRetryTimeout: Option[FiniteDuration] = None
-) {
+ val retrieveRetryTimeout: Option[FiniteDuration] = None) {
def withBatchSize(batchSize: Int): AzureQueueSourceSettings =
copy(batchSize = batchSize)
/**
- *
 * @param retrieveRetryTimeout in seconds. If <= 0, retrying of message retrieval is disabled.
* @return
*/
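
The settings keep their copy-on-write builders; withBatchSize and withRetrieveRetryTimeout (the latter exercised in AzureQueueSpec below) each return an updated instance. A sketch:

    import scala.concurrent.duration._
    import akka.stream.alpakka.azure.storagequeue.AzureQueueSourceSettings

    val settings = AzureQueueSourceSettings()
      .withBatchSize(10)                  // fetch up to 10 messages per round trip
      .withRetrieveRetryTimeout(1.second) // keep polling an empty queue every second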
diff --git a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala
index df2b8d94..44f13e62 100644
--- a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala
+++ b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala
@@ -5,7 +5,7 @@
package docs.scaladsl
import akka.actor.ActorSystem
-import akka.stream.alpakka.azure.storagequeue.DeleteOrUpdateMessage.{Delete, UpdateVisibility}
+import akka.stream.alpakka.azure.storagequeue.DeleteOrUpdateMessage.{ Delete, UpdateVisibility }
import akka.stream.alpakka.azure.storagequeue._
import akka.stream.alpakka.azure.storagequeue.scaladsl._
import akka.stream.scaladsl._
@@ -70,10 +70,8 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with
AzureQueueSource(queueFactory)
.runWith(Sink.seq)
- .map(
- dequeuedMsgs =>
- assert(msgs.map(_.getMessageContentAsString).toSet == dequeuedMsgs.map(_.getMessageContentAsString).toSet)
- )
+ .map(dequeuedMsgs =>
+ assert(msgs.map(_.getMessageContentAsString).toSet == dequeuedMsgs.map(_.getMessageContentAsString).toSet))
}
it should "observe retrieveRetryTimeout and retrieve messages queued later" in assertAllStagesStopped {
@@ -83,10 +81,8 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with
AzureQueueSource(queueFactory, AzureQueueSourceSettings().withRetrieveRetryTimeout(1.seconds))
.take(10)
.runWith(Sink.seq)
- .map(
- dequeuedMsgs =>
- assert(msgs.map(_.getMessageContentAsString).toSet == dequeuedMsgs.map(_.getMessageContentAsString).toSet)
- )
+ .map(dequeuedMsgs =>
+ assert(msgs.map(_.getMessageContentAsString).toSet == dequeuedMsgs.map(_.getMessageContentAsString).toSet))
Thread.sleep(3000)
msgs.foreach(m => queue.addMessage(m))
@@ -98,9 +94,9 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with
msgs.foreach(m => queue.addMessage(m))
Await.result(AzureQueueSource(queueFactory, AzureQueueSourceSettings().withBatchSize(2))
- .take(1)
- .runWith(Sink.seq),
- timeout)
+ .take(1)
+ .runWith(Sink.seq),
+ timeout)
    assert(queue.retrieveMessage() != null, "There should be an 11th message on queue")
}
@@ -158,8 +154,7 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with
.take(10)
.map(msg => (msg, UpdateVisibility(120)))
.runWith(AzureQueueDeleteOrUpdateSink(queueFactory)),
- timeout
- )
+ timeout)
// Now we should not be able to get another one
assertCannotGetMessageFromQueue
@@ -176,8 +171,7 @@ class AzureQueueSpec extends TestKit(ActorSystem()) with AsyncFlatSpecLike with
.take(10)
.map(msg => (msg, Delete))
.runWith(AzureQueueDeleteOrUpdateSink(queueFactory)),
- timeout
- )
+ timeout)
// Now we should not be able to get another one
assertCannotGetMessageFromQueue
diff --git a/build.sbt b/build.sbt
index 0e0effef..2893f2c0 100644
--- a/build.sbt
+++ b/build.sbt
@@ -49,8 +49,7 @@ lazy val `pekko-connectors` = project
text,
udp,
unixdomainsocket,
- xml
- )
+ xml)
.aggregate(`doc-examples`)
.settings(
onLoadMessage :=
@@ -82,16 +81,15 @@ lazy val `pekko-connectors` = project
.filterNot(_.data.getAbsolutePath.contains("protobuf-java-2.6.1.jar"))
},
ScalaUnidoc / unidoc / unidocProjectFilter := inAnyProject
- -- inProjects(
- `doc-examples`,
- csvBench,
- mqttStreamingBench,
- // googleCloudPubSubGrpc and googleCloudBigQueryStorage contain the same gRPC generated classes
- // don't include ScalaDocs for googleCloudBigQueryStorage to make it work
- googleCloudBigQueryStorage,
- // springWeb triggers an esoteric ScalaDoc bug (from Java code)
- springWeb
- ),
+ -- inProjects(
+ `doc-examples`,
+ csvBench,
+ mqttStreamingBench,
+ // googleCloudPubSubGrpc and googleCloudBigQueryStorage contain the same gRPC generated classes
+ // don't include ScalaDocs for googleCloudBigQueryStorage to make it work
+ googleCloudBigQueryStorage,
+ // springWeb triggers an esoteric ScalaDoc bug (from Java code)
+ springWeb),
licenses := List(License.Apache2),
crossScalaVersions := List() // workaround for https://github.com/sbt/sbt/issues/3465
)
@@ -99,18 +97,15 @@ lazy val `pekko-connectors` = project
TaskKey[Unit]("verifyCodeFmt") := {
javafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
- "Unformatted Java code found. Please run 'javafmtAll' and commit the reformatted code"
- )
+ "Unformatted Java code found. Please run 'javafmtAll' and commit the reformatted code")
}
scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
- "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code"
- )
+ "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code")
}
(Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
- "Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code"
- )
+ "Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code")
}
}
@@ -126,8 +121,7 @@ lazy val awslambda = pekkoConnectorProject("awslambda", "aws.lambda", Dependenci
lazy val azureStorageQueue = pekkoConnectorProject(
"azure-storage-queue",
"azure.storagequeue",
- Dependencies.AzureStorageQueue
-)
+ Dependencies.AzureStorageQueue)
lazy val cassandra =
pekkoConnectorProject("cassandra", "cassandra", Dependencies.Cassandra)
@@ -146,8 +140,7 @@ lazy val dynamodb = pekkoConnectorProject("dynamodb", "aws.dynamodb", Dependenci
lazy val elasticsearch = pekkoConnectorProject(
"elasticsearch",
"elasticsearch",
- Dependencies.Elasticsearch
-)
+ Dependencies.Elasticsearch)
// The name 'file' is taken by `sbt.file`, hence 'files'
lazy val files = pekkoConnectorProject("file", "file", Dependencies.File)
@@ -158,8 +151,7 @@ lazy val ftp = pekkoConnectorProject(
Dependencies.Ftp,
Test / fork := true,
// To avoid potential blocking in machines with low entropy (default is `/dev/random`)
- Test / javaOptions += "-Djava.security.egd=file:/dev/./urandom"
-)
+ Test / javaOptions += "-Djava.security.egd=file:/dev/./urandom")
lazy val geode =
pekkoConnectorProject(
@@ -168,23 +160,21 @@ lazy val geode =
Dependencies.Geode,
Test / fork := true,
// https://github.com/scala/bug/issues/12072
- Test / scalacOptions += "-Xlint:-byname-implicit"
- )
+ Test / scalacOptions += "-Xlint:-byname-implicit")
lazy val googleCommon = pekkoConnectorProject(
"google-common",
"google.common",
Dependencies.GoogleCommon,
- Test / fork := true
-)
+ Test / fork := true)
lazy val googleCloudBigQuery = pekkoConnectorProject(
"google-cloud-bigquery",
"google.cloud.bigquery",
Dependencies.GoogleBigQuery,
Test / fork := true,
- Compile / scalacOptions += "-Wconf:src=src_managed/.+:s"
-).dependsOn(googleCommon).enablePlugins(spray.boilerplate.BoilerplatePlugin)
+ Compile / scalacOptions += "-Wconf:src=src_managed/.+:s").dependsOn(googleCommon).enablePlugins(
+ spray.boilerplate.BoilerplatePlugin)
lazy val googleCloudBigQueryStorage = pekkoConnectorProject(
"google-cloud-bigquery-storage",
@@ -197,11 +187,10 @@ lazy val googleCloudBigQueryStorage = pekkoConnectorProject(
// Test / akkaGrpcGeneratedSources := Seq(AkkaGrpc.Server),
akkaGrpcGeneratedLanguages := Seq(AkkaGrpc.Scala, AkkaGrpc.Java),
Compile / scalacOptions ++= Seq(
- "-Wconf:src=.+/akka-grpc/main/.+:s",
- "-Wconf:src=.+/akka-grpc/test/.+:s"
- ),
- compile / javacOptions := (compile / javacOptions).value.filterNot(_ == "-Xlint:deprecation")
-).dependsOn(googleCommon).enablePlugins(AkkaGrpcPlugin)
+ "-Wconf:src=.+/akka-grpc/main/.+:s",
+ "-Wconf:src=.+/akka-grpc/test/.+:s"),
+ compile / javacOptions := (compile / javacOptions).value.filterNot(_ == "-Xlint:deprecation")).dependsOn(
+ googleCommon).enablePlugins(AkkaGrpcPlugin)
lazy val googleCloudPubSub = pekkoConnectorProject(
"google-cloud-pub-sub",
@@ -209,8 +198,8 @@ lazy val googleCloudPubSub = pekkoConnectorProject(
Dependencies.GooglePubSub,
Test / fork := true,
// See docker-compose.yml gcloud-pubsub-emulator_prep
- Test / envVars := Map("PUBSUB_EMULATOR_HOST" -> "localhost", "PUBSUB_EMULATOR_PORT" -> "8538")
-).dependsOn(googleCommon)
+ Test / envVars := Map("PUBSUB_EMULATOR_HOST" -> "localhost", "PUBSUB_EMULATOR_PORT" -> "8538")).dependsOn(
+ googleCommon)
lazy val googleCloudPubSubGrpc = pekkoConnectorProject(
"google-cloud-pub-sub-grpc",
@@ -222,18 +211,16 @@ lazy val googleCloudPubSubGrpc = pekkoConnectorProject(
// for the ExampleApp in the tests
run / connectInput := true,
Compile / scalacOptions ++= Seq(
- "-Wconf:src=.+/akka-grpc/main/.+:s",
- "-Wconf:src=.+/akka-grpc/test/.+:s"
- ),
- compile / javacOptions := (compile / javacOptions).value.filterNot(_ == "-Xlint:deprecation")
-).enablePlugins(AkkaGrpcPlugin).dependsOn(googleCommon)
+ "-Wconf:src=.+/akka-grpc/main/.+:s",
+ "-Wconf:src=.+/akka-grpc/test/.+:s"),
+ compile / javacOptions := (compile / javacOptions).value.filterNot(_ == "-Xlint:deprecation")).enablePlugins(
+ AkkaGrpcPlugin).dependsOn(googleCommon)
lazy val googleCloudStorage = pekkoConnectorProject(
"google-cloud-storage",
"google.cloud.storage",
Test / fork := true,
- Dependencies.GoogleStorage
-).dependsOn(googleCommon)
+ Dependencies.GoogleStorage).dependsOn(googleCommon)
lazy val googleFcm =
pekkoConnectorProject("google-fcm", "google.firebase.fcm", Dependencies.GoogleFcm, Test / fork := true)
@@ -251,17 +238,14 @@ lazy val influxdb = pekkoConnectorProject(
"influxdb",
Dependencies.InfluxDB,
Compile / scalacOptions ++= Seq(
- // JDK 11: method isAccessible in class AccessibleObject is deprecated
- "-Wconf:cat=deprecation:s"
- )
-)
+ // JDK 11: method isAccessible in class AccessibleObject is deprecated
+ "-Wconf:cat=deprecation:s"))
lazy val ironmq = pekkoConnectorProject(
"ironmq",
"ironmq",
Dependencies.IronMq,
- Test / fork := true
-)
+ Test / fork := true)
lazy val jms = pekkoConnectorProject("jms", "jms", Dependencies.Jms)
@@ -288,8 +272,7 @@ lazy val orientdb =
Dependencies.OrientDB,
Test / fork := true,
// note: orientdb client needs to be refactored to move off deprecated calls
- fatalWarnings := false
- )
+ fatalWarnings := false)
lazy val reference = internalProject("reference", Dependencies.Reference)
.dependsOn(testkit % Test)
@@ -300,14 +283,12 @@ lazy val pravega = pekkoConnectorProject(
"pravega",
"pravega",
Dependencies.Pravega,
- Test / fork := true
-)
+ Test / fork := true)
lazy val springWeb = pekkoConnectorProject(
"spring-web",
"spring.web",
- Dependencies.SpringWeb
-)
+ Dependencies.SpringWeb)
lazy val simpleCodecs = pekkoConnectorProject("simple-codecs", "simplecodecs")
@@ -344,77 +325,74 @@ lazy val docs = project
Preprocess / siteSubdirName := s"api/alpakka/${projectInfoVersion.value}",
Preprocess / sourceDirectory := (LocalRootProject / ScalaUnidoc / unidoc / target).value,
Preprocess / preprocessRules := Seq(
- ("http://www\\.eclipse\\.org/".r, _ => "https://www\\.eclipse\\.org/"),
- ("http://pravega\\.io/".r, _ => "https://pravega\\.io/"),
- ("http://www\\.scala-lang\\.org/".r, _ => "https://www\\.scala-lang\\.org/"),
- ("https://javadoc\\.io/page/".r, _ => "https://javadoc\\.io/static/")
- ),
+ ("http://www\\.eclipse\\.org/".r, _ => "https://www\\.eclipse\\.org/"),
+ ("http://pravega\\.io/".r, _ => "https://pravega\\.io/"),
+ ("http://www\\.scala-lang\\.org/".r, _ => "https://www\\.scala-lang\\.org/"),
+ ("https://javadoc\\.io/page/".r, _ => "https://javadoc\\.io/static/")),
Paradox / siteSubdirName := s"docs/alpakka/${projectInfoVersion.value}",
paradoxProperties ++= Map(
- "akka.version" -> Dependencies.AkkaVersion,
- "akka-http.version" -> Dependencies.AkkaHttpVersion,
- "hadoop.version" -> Dependencies.HadoopVersion,
- "extref.github.base_url" -> s"https://github.com/akka/alpakka/tree/${if (isSnapshot.value) "master"
+ "akka.version" -> Dependencies.AkkaVersion,
+ "akka-http.version" -> Dependencies.AkkaHttpVersion,
+ "hadoop.version" -> Dependencies.HadoopVersion,
+ "extref.github.base_url" -> s"https://github.com/akka/alpakka/tree/${if (isSnapshot.value) "master"
else "v" + version.value}/%s",
- "extref.akka.base_url" -> s"https://doc.akka.io/docs/akka/${Dependencies.AkkaBinaryVersion}/%s",
- "scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka/${Dependencies.AkkaBinaryVersion}",
- "javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka/${Dependencies.AkkaBinaryVersion}/",
- "javadoc.akka.link_style" -> "direct",
- "extref.akka-http.base_url" -> s"https://doc.akka.io/docs/akka-http/${Dependencies.AkkaHttpBinaryVersion}/%s",
- "scaladoc.akka.http.base_url" -> s"https://doc.akka.io/api/akka-http/${Dependencies.AkkaHttpBinaryVersion}/",
- "javadoc.akka.http.base_url" -> s"https://doc.akka.io/japi/akka-http/${Dependencies.AkkaHttpBinaryVersion}/",
- // Akka gRPC
- "akka-grpc.version" -> Dependencies.AkkaGrpcBinaryVersion,
- "extref.akka-grpc.base_url" -> s"https://doc.akka.io/docs/akka-grpc/${Dependencies.AkkaGrpcBinaryVersion}/%s",
- // Couchbase
- "couchbase.version" -> Dependencies.CouchbaseVersion,
- "extref.couchbase.base_url" -> s"https://docs.couchbase.com/java-sdk/${Dependencies.CouchbaseVersionForDocs}/%s",
- // Java
- "extref.java-api.base_url" -> "https://docs.oracle.com/javase/8/docs/api/index.html?%s.html",
- "extref.geode.base_url" -> s"https://geode.apache.org/docs/guide/${Dependencies.GeodeVersionForDocs}/%s",
- "extref.javaee-api.base_url" -> "https://docs.oracle.com/javaee/7/api/index.html?%s.html",
- "extref.paho-api.base_url" -> "https://www.eclipse.org/paho/files/javadoc/index.html?%s.html",
- "extref.pravega.base_url" -> s"https://cncf.pravega.io/docs/${Dependencies.PravegaVersionForDocs}/%s",
- "extref.slick.base_url" -> s"https://scala-slick.org/doc/${Dependencies.SlickVersion}/%s",
- // Cassandra
- "extref.cassandra.base_url" -> s"https://cassandra.apache.org/doc/${Dependencies.CassandraVersionInDocs}/%s",
- "extref.cassandra-driver.base_url" -> s"https://docs.datastax.com/en/developer/java-driver/${Dependencies.CassandraDriverVersionInDocs}/%s",
- "javadoc.com.datastax.oss.base_url" -> s"https://docs.datastax.com/en/drivers/java/${Dependencies.CassandraDriverVersionInDocs}/",
- // Solr
- "extref.solr.base_url" -> s"https://lucene.apache.org/solr/guide/${Dependencies.SolrVersionForDocs}/%s",
- "javadoc.org.apache.solr.base_url" -> s"https://lucene.apache.org/solr/${Dependencies.SolrVersionForDocs}_0/solr-solrj/",
- // Java
- "javadoc.base_url" -> "https://docs.oracle.com/javase/8/docs/api/",
- "javadoc.javax.jms.base_url" -> "https://docs.oracle.com/javaee/7/api/",
- "javadoc.com.couchbase.base_url" -> s"https://docs.couchbase.com/sdk-api/couchbase-java-client-${Dependencies.CouchbaseVersion}/",
- "javadoc.io.pravega.base_url" -> s"http://pravega.io/docs/${Dependencies.PravegaVersionForDocs}/javadoc/clients/",
- "javadoc.org.apache.kudu.base_url" -> s"https://kudu.apache.org/releases/${Dependencies.KuduVersion}/apidocs/",
- "javadoc.org.apache.hadoop.base_url" -> s"https://hadoop.apache.org/docs/r${Dependencies.HadoopVersion}/api/",
- "javadoc.software.amazon.awssdk.base_url" -> "https://sdk.amazonaws.com/java/api/latest/",
- "javadoc.com.google.auth.base_url" -> "https://www.javadoc.io/doc/com.google.auth/google-auth-library-credentials/latest/",
- "javadoc.com.google.auth.link_style" -> "direct",
- "javadoc.com.fasterxml.jackson.annotation.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-annotations/latest/",
- "javadoc.com.fasterxml.jackson.annotation.link_style" -> "direct",
- // Scala
- "scaladoc.spray.json.base_url" -> s"https://javadoc.io/doc/io.spray/spray-json_${scalaBinaryVersion.value}/latest/",
- // Eclipse Paho client for MQTT
- "javadoc.org.eclipse.paho.client.mqttv3.base_url" -> "https://www.eclipse.org/paho/files/javadoc/",
- "javadoc.org.bson.codecs.configuration.base_url" -> "https://mongodb.github.io/mongo-java-driver/3.7/javadoc/",
- "scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/${scalaBinaryVersion.value}.x/",
- "scaladoc.akka.stream.alpakka.base_url" -> s"/${(Preprocess / siteSubdirName).value}/",
- "javadoc.akka.stream.alpakka.base_url" -> ""
- ),
+ "extref.akka.base_url" -> s"https://doc.akka.io/docs/akka/${Dependencies.AkkaBinaryVersion}/%s",
+ "scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka/${Dependencies.AkkaBinaryVersion}",
+ "javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka/${Dependencies.AkkaBinaryVersion}/",
+ "javadoc.akka.link_style" -> "direct",
+ "extref.akka-http.base_url" -> s"https://doc.akka.io/docs/akka-http/${Dependencies.AkkaHttpBinaryVersion}/%s",
+ "scaladoc.akka.http.base_url" -> s"https://doc.akka.io/api/akka-http/${Dependencies.AkkaHttpBinaryVersion}/",
+ "javadoc.akka.http.base_url" -> s"https://doc.akka.io/japi/akka-http/${Dependencies.AkkaHttpBinaryVersion}/",
+ // Akka gRPC
+ "akka-grpc.version" -> Dependencies.AkkaGrpcBinaryVersion,
+ "extref.akka-grpc.base_url" -> s"https://doc.akka.io/docs/akka-grpc/${Dependencies.AkkaGrpcBinaryVersion}/%s",
+ // Couchbase
+ "couchbase.version" -> Dependencies.CouchbaseVersion,
+ "extref.couchbase.base_url" -> s"https://docs.couchbase.com/java-sdk/${Dependencies.CouchbaseVersionForDocs}/%s",
+ // Java
+ "extref.java-api.base_url" -> "https://docs.oracle.com/javase/8/docs/api/index.html?%s.html",
+ "extref.geode.base_url" -> s"https://geode.apache.org/docs/guide/${Dependencies.GeodeVersionForDocs}/%s",
+ "extref.javaee-api.base_url" -> "https://docs.oracle.com/javaee/7/api/index.html?%s.html",
+ "extref.paho-api.base_url" -> "https://www.eclipse.org/paho/files/javadoc/index.html?%s.html",
+ "extref.pravega.base_url" -> s"https://cncf.pravega.io/docs/${Dependencies.PravegaVersionForDocs}/%s",
+ "extref.slick.base_url" -> s"https://scala-slick.org/doc/${Dependencies.SlickVersion}/%s",
+ // Cassandra
+ "extref.cassandra.base_url" -> s"https://cassandra.apache.org/doc/${Dependencies.CassandraVersionInDocs}/%s",
+ "extref.cassandra-driver.base_url" -> s"https://docs.datastax.com/en/developer/java-driver/${Dependencies.CassandraDriverVersionInDocs}/%s",
+ "javadoc.com.datastax.oss.base_url" -> s"https://docs.datastax.com/en/drivers/java/${Dependencies.CassandraDriverVersionInDocs}/",
+ // Solr
+ "extref.solr.base_url" -> s"https://lucene.apache.org/solr/guide/${Dependencies.SolrVersionForDocs}/%s",
+ "javadoc.org.apache.solr.base_url" -> s"https://lucene.apache.org/solr/${Dependencies.SolrVersionForDocs}_0/solr-solrj/",
+ // Java
+ "javadoc.base_url" -> "https://docs.oracle.com/javase/8/docs/api/",
+ "javadoc.javax.jms.base_url" -> "https://docs.oracle.com/javaee/7/api/",
+ "javadoc.com.couchbase.base_url" -> s"https://docs.couchbase.com/sdk-api/couchbase-java-client-${Dependencies.CouchbaseVersion}/",
+ "javadoc.io.pravega.base_url" -> s"http://pravega.io/docs/${Dependencies.PravegaVersionForDocs}/javadoc/clients/",
+ "javadoc.org.apache.kudu.base_url" -> s"https://kudu.apache.org/releases/${Dependencies.KuduVersion}/apidocs/",
+ "javadoc.org.apache.hadoop.base_url" -> s"https://hadoop.apache.org/docs/r${Dependencies.HadoopVersion}/api/",
+ "javadoc.software.amazon.awssdk.base_url" -> "https://sdk.amazonaws.com/java/api/latest/",
+ "javadoc.com.google.auth.base_url" -> "https://www.javadoc.io/doc/com.google.auth/google-auth-library-credentials/latest/",
+ "javadoc.com.google.auth.link_style" -> "direct",
+ "javadoc.com.fasterxml.jackson.annotation.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-annotations/latest/",
+ "javadoc.com.fasterxml.jackson.annotation.link_style" -> "direct",
+ // Scala
+ "scaladoc.spray.json.base_url" -> s"https://javadoc.io/doc/io.spray/spray-json_${scalaBinaryVersion.value}/latest/",
+ // Eclipse Paho client for MQTT
+ "javadoc.org.eclipse.paho.client.mqttv3.base_url" -> "https://www.eclipse.org/paho/files/javadoc/",
+ "javadoc.org.bson.codecs.configuration.base_url" -> "https://mongodb.github.io/mongo-java-driver/3.7/javadoc/",
+ "scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/${scalaBinaryVersion.value}.x/",
+ "scaladoc.akka.stream.alpakka.base_url" -> s"/${(Preprocess / siteSubdirName).value}/",
+ "javadoc.akka.stream.alpakka.base_url" -> ""),
paradoxGroups := Map("Language" -> Seq("Java", "Scala")),
paradoxRoots := List("examples/elasticsearch-samples.html",
- "examples/ftp-samples.html",
- "examples/jms-samples.html",
- "examples/mqtt-samples.html",
- "index.html"),
+ "examples/ftp-samples.html",
+ "examples/jms-samples.html",
+ "examples/mqtt-samples.html",
+ "index.html"),
resolvers += Resolver.jcenterRepo,
publishRsyncArtifacts += makeSite.value -> "www/",
publishRsyncHost := "akkarepo@gustav.akka.io",
- apidocRootPackage := "akka"
- )
+ apidocRootPackage := "akka")
lazy val testkit = internalProject("testkit", Dependencies.testkit)
@@ -424,12 +402,11 @@ lazy val `doc-examples` = project
.settings(
name := s"pekko-connectors-doc-examples",
publish / skip := true,
- Dependencies.`Doc-examples`
- )
+ Dependencies.`Doc-examples`)
def pekkoConnectorProject(projectId: String,
- moduleName: String,
- additionalSettings: sbt.Def.SettingsDefinition*): Project = {
+ moduleName: String,
+ additionalSettings: sbt.Def.SettingsDefinition*): Project = {
import com.typesafe.tools.mima.core._
Project(id = projectId, base = file(projectId))
.enablePlugins(AutomateHeaderPlugin)
@@ -439,16 +416,13 @@ def pekkoConnectorProject(projectId: String,
licenses := List(License.Apache2),
AutomaticModuleName.settings(s"akka.stream.alpakka.$moduleName"),
mimaPreviousArtifacts := Set(
- organization.value %% name.value % previousStableVersion.value
- .getOrElse("0.0.0")
- ),
+ organization.value %% name.value % previousStableVersion.value
+ .getOrElse("0.0.0")),
mimaBinaryIssueFilters ++= Seq(
- ProblemFilters.exclude[Problem]("*.impl.*"),
- // generated code
- ProblemFilters.exclude[Problem]("com.google.*")
- ),
- Test / parallelExecution := false
- )
+ ProblemFilters.exclude[Problem]("*.impl.*"),
+ // generated code
+ ProblemFilters.exclude[Problem]("com.google.*")),
+ Test / parallelExecution := false)
.settings(additionalSettings: _*)
.dependsOn(testkit % Test)
}
@@ -465,7 +439,6 @@ Global / onLoad := (Global / onLoad).value.andThen { s =>
val log = sLog.value
if (dynverGitDescribeOutput.value.hasNoTags)
log.error(
- s"Failed to derive version from git tags. Maybe run `git fetch --unshallow` or `git fetch upstream` on a fresh git clone from a fork? Derived version: $v"
- )
+ s"Failed to derive version from git tags. Maybe run `git fetch --unshallow` or `git fetch upstream` on a fresh git clone from a fork? Derived version: $v")
s
}
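
The verifyCodeFmt task only checks; per its own error messages, the commands that actually rewrite the sources from a shell are:

    sbt javafmtAll    # reformat Java sources
    sbt scalafmtAll   # reformat Scala sources
    sbt scalafmtSbt   # reformat the sbt build definition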
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala
index 727be3a5..dac15aee 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala
@@ -5,16 +5,16 @@
package akka.stream.alpakka.cassandra
import akka.ConfigurationException
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.discovery.Discovery
import akka.util.JavaDurationConverters._
import com.datastax.oss.driver.api.core.CqlSession
-import com.typesafe.config.{Config, ConfigFactory}
+import com.typesafe.config.{ Config, ConfigFactory }
import scala.collection.immutable
import scala.compat.java8.FutureConverters._
import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
/**
* [[https://doc.akka.io/docs/akka/current/discovery/index.html Akka Discovery]]
@@ -73,7 +73,7 @@ private[cassandra] object AkkaDiscoverySessionProvider {
 * Expect a `service-discovery` section in Config and use Akka Discovery to read the addresses for `name` within `lookup-timeout`.
*/
private def readNodes(config: Config)(implicit system: ActorSystem,
- ec: ExecutionContext): Future[immutable.Seq[String]] = {
+ ec: ExecutionContext): Future[immutable.Seq[String]] = {
val serviceConfig = config.getConfig("service-discovery")
val serviceName = serviceConfig.getString("name")
val lookupTimeout = serviceConfig.getDuration("lookup-timeout").asScala
@@ -85,14 +85,13 @@ private[cassandra] object AkkaDiscoverySessionProvider {
*/
private def readNodes(
serviceName: String,
- lookupTimeout: FiniteDuration
- )(implicit system: ActorSystem, ec: ExecutionContext): Future[immutable.Seq[String]] = {
+ lookupTimeout: FiniteDuration)(
+ implicit system: ActorSystem, ec: ExecutionContext): Future[immutable.Seq[String]] = {
Discovery(system).discovery.lookup(serviceName, lookupTimeout).map { resolved =>
resolved.addresses.map { target =>
target.host + ":" + target.port.getOrElse {
throw new ConfigurationException(
- s"Akka Discovery for Cassandra service [$serviceName] must provide a port for [${target.host}]"
- )
+ s"Akka Discovery for Cassandra service [$serviceName] must provide a port for [${target.host}]")
}
}
}
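
readNodes expects a `service-discovery` block with a `name` and a `lookup-timeout` inside the session's config section. A sketch of such a section, with placeholder values, expressed through ConfigFactory.parseString:

    import com.typesafe.config.ConfigFactory

    val sessionConfig = ConfigFactory.parseString("""
      service-discovery {
        name = "cassandra-service" // assumption: the logical service name to look up
        lookup-timeout = 1 second
      }
      """)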
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala
index 2b32180d..5fa716a6 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.cassandra
-import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
+import akka.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.annotation.InternalApi
import com.codahale.metrics.MetricRegistry
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala
index 85bfb957..f71f1089 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala
@@ -14,8 +14,8 @@ import scala.compat.java8.FutureConverters._
import scala.concurrent.Future
class CassandraSessionSettings private (val configPath: String,
- _metricsCategory: Option[String] = None,
- val init: Option[CqlSession => Future[Done]] = None) {
+ _metricsCategory: Option[String] = None,
+ val init: Option[CqlSession => Future[Done]] = None) {
def metricsCategory: String = _metricsCategory.getOrElse(configPath)
@@ -40,8 +40,8 @@ class CassandraSessionSettings private (val configPath: String,
def withInit(value: CqlSession => Future[Done]): CassandraSessionSettings = copy(init = Some(value))
private def copy(configPath: String = configPath,
- metricsCategory: Option[String] = _metricsCategory,
- init: Option[CqlSession => Future[Done]] = init) =
+ metricsCategory: Option[String] = _metricsCategory,
+ init: Option[CqlSession => Future[Done]] = init) =
new CassandraSessionSettings(configPath, metricsCategory, init)
override def toString: String =
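
CassandraSessionSettings is still built from a config path plus an optional init hook; `alpakka.cassandra` is the path the tests in this commit use. A sketch:

    import akka.Done
    import akka.stream.alpakka.cassandra.CassandraSessionSettings
    import scala.concurrent.Future

    val sessionSettings = CassandraSessionSettings("alpakka.cassandra")
      .withInit(session => Future.successful(Done)) // e.g. create keyspace and tables here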
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala
index 8b5fbf3a..29e6d0f1 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala
@@ -7,12 +7,12 @@ package akka.stream.alpakka.cassandra
import akka.util.JavaDurationConverters._
import com.datastax.oss.driver.api.core.cql.BatchType
-import scala.concurrent.duration.{FiniteDuration, _}
+import scala.concurrent.duration.{ FiniteDuration, _ }
class CassandraWriteSettings private (val parallelism: Int,
- val maxBatchSize: Int,
- val maxBatchWait: FiniteDuration,
- val batchType: BatchType) {
+ val maxBatchSize: Int,
+ val maxBatchWait: FiniteDuration,
+ val batchType: BatchType) {
require(parallelism > 0, s"Invalid value for parallelism: $parallelism. It should be > 0.")
require(maxBatchSize > 0, s"Invalid value for maxBatchSize: $maxBatchSize. It should be > 0.")
@@ -43,9 +43,9 @@ class CassandraWriteSettings private (val parallelism: Int,
copy(batchType = value)
private def copy(parallelism: Int = parallelism,
- maxBatchSize: Int = maxBatchSize,
- maxBatchWait: FiniteDuration = maxBatchWait,
- batchType: BatchType = batchType) =
+ maxBatchSize: Int = maxBatchSize,
+ maxBatchWait: FiniteDuration = maxBatchWait,
+ batchType: BatchType = batchType) =
new CassandraWriteSettings(parallelism, maxBatchSize, maxBatchWait, batchType)
override def toString: String =
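
CassandraWriteSettings follows the same pattern; both `defaults` and `create().withMaxBatchSize(...)` appear in this commit's test code. A sketch:

    import akka.stream.alpakka.cassandra.CassandraWriteSettings

    val writeSettings = CassandraWriteSettings.create()
      .withMaxBatchSize(1000) // must be > 0, enforced by the require above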
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala
index 1bdaced4..b7ff21ae 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala
@@ -4,13 +4,13 @@
package akka.stream.alpakka.cassandra
-import akka.actor.{ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider, ExtendedActorSystem }
import com.datastax.oss.driver.api.core.CqlSession
-import com.typesafe.config.{Config, ConfigFactory}
+import com.typesafe.config.{ Config, ConfigFactory }
import scala.collection.immutable
import scala.compat.java8.FutureConverters._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Failure
/**
@@ -82,9 +82,7 @@ object CqlSessionProvider {
new IllegalArgumentException(
s"Unable to create SessionProvider instance for class [$className], " +
"tried constructor with ActorSystem, Config, and only ActorSystem, and no parameters",
- ex
- )
- )
+ ex))
}
.get
}
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraFlow.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraFlow.scala
index 254c8421..f41e7c79 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraFlow.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraFlow.scala
@@ -7,8 +7,8 @@ package akka.stream.alpakka.cassandra.javadsl
import akka.NotUsed
import akka.stream.alpakka.cassandra.CassandraWriteSettings
import akka.stream.alpakka.cassandra.scaladsl
-import akka.stream.javadsl.{Flow, FlowWithContext}
-import com.datastax.oss.driver.api.core.cql.{BoundStatement, PreparedStatement}
+import akka.stream.javadsl.{ Flow, FlowWithContext }
+import com.datastax.oss.driver.api.core.cql.{ BoundStatement, PreparedStatement }
/**
* Java API to create Cassandra flows.
@@ -26,13 +26,12 @@ object CassandraFlow {
* @tparam T stream element type
*/
def create[T](session: CassandraSession,
- writeSettings: CassandraWriteSettings,
- cqlStatement: String,
- statementBinder: akka.japi.Function2[T, PreparedStatement, BoundStatement]): Flow[T, T, NotUsed] =
+ writeSettings: CassandraWriteSettings,
+ cqlStatement: String,
+ statementBinder: akka.japi.Function2[T, PreparedStatement, BoundStatement]): Flow[T, T, NotUsed] =
scaladsl.CassandraFlow
.create(writeSettings, cqlStatement, (t, preparedStatement) => statementBinder.apply(t, preparedStatement))(
- session.delegate
- )
+ session.delegate)
.asJava
/**
@@ -50,12 +49,11 @@ object CassandraFlow {
session: CassandraSession,
writeSettings: CassandraWriteSettings,
cqlStatement: String,
- statementBinder: akka.japi.Function2[T, PreparedStatement, BoundStatement]
- ): FlowWithContext[T, Ctx, T, Ctx, NotUsed] = {
+ statementBinder: akka.japi.Function2[T, PreparedStatement, BoundStatement])
+ : FlowWithContext[T, Ctx, T, Ctx, NotUsed] = {
scaladsl.CassandraFlow
.withContext(writeSettings, cqlStatement, (t, preparedStatement) => statementBinder.apply(t, preparedStatement))(
- session.delegate
- )
+ session.delegate)
.asJava
}
@@ -81,15 +79,15 @@ object CassandraFlow {
* @tparam K extracted key type for grouping into batches
*/
def createUnloggedBatch[T, K](session: CassandraSession,
- writeSettings: CassandraWriteSettings,
- cqlStatement: String,
- statementBinder: (T, PreparedStatement) => BoundStatement,
- groupingKey: akka.japi.Function[T, K]): Flow[T, T, NotUsed] = {
+ writeSettings: CassandraWriteSettings,
+ cqlStatement: String,
+ statementBinder: (T, PreparedStatement) => BoundStatement,
+ groupingKey: akka.japi.Function[T, K]): Flow[T, T, NotUsed] = {
scaladsl.CassandraFlow
.createBatch(writeSettings,
- cqlStatement,
- (t, preparedStatement) => statementBinder.apply(t, preparedStatement),
- t => groupingKey.apply(t))(session.delegate)
+ cqlStatement,
+ (t, preparedStatement) => statementBinder.apply(t, preparedStatement),
+ t => groupingKey.apply(t))(session.delegate)
.asJava
}
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala
index 991c47d2..241d760d 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala
@@ -4,10 +4,10 @@
package akka.stream.alpakka.cassandra.javadsl
-import java.util.{List => JList}
+import java.util.{ List => JList }
import java.util.Optional
-import java.util.concurrent.{CompletionStage, Executor}
-import java.util.function.{Function => JFunction}
+import java.util.concurrent.{ CompletionStage, Executor }
+import java.util.function.{ Function => JFunction }
import scala.annotation.varargs
import scala.jdk.CollectionConverters._
@@ -16,11 +16,11 @@ import scala.compat.java8.OptionConverters._
import scala.concurrent.ExecutionContext
import akka.Done
import akka.NotUsed
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import akka.stream.alpakka.cassandra.CassandraServerMetaData
-import akka.stream.alpakka.cassandra.{scaladsl, CqlSessionProvider}
+import akka.stream.alpakka.cassandra.{ scaladsl, CqlSessionProvider }
import akka.stream.javadsl.Source
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.cql.BatchStatement
@@ -46,32 +46,31 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C
* Use this constructor if you want to create a stand-alone `CassandraSession`.
*/
def this(system: ActorSystem,
- sessionProvider: CqlSessionProvider,
- executionContext: ExecutionContext,
- log: LoggingAdapter,
- metricsCategory: String,
- init: JFunction[CqlSession, CompletionStage[Done]],
- onClose: java.lang.Runnable) =
+ sessionProvider: CqlSessionProvider,
+ executionContext: ExecutionContext,
+ log: LoggingAdapter,
+ metricsCategory: String,
+ init: JFunction[CqlSession, CompletionStage[Done]],
+ onClose: java.lang.Runnable) =
this(
new scaladsl.CassandraSession(system,
- sessionProvider,
- executionContext,
- log,
- metricsCategory,
- session => init.apply(session).toScala,
- () => onClose.run())
- )
+ sessionProvider,
+ executionContext,
+ log,
+ metricsCategory,
+ session => init.apply(session).toScala,
+ () => onClose.run()))
/**
* Use this constructor if you want to create a stand-alone `CassandraSession`.
*/
def this(system: ClassicActorSystemProvider,
- sessionProvider: CqlSessionProvider,
- executionContext: ExecutionContext,
- log: LoggingAdapter,
- metricsCategory: String,
- init: JFunction[CqlSession, CompletionStage[Done]],
- onClose: java.lang.Runnable) =
+ sessionProvider: CqlSessionProvider,
+ executionContext: ExecutionContext,
+ log: LoggingAdapter,
+ metricsCategory: String,
+ init: JFunction[CqlSession, CompletionStage[Done]],
+ onClose: java.lang.Runnable) =
this(system.classicSystem, sessionProvider, executionContext, log, metricsCategory, init, onClose)
implicit private val ec = delegate.ec
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala
index fa0ab63a..34eba615 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala
@@ -8,7 +8,7 @@ import java.util.concurrent.CompletionStage
import akka.Done
import akka.actor.ClassicActorSystemProvider
-import akka.stream.alpakka.cassandra.{scaladsl, CassandraSessionSettings}
+import akka.stream.alpakka.cassandra.{ scaladsl, CassandraSessionSettings }
import com.datastax.oss.driver.api.core.CqlSession
import scala.compat.java8.FutureConverters._
@@ -56,7 +56,7 @@ final class CassandraSessionRegistry private (delegate: scaladsl.CassandraSessio
* Sessions in the session registry are closed after actor system termination.
*/
def sessionFor(configPath: String,
- init: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSession =
+ init: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSession =
new CassandraSession(delegate.sessionFor(configPath, ses => init(ses).toScala))
/**
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSource.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSource.scala
index 066aa7b0..33caceda 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSource.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSource.scala
@@ -8,7 +8,7 @@ import java.util.concurrent.CompletionStage
import akka.NotUsed
import akka.stream.javadsl.Source
-import com.datastax.oss.driver.api.core.cql.{Row, Statement}
+import com.datastax.oss.driver.api.core.cql.{ Row, Statement }
import scala.annotation.varargs
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala
index 695a6f5d..20747ab2 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala
@@ -7,8 +7,8 @@ package akka.stream.alpakka.cassandra.scaladsl
import akka.NotUsed
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.cassandra.CassandraWriteSettings
-import akka.stream.scaladsl.{Flow, FlowWithContext}
-import com.datastax.oss.driver.api.core.cql.{BatchStatement, BoundStatement, PreparedStatement}
+import akka.stream.scaladsl.{ Flow, FlowWithContext }
+import com.datastax.oss.driver.api.core.cql.{ BatchStatement, BoundStatement, PreparedStatement }
import scala.jdk.CollectionConverters._
import scala.concurrent.Future
@@ -31,8 +31,8 @@ object CassandraFlow {
def create[T](
writeSettings: CassandraWriteSettings,
cqlStatement: String,
- statementBinder: (T, PreparedStatement) => BoundStatement
- )(implicit session: CassandraSession): Flow[T, T, NotUsed] = {
+ statementBinder: (T, PreparedStatement) => BoundStatement)(
+ implicit session: CassandraSession): Flow[T, T, NotUsed] = {
Flow
.lazyFutureFlow { () =>
val prepare = session.prepare(cqlStatement)
@@ -61,8 +61,8 @@ object CassandraFlow {
def withContext[T, Ctx](
writeSettings: CassandraWriteSettings,
cqlStatement: String,
- statementBinder: (T, PreparedStatement) => BoundStatement
- )(implicit session: CassandraSession): FlowWithContext[T, Ctx, T, Ctx, NotUsed] = {
+ statementBinder: (T, PreparedStatement) => BoundStatement)(
+ implicit session: CassandraSession): FlowWithContext[T, Ctx, T, Ctx, NotUsed] = {
FlowWithContext.fromTuples {
Flow
.lazyFutureFlow { () =>
@@ -102,9 +102,9 @@ object CassandraFlow {
* @tparam K extracted key type for grouping into batches
*/
def createBatch[T, K](writeSettings: CassandraWriteSettings,
- cqlStatement: String,
- statementBinder: (T, PreparedStatement) => BoundStatement,
- groupingKey: T => K)(implicit session: CassandraSession): Flow[T, T, NotUsed] = {
+ cqlStatement: String,
+ statementBinder: (T, PreparedStatement) => BoundStatement,
+ groupingKey: T => K)(implicit session: CassandraSession): Flow[T, T, NotUsed] = {
Flow
.lazyFutureFlow { () =>
val prepareStatement: Future[PreparedStatement] = session.prepare(cqlStatement)
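
CassandraFlow.create after the move of the implicit parameter list still takes write settings, a CQL string, and a statement binder, with the CassandraSession supplied implicitly; the CassandraFlowSpec hunks below show it in full context. A condensed sketch (table name hypothetical):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.cassandra.CassandraWriteSettings
    import akka.stream.alpakka.cassandra.scaladsl.{ CassandraFlow, CassandraSession }
    import akka.stream.scaladsl.{ Sink, Source }

    implicit val system: ActorSystem = ActorSystem()
    implicit val session: CassandraSession = ??? // from CassandraSessionRegistry, sketched below

    Source(1 to 100)
      .via(
        CassandraFlow.create(CassandraWriteSettings.defaults,
          "INSERT INTO akka.test(id) VALUES (?)",
          (element, preparedStatement) => preparedStatement.bind(Int.box(element))))
      .runWith(Sink.ignore)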
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala
index dc424155..d05219e8 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala
@@ -7,18 +7,18 @@ package akka.stream.alpakka.cassandra.scaladsl
import akka.actor.NoSerializationVerificationNeeded
import akka.annotation.InternalApi
import akka.event.LoggingAdapter
-import akka.stream.alpakka.cassandra.{CassandraMetricsRegistry, CassandraServerMetaData, CqlSessionProvider}
-import akka.stream.scaladsl.{Sink, Source}
-import akka.stream.{Materializer, SystemMaterializer}
+import akka.stream.alpakka.cassandra.{ CassandraMetricsRegistry, CassandraServerMetaData, CqlSessionProvider }
+import akka.stream.scaladsl.{ Sink, Source }
+import akka.stream.{ Materializer, SystemMaterializer }
import akka.util.OptionVal
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.cql._
import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException
import scala.collection.immutable
import scala.compat.java8.FutureConverters._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NonFatal
/**
@@ -34,12 +34,12 @@ import scala.util.control.NonFatal
* All methods are non-blocking.
*/
final class CassandraSession(system: akka.actor.ActorSystem,
- sessionProvider: CqlSessionProvider,
- executionContext: ExecutionContext,
- log: LoggingAdapter,
- metricsCategory: String,
- init: CqlSession => Future[Done],
- onClose: () => Unit)
+ sessionProvider: CqlSessionProvider,
+ executionContext: ExecutionContext,
+ log: LoggingAdapter,
+ metricsCategory: String,
+ init: CqlSession => Future[Done],
+ onClose: () => Unit)
extends NoSerializationVerificationNeeded {
implicit private[akka] val ec: ExecutionContext = executionContext
@@ -92,8 +92,8 @@ final class CassandraSession(system: akka.actor.ActorSystem,
val result = selectOne("select cluster_name, data_center, release_version from system.local").map {
case Some(row) =>
new CassandraServerMetaData(row.getString("cluster_name"),
- row.getString("data_center"),
- row.getString("release_version"))
+ row.getString("data_center"),
+ row.getString("release_version"))
case None =>
log.warning("Couldn't retrieve serverMetaData from system.local table. No rows found.")
new CassandraServerMetaData("", "", "")
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala
index 3c5c71a5..820cd81a 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala
@@ -10,10 +10,10 @@ import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import akka.Done
-import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
+import akka.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.annotation.InternalStableApi
import akka.event.Logging
-import akka.stream.alpakka.cassandra.{CassandraSessionSettings, CqlSessionProvider}
+import akka.stream.alpakka.cassandra.{ CassandraSessionSettings, CqlSessionProvider }
import com.datastax.oss.driver.api.core.CqlSession
import com.typesafe.config.Config
@@ -82,24 +82,24 @@ final class CassandraSessionRegistry(system: ExtendedActorSystem) extends Extens
* that is different from the ActorSystem's config section for the `configPath`.
*/
@InternalStableApi private[akka] def sessionFor(settings: CassandraSessionSettings,
- sessionProviderConfig: Config): CassandraSession = {
+ sessionProviderConfig: Config): CassandraSession = {
val key = sessionKey(settings)
sessions.computeIfAbsent(key, _ => startSession(settings, key, sessionProviderConfig))
}
private def startSession(settings: CassandraSessionSettings,
- key: SessionKey,
- sessionProviderConfig: Config): CassandraSession = {
+ key: SessionKey,
+ sessionProviderConfig: Config): CassandraSession = {
val sessionProvider = CqlSessionProvider(system, sessionProviderConfig)
val log = Logging(system, classOf[CassandraSession])
val executionContext = system.dispatchers.lookup(sessionProviderConfig.getString("session-dispatcher"))
new CassandraSession(system,
- sessionProvider,
- executionContext,
- log,
- metricsCategory = settings.metricsCategory,
- init = settings.init.getOrElse(_ => Future.successful(Done)),
- onClose = () => sessions.remove(key))
+ sessionProvider,
+ executionContext,
+ log,
+ metricsCategory = settings.metricsCategory,
+ init = settings.init.getOrElse(_ => Future.successful(Done)),
+ onClose = () => sessions.remove(key))
}
/**
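
Sessions are obtained through this registry extension rather than constructed directly. A sketch of the usual entry point, assuming the public single-argument sessionFor(settings) overload (only the internal two-argument variant is visible in this hunk):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.cassandra.CassandraSessionSettings
    import akka.stream.alpakka.cassandra.scaladsl.{ CassandraSession, CassandraSessionRegistry }

    implicit val system: ActorSystem = ActorSystem()
    val session: CassandraSession =
      CassandraSessionRegistry.get(system).sessionFor(CassandraSessionSettings("alpakka.cassandra"))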
diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSource.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSource.scala
index ca41113e..a0d3148b 100644
--- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSource.scala
+++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSource.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.cassandra.scaladsl
import akka.NotUsed
import akka.stream.scaladsl.Source
-import com.datastax.oss.driver.api.core.cql.{Row, Statement}
+import com.datastax.oss.driver.api.core.cql.{ Row, Statement }
import scala.concurrent.Future
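
CassandraSource itself only changed an import; for completeness, a read-side sketch assuming the apply(cql, bindValues*) overload and an implicit session as above (table name hypothetical):

    import akka.stream.alpakka.cassandra.scaladsl.CassandraSource
    import akka.stream.scaladsl.Sink

    val ids = CassandraSource("SELECT id FROM akka.test")
      .map(row => row.getInt("id"))
      .runWith(Sink.seq)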
diff --git a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala
index 75010305..87e09462 100644
--- a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala
+++ b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala
@@ -11,12 +11,12 @@ import akka.Done
import akka.testkit.TestKitBase
import com.datastax.oss.driver.api.core.cql._
import org.scalatest._
-import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures}
+import org.scalatest.concurrent.{ PatienceConfiguration, ScalaFutures }
import scala.jdk.CollectionConverters._
import scala.collection.immutable
import scala.concurrent.duration._
-import scala.concurrent.{Await, Future}
+import scala.concurrent.{ Await, Future }
import scala.util.control.NonFatal
import scala.compat.java8.FutureConverters._
@@ -40,16 +40,14 @@ trait CassandraLifecycleBase {
def createKeyspace(session: CassandraSession, name: String): Future[Done] = {
session.executeWrite(
new SimpleStatementBuilder(
- s"""CREATE KEYSPACE $name WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1'};"""
- ).setTimeout(keyspaceTimeout)
- .build()
- )
+ s"""CREATE KEYSPACE $name WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1'};""").setTimeout(
+ keyspaceTimeout)
+ .build())
}
def dropKeyspace(session: CassandraSession, name: String): Future[Done] =
session.executeWrite(
- new SimpleStatementBuilder(s"""DROP KEYSPACE IF EXISTS $name;""").setTimeout(keyspaceTimeout).build()
- )
+ new SimpleStatementBuilder(s"""DROP KEYSPACE IF EXISTS $name;""").setTimeout(keyspaceTimeout).build())
def createKeyspace(name: String): Future[Done] = withSchemaMetadataDisabled(createKeyspace(lifecycleSession, name))
diff --git a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala
index 15374219..f7899220 100644
--- a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala
+++ b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionPerformanceSpec.scala
@@ -8,8 +8,8 @@ import scala.concurrent.Await
import akka.actor.ActorSystem
import akka.event.Logging
-import akka.stream.alpakka.cassandra.{CassandraSessionSettings, CassandraWriteSettings}
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.alpakka.cassandra.{ CassandraSessionSettings, CassandraWriteSettings }
+import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import scala.concurrent.duration._
@@ -51,8 +51,7 @@ final class CassandraSessionPerformanceSpec extends CassandraSpecBase(ActorSyste
CassandraWriteSettings.create().withMaxBatchSize(10000),
s"INSERT INTO $dataTable(partition_id, id, value, seq_nr) VALUES (?, ?, ?, ?)",
(d: Int, ps) => ps.bind(Long.box(partitionId), id, Long.box(d), Long.box(d)),
- (_: Int) => partitionId
- )(lifecycleSession)
+ (_: Int) => partitionId)(lifecycleSession)
}
.runWith(Sink.ignore)
}
diff --git a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSpecBase.scala b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSpecBase.scala
index feb8e1bf..d016464a 100644
--- a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSpecBase.scala
+++ b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSpecBase.scala
@@ -9,10 +9,10 @@ import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import akka.testkit.TestKit
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
+import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import scala.concurrent.ExecutionContext
-import akka.stream.{Materializer, SystemMaterializer}
+import akka.stream.{ Materializer, SystemMaterializer }
/**
* All the tests must be run with a local Cassandra running on default port 9042.
diff --git a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala b/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala
index b968de45..d928add0 100644
--- a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala
+++ b/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala
@@ -53,9 +53,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr
s"INSERT INTO $dataTable (partition, key, count) VALUES ('A', 'c', 3);",
s"INSERT INTO $dataTable (partition, key, count) VALUES ('A', 'd', 4);",
s"INSERT INTO $dataTable (partition, key, count) VALUES ('B', 'e', 5);",
- s"INSERT INTO $dataTable (partition, key, count) VALUES ('B', 'f', 6);"
- )
- )
+ s"INSERT INTO $dataTable (partition, key, count) VALUES ('B', 'f', 6);"))
} yield Done
}.futureValue mustBe Done
}
@@ -138,15 +136,13 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr
"create indexes" in {
withSchemaMetadataDisabled(
- lifecycleSession.executeDDL(s"CREATE INDEX IF NOT EXISTS count_idx ON $dataTable(count)")
- ).futureValue mustBe Done
+ lifecycleSession.executeDDL(
+ s"CREATE INDEX IF NOT EXISTS count_idx ON $dataTable(count)")).futureValue mustBe Done
val row =
await(
- session.selectOne("SELECT * FROM system_schema.indexes WHERE table_name = ? ALLOW FILTERING", dataTableName)
- )
+ session.selectOne("SELECT * FROM system_schema.indexes WHERE table_name = ? ALLOW FILTERING", dataTableName))
row.asScala.map(index => index.getString("table_name") -> index.getString("index_name")) mustBe Some(
- dataTableName -> "count_idx"
- )
+ dataTableName -> "count_idx")
}
}
diff --git a/cassandra/src/test/scala/docs/scaladsl/AkkaDiscoverySpec.scala b/cassandra/src/test/scala/docs/scaladsl/AkkaDiscoverySpec.scala
index cc2cac3f..b59e90a2 100644
--- a/cassandra/src/test/scala/docs/scaladsl/AkkaDiscoverySpec.scala
+++ b/cassandra/src/test/scala/docs/scaladsl/AkkaDiscoverySpec.scala
@@ -7,7 +7,7 @@ package docs.scaladsl
import akka.Done
import akka.actor.ActorSystem
import akka.stream.alpakka.cassandra.CassandraSessionSettings
-import akka.stream.alpakka.cassandra.scaladsl.{CassandraSession, CassandraSessionRegistry, CassandraSpecBase}
+import akka.stream.alpakka.cassandra.scaladsl.{ CassandraSession, CassandraSessionRegistry, CassandraSpecBase }
import akka.stream.scaladsl.Sink
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
diff --git a/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala b/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala
index 91ab3521..70c4bd3b 100644
--- a/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala
+++ b/cassandra/src/test/scala/docs/scaladsl/CassandraFlowSpec.scala
@@ -4,11 +4,11 @@
package docs.scaladsl
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import akka.actor.ActorSystem
-import akka.stream.alpakka.cassandra.{CassandraSessionSettings, CassandraWriteSettings}
-import akka.stream.alpakka.cassandra.scaladsl.{CassandraFlow, CassandraSession, CassandraSource, CassandraSpecBase}
-import akka.stream.scaladsl.{Sink, Source, SourceWithContext}
+import akka.stream.alpakka.cassandra.{ CassandraSessionSettings, CassandraWriteSettings }
+import akka.stream.alpakka.cassandra.scaladsl.{ CassandraFlow, CassandraSession, CassandraSource, CassandraSpecBase }
+import akka.stream.scaladsl.{ Sink, Source, SourceWithContext }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import scala.collection.immutable
@@ -16,9 +16,9 @@ import scala.concurrent.Future
class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec")) {
- //#element-to-insert
+ // #element-to-insert
case class ToInsert(id: Integer, cc: Integer)
- //#element-to-insert
+ // #element-to-insert
val sessionSettings = CassandraSessionSettings("alpakka.cassandra")
val data = 1 until 103
@@ -40,9 +40,8 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
val written: Future[Done] = Source(data)
.via(
CassandraFlow.create(CassandraWriteSettings.defaults,
- s"INSERT INTO $table(id) VALUES (?)",
- (element, preparedStatement) => preparedStatement.bind(Int.box(element)))
- )
+ s"INSERT INTO $table(id) VALUES (?)",
+ (element, preparedStatement) => preparedStatement.bind(Int.box(element))))
.runWith(Sink.ignore)
written.futureValue mustBe Done
@@ -65,7 +64,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
// #prepared
import akka.stream.alpakka.cassandra.CassandraWriteSettings
import akka.stream.alpakka.cassandra.scaladsl.CassandraFlow
- import com.datastax.oss.driver.api.core.cql.{BoundStatement, PreparedStatement}
+ import com.datastax.oss.driver.api.core.cql.{ BoundStatement, PreparedStatement }
case class Person(id: Int, name: String, city: String)
@@ -78,9 +77,8 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
val written: Future[immutable.Seq[Person]] = Source(persons)
.via(
CassandraFlow.create(CassandraWriteSettings.defaults,
- s"INSERT INTO $table(id, name, city) VALUES (?, ?, ?)",
- statementBinder)
- )
+ s"INSERT INTO $table(id, name, city) VALUES (?, ?, ?)",
+ statementBinder))
.runWith(Sink.seq)
// #prepared
@@ -112,8 +110,8 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
}
val persons =
immutable.Seq(Person(12, "John", "London") -> AckHandle(12),
- Person(43, "Umberto", "Roma") -> AckHandle(43),
- Person(56, "James", "Chicago") -> AckHandle(56))
+ Person(43, "Umberto", "Roma") -> AckHandle(43),
+ Person(56, "James", "Chicago") -> AckHandle(56))
// #withContext
val personsAndHandles: SourceWithContext[Person, AckHandle, NotUsed] = // ???
@@ -126,9 +124,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
CassandraFlow.withContext(
CassandraWriteSettings.defaults,
s"INSERT INTO $table(id, name, city) VALUES (?, ?, ?)",
- (person, preparedStatement) => preparedStatement.bind(Int.box(person.id), person.name, person.city)
- )
- )
+ (person, preparedStatement) => preparedStatement.bind(Int.box(person.id), person.name, person.city)))
.asSource
.mapAsync(1) {
case (_, handle) => handle.ack()
@@ -169,9 +165,7 @@ class CassandraFlowSpec extends CassandraSpecBase(ActorSystem("CassandraFlowSpec
s"INSERT INTO $table(id, name, city) VALUES (?, ?, ?)",
statementBinder =
(person, preparedStatement) => preparedStatement.bind(Int.box(person.id), person.name, person.city),
- groupingKey = person => person.id
- )
- )
+ groupingKey = person => person.id))
.runWith(Sink.ignore)
written.futureValue mustBe Done
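
For orientation, a minimal standalone sketch of the CassandraFlow.create pattern reformatted above; the table name demo.ints and the session wiring are assumptions for illustration.

import akka.Done
import akka.actor.ActorSystem
import akka.stream.alpakka.cassandra.{ CassandraSessionSettings, CassandraWriteSettings }
import akka.stream.alpakka.cassandra.scaladsl.{ CassandraFlow, CassandraSession, CassandraSessionRegistry }
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.Future

implicit val system: ActorSystem = ActorSystem("example")
implicit val session: CassandraSession =
  CassandraSessionRegistry(system).sessionFor(CassandraSessionSettings("alpakka.cassandra"))

// Every element is bound into the prepared INSERT and written with the
// parallelism configured in CassandraWriteSettings; elements pass through
// unchanged, so Sink.ignore just awaits completion.
val written: Future[Done] = Source(1 to 100)
  .via(CassandraFlow.create(CassandraWriteSettings.defaults,
    "INSERT INTO demo.ints(id) VALUES (?)",
    (element, preparedStatement) => preparedStatement.bind(Int.box(element))))
  .runWith(Sink.ignore)
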
diff --git a/cassandra/src/test/scala/docs/scaladsl/CassandraSourceSpec.scala b/cassandra/src/test/scala/docs/scaladsl/CassandraSourceSpec.scala
index df808eac..0c2a97bd 100644
--- a/cassandra/src/test/scala/docs/scaladsl/CassandraSourceSpec.scala
+++ b/cassandra/src/test/scala/docs/scaladsl/CassandraSourceSpec.scala
@@ -6,7 +6,7 @@ package docs.scaladsl
import akka.Done
import akka.actor.ActorSystem
-import akka.stream.alpakka.cassandra.scaladsl.{CassandraSession, CassandraSource, CassandraSpecBase}
+import akka.stream.alpakka.cassandra.scaladsl.{ CassandraSession, CassandraSource, CassandraSpecBase }
import akka.stream.scaladsl.Sink
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
@@ -15,9 +15,9 @@ import scala.concurrent.Future
class CassandraSourceSpec extends CassandraSpecBase(ActorSystem("CassandraSourceSpec")) {
- //#element-to-insert
+ // #element-to-insert
case class ToInsert(id: Integer, cc: Integer)
- //#element-to-insert
+ // #element-to-insert
val sessionSettings = akka.stream.alpakka.cassandra.CassandraSessionSettings()
val data = 1 until 103
@@ -80,7 +80,7 @@ class CassandraSourceSpec extends CassandraSpecBase(ActorSystem("CassandraSource
"stream the result of a Cassandra statement with several pages" in assertAllStagesStopped {
// #statement
- import com.datastax.oss.driver.api.core.cql.{Row, SimpleStatement}
+ import com.datastax.oss.driver.api.core.cql.{ Row, SimpleStatement }
val stmt = SimpleStatement.newInstance(s"SELECT * FROM $intTable").setPageSize(20)
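
A short sketch of how the paged statement above is typically consumed, assuming a CassandraSource.apply overload taking a driver Statement and an implicit CassandraSession in scope; table name is a placeholder.

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.alpakka.cassandra.CassandraSessionSettings
import akka.stream.alpakka.cassandra.scaladsl.{ CassandraSession, CassandraSessionRegistry, CassandraSource }
import akka.stream.scaladsl.{ Sink, Source }
import com.datastax.oss.driver.api.core.cql.{ Row, SimpleStatement }

implicit val system: ActorSystem = ActorSystem("example")
implicit val session: CassandraSession =
  CassandraSessionRegistry(system).sessionFor(CassandraSessionSettings("alpakka.cassandra"))

// setPageSize(20) asks the driver for 20 rows per page; the source keeps
// fetching pages until the result set is exhausted.
val stmt = SimpleStatement.newInstance("SELECT * FROM demo.ints").setPageSize(20)
val rows: Source[Row, NotUsed] = CassandraSource(stmt)
val ids = rows.map(_.getInt("id")).runWith(Sink.seq)
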
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseResponseException.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseResponseException.scala
index 9cd876bf..1ff29f69 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseResponseException.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseResponseException.scala
@@ -20,6 +20,5 @@ private[akka] object CouchbaseResponseException {
def apply(json: JsonObject): CouchbaseResponseException =
new CouchbaseResponseException(
msg = if (json.containsKey("msg")) json.getString("msg") else "",
- code = if (json.containsKey("code")) Some(json.getInt("code")) else None
- )
+ code = if (json.containsKey("code")) Some(json.getInt("code")) else None)
}
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala
index 342d025f..b806f5ea 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala
@@ -7,15 +7,15 @@ package akka.stream.alpakka.couchbase
import java.util.concurrent.CompletionStage
import java.util.concurrent.atomic.AtomicReference
-import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
+import akka.actor.{ ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.couchbase.impl.CouchbaseClusterRegistry
-import akka.stream.alpakka.couchbase.javadsl.{CouchbaseSession => JCouchbaseSession}
+import akka.stream.alpakka.couchbase.javadsl.{ CouchbaseSession => JCouchbaseSession }
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import scala.annotation.tailrec
import scala.compat.java8.FutureConverters._
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* This Couchbase session registry makes it possible to share Couchbase sessions between multiple use sites
@@ -65,7 +65,7 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens
val key = SessionKey(enrichedSettings, bucketName)
sessions.get.get(key) match {
case Some(futureSession) => futureSession
- case _ => startSession(key)
+ case _ => startSession(key)
}
}(system.dispatcher)
@@ -91,8 +91,7 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens
val session = clusterRegistry
.clusterFor(key.settings)
.flatMap(cluster => CouchbaseSession(cluster, key.bucketName)(blockingDispatcher))(
- ExecutionContexts.parasitic
- )
+ ExecutionContexts.parasitic)
promise.completeWith(session)
promise.future
} else {
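
This hunk only re-wraps the cache path, but a brief usage sketch of the registry may help; the bucket name "akka" and the config-based CouchbaseSessionSettings(system) factory are assumptions.

import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.{ CouchbaseSessionRegistry, CouchbaseSessionSettings }
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import scala.concurrent.Future

implicit val system: ActorSystem = ActorSystem("example")

// Sessions are cached per (settings, bucket) key, so repeated calls with the
// same arguments share one Future[CouchbaseSession].
val session: Future[CouchbaseSession] =
  CouchbaseSessionRegistry(system).sessionFor(CouchbaseSessionSettings(system), "akka")
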
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseClusterRegistry.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseClusterRegistry.scala
index 529f914c..7adbc43a 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseClusterRegistry.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseClusterRegistry.scala
@@ -14,7 +14,7 @@ import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import com.couchbase.client.java.AsyncCluster
import scala.annotation.tailrec
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* Internal API
@@ -31,7 +31,7 @@ final private[couchbase] class CouchbaseClusterRegistry(system: ActorSystem) {
def clusterFor(settings: CouchbaseSessionSettings): Future[AsyncCluster] =
clusters.get.get(settings) match {
case Some(futureSession) => futureSession
- case _ => createClusterClient(settings)
+ case _ => createClusterClient(settings)
}
@tailrec
@@ -45,8 +45,7 @@ final private[couchbase] class CouchbaseClusterRegistry(system: ActorSystem) {
log.info("Starting Couchbase client for nodes [{}]", nodesAsString)
promise.completeWith(
CouchbaseSession
- .createClusterClient(settings)(blockingDispatcher)
- )
+ .createClusterClient(settings)(blockingDispatcher))
val future = promise.future
system.registerOnTermination {
future.foreach { cluster =>
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala
index 88bef879..cd3667d7 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala
@@ -9,15 +9,15 @@ import java.util.concurrent.TimeUnit
import akka.annotation.InternalApi
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
-import akka.stream.alpakka.couchbase.{javadsl, CouchbaseWriteSettings}
+import akka.stream.alpakka.couchbase.{ javadsl, CouchbaseWriteSettings }
import akka.stream.scaladsl.Source
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.couchbase.client.java.bucket.AsyncBucketManager
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
import com.couchbase.client.java.query.util.IndexInfo
-import com.couchbase.client.java.query.{N1qlQuery, Statement}
-import com.couchbase.client.java.{AsyncBucket, AsyncCluster}
+import com.couchbase.client.java.query.{ N1qlQuery, Statement }
+import com.couchbase.client.java.{ AsyncBucket, AsyncCluster }
import rx.RxReactiveStreams
import scala.concurrent.Future
@@ -47,11 +47,11 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
def insertDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): Future[T] =
singleObservableToFuture(asyncBucket.insert(document,
- writeSettings.persistTo,
- writeSettings.replicateTo,
- writeSettings.timeout.toMillis,
- TimeUnit.MILLISECONDS),
- document)
+ writeSettings.persistTo,
+ writeSettings.replicateTo,
+ writeSettings.timeout.toMillis,
+ TimeUnit.MILLISECONDS),
+ document)
def get(id: String): Future[Option[JsonDocument]] =
zeroOrOneObservableToFuture(asyncBucket.get(id))
@@ -63,8 +63,8 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
zeroOrOneObservableToFuture(asyncBucket.get(id, timeout.toMillis, TimeUnit.MILLISECONDS))
def get[T <: Document[_]](id: String,
- timeout: FiniteDuration,
- documentClass: Class[T]): scala.concurrent.Future[Option[T]] =
+ timeout: FiniteDuration,
+ documentClass: Class[T]): scala.concurrent.Future[Option[T]] =
zeroOrOneObservableToFuture(asyncBucket.get(id, documentClass, timeout.toMillis, TimeUnit.MILLISECONDS))
def upsert(document: JsonDocument): Future[JsonDocument] = upsertDoc(document)
@@ -77,11 +77,11 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
def upsertDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): Future[T] =
singleObservableToFuture(asyncBucket.upsert(document,
- writeSettings.persistTo,
- writeSettings.replicateTo,
- writeSettings.timeout.toMillis,
- TimeUnit.MILLISECONDS),
- document.id)
+ writeSettings.persistTo,
+ writeSettings.replicateTo,
+ writeSettings.timeout.toMillis,
+ TimeUnit.MILLISECONDS),
+ document.id)
def replace(document: JsonDocument): Future[JsonDocument] = replaceDoc(document)
@@ -93,11 +93,11 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
def replaceDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): Future[T] =
singleObservableToFuture(asyncBucket.replace(document,
- writeSettings.persistTo,
- writeSettings.replicateTo,
- writeSettings.timeout.toMillis,
- TimeUnit.MILLISECONDS),
- document.id)
+ writeSettings.persistTo,
+ writeSettings.replicateTo,
+ writeSettings.timeout.toMillis,
+ TimeUnit.MILLISECONDS),
+ document.id)
def remove(id: String): Future[Done] =
singleObservableToFuture(asyncBucket.remove(id), id)
@@ -105,11 +105,11 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
def remove(id: String, writeSettings: CouchbaseWriteSettings): Future[Done] =
singleObservableToFuture(asyncBucket.remove(id,
- writeSettings.persistTo,
- writeSettings.replicateTo,
- writeSettings.timeout.toMillis,
- TimeUnit.MILLISECONDS),
- id)
+ writeSettings.persistTo,
+ writeSettings.replicateTo,
+ writeSettings.timeout.toMillis,
+ TimeUnit.MILLISECONDS),
+ id)
.map(_ => Done)(ExecutionContexts.parasitic)
def streamedQuery(query: N1qlQuery): Source[JsonObject, NotUsed] =
@@ -130,13 +130,13 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
def counter(id: String, delta: Long, initial: Long, writeSettings: CouchbaseWriteSettings): Future[Long] =
singleObservableToFuture(asyncBucket.counter(id,
- delta,
- initial,
- writeSettings.persistTo,
- writeSettings.replicateTo,
- writeSettings.timeout.toMillis,
- TimeUnit.MILLISECONDS),
- id)
+ delta,
+ initial,
+ writeSettings.persistTo,
+ writeSettings.replicateTo,
+ writeSettings.timeout.toMillis,
+ TimeUnit.MILLISECONDS),
+ id)
.map(_.content(): Long)(ExecutionContexts.parasitic)
def close(): Future[Done] =
@@ -162,11 +162,8 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
.flatMap(
func1Observable[AsyncBucketManager, Boolean](
_.createN1qlIndex(indexName, ignoreIfExist, false, fields: _*)
- .map(func1(Boolean.unbox))
- )
- ),
- s"Create index: $indexName"
- )
+ .map(func1(Boolean.unbox)))),
+ s"Create index: $indexName")
override def listIndexes(): Source[IndexInfo, NotUsed] =
Source.fromPublisher(
@@ -174,9 +171,6 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl
asyncBucket
.bucketManager()
.flatMap(
- func1Observable((abm: AsyncBucketManager) => abm.listN1qlIndexes())
- )
- )
- )
+ func1Observable((abm: AsyncBucketManager) => abm.listN1qlIndexes()))))
}
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala
index bf87ed35..43eb25a6 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala
@@ -14,17 +14,17 @@ import akka.stream.alpakka.couchbase.CouchbaseWriteSettings
import akka.stream.alpakka.couchbase.javadsl
import akka.stream.alpakka.couchbase.scaladsl
import akka.stream.javadsl.Source
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.couchbase.client.java.AsyncBucket
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
import com.couchbase.client.java.query.util.IndexInfo
-import com.couchbase.client.java.query.{N1qlQuery, Statement}
+import com.couchbase.client.java.query.{ N1qlQuery, Statement }
import scala.compat.java8.FutureConverters._
import scala.compat.java8.OptionConverters._
import scala.concurrent.duration.FiniteDuration
-import scala.concurrent.{duration, Future}
+import scala.concurrent.{ duration, Future }
/**
* INTERNAL API
@@ -43,13 +43,12 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co
override def insert(
document: JsonDocument,
- writeSettings: CouchbaseWriteSettings
- ): CompletionStage[JsonDocument] = delegate.insert(document, writeSettings).toJava
+ writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] =
+ delegate.insert(document, writeSettings).toJava
override def insertDoc[T <: Document[_]](
document: T,
- writeSettings: CouchbaseWriteSettings
- ): CompletionStage[T] = delegate.insertDoc(document, writeSettings).toJava
+ writeSettings: CouchbaseWriteSettings): CompletionStage[T] = delegate.insertDoc(document, writeSettings).toJava
override def get(id: String): CompletionStage[Optional[JsonDocument]] =
futureOptToJava(delegate.get(id))
@@ -107,8 +106,8 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co
id: String,
delta: Long,
initial: Long,
- writeSettings: CouchbaseWriteSettings
- ): CompletionStage[Long] = delegate.counter(id, delta, initial, writeSettings).toJava
+ writeSettings: CouchbaseWriteSettings): CompletionStage[Long] =
+ delegate.counter(id, delta, initial, writeSettings).toJava
override def close(): CompletionStage[Done] = delegate.close().toJava
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/RxUtilities.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/RxUtilities.scala
index dc3411a8..b2858bf2 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/RxUtilities.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/RxUtilities.scala
@@ -7,11 +7,11 @@ package akka.stream.alpakka.couchbase.impl
import akka.annotation.InternalApi
import akka.stream.alpakka.couchbase.CouchbaseResponseException
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.query.{AsyncN1qlQueryResult, AsyncN1qlQueryRow}
+import com.couchbase.client.java.query.{ AsyncN1qlQueryResult, AsyncN1qlQueryRow }
import rx.functions.Func1
-import rx.{Observable, Subscriber}
+import rx.{ Observable, Subscriber }
-import scala.concurrent.{Future, Promise}
+import scala.concurrent.{ Future, Promise }
/**
* INTERNAL API
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseFlow.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseFlow.scala
index c9cac312..efbfbe81 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseFlow.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseFlow.scala
@@ -7,7 +7,7 @@ package akka.stream.alpakka.couchbase.javadsl
import akka.NotUsed
import akka.stream.alpakka.couchbase._
import akka.stream.javadsl.Flow
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
/**
* Java API: Factory methods for Couchbase flows.
@@ -24,24 +24,24 @@ object CouchbaseFlow {
* Create a flow to query Couchbase by `id` and emit documents of the given class.
*/
def fromId[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- bucketName: String,
- target: Class[T]): Flow[String, T, NotUsed] =
+ bucketName: String,
+ target: Class[T]): Flow[String, T, NotUsed] =
scaladsl.CouchbaseFlow.fromId(sessionSettings, bucketName, target).asJava
/**
* Create a flow to update or insert a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def upsert(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
scaladsl.CouchbaseFlow.upsert(sessionSettings, writeSettings, bucketName).asJava
/**
* Create a flow to update or insert a Couchbase document of the given class.
*/
def upsertDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, T, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, T, NotUsed] =
scaladsl.CouchbaseFlow.upsertDoc(sessionSettings, writeSettings, bucketName).asJava
/**
@@ -49,24 +49,24 @@ object CouchbaseFlow {
* can be handled in-stream.
*/
def upsertDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
scaladsl.CouchbaseFlow.upsertDocWithResult(sessionSettings, writeSettings, bucketName).asJava
/**
* Create a flow to replace a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def replace(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
scaladsl.CouchbaseFlow.replace(sessionSettings, writeSettings, bucketName).asJava
/**
* Create a flow to replace a Couchbase document of the given class.
*/
def replaceDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, T, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, T, NotUsed] =
scaladsl.CouchbaseFlow.replaceDoc(sessionSettings, writeSettings, bucketName).asJava
/**
@@ -74,24 +74,24 @@ object CouchbaseFlow {
* can be handled in-stream.
*/
def replaceDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
scaladsl.CouchbaseFlow.replaceDocWithResult(sessionSettings, writeSettings, bucketName).asJava
/**
* Create a flow to delete documents from Couchbase by `id`. Emits the same `id`.
*/
def delete(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[String, String, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[String, String, NotUsed] =
scaladsl.CouchbaseFlow.delete(sessionSettings, writeSettings, bucketName).asJava
/**
* Create a flow to delete documents from Couchbase by `id` and emit operation outcome containing the same `id`.
*/
def deleteWithResult(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[String, CouchbaseDeleteResult, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[String, CouchbaseDeleteResult, NotUsed] =
scaladsl.CouchbaseFlow.deleteWithResult(sessionSettings, writeSettings, bucketName).asJava
}
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala
index 560e8c0f..9a1e2f23 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala
@@ -6,20 +6,20 @@ package akka.stream.alpakka.couchbase.javadsl
import java.time.Duration
import java.util.Optional
-import java.util.concurrent.{CompletionStage, Executor}
+import java.util.concurrent.{ CompletionStage, Executor }
import akka.annotation.DoNotInherit
import akka.dispatch.ExecutionContexts
import akka.stream.alpakka.couchbase.impl.CouchbaseSessionJavaAdapter
-import akka.stream.alpakka.couchbase.scaladsl.{CouchbaseSession => ScalaDslCouchbaseSession}
-import akka.stream.alpakka.couchbase.{CouchbaseSessionSettings, CouchbaseWriteSettings}
+import akka.stream.alpakka.couchbase.scaladsl.{ CouchbaseSession => ScalaDslCouchbaseSession }
+import akka.stream.alpakka.couchbase.{ CouchbaseSessionSettings, CouchbaseWriteSettings }
import akka.stream.javadsl.Source
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
import com.couchbase.client.java.query.util.IndexInfo
-import com.couchbase.client.java.query.{N1qlQuery, Statement}
-import com.couchbase.client.java.{AsyncBucket, AsyncCluster, Bucket}
+import com.couchbase.client.java.query.{ N1qlQuery, Statement }
+import com.couchbase.client.java.{ AsyncBucket, AsyncCluster, Bucket }
import scala.compat.java8.FutureConverters._
import scala.concurrent.ExecutionContext
@@ -36,13 +36,12 @@ object CouchbaseSession {
* the session is closed.
*/
def create(settings: CouchbaseSessionSettings,
- bucketName: String,
- executor: Executor): CompletionStage[CouchbaseSession] =
+ bucketName: String,
+ executor: Executor): CompletionStage[CouchbaseSession] =
ScalaDslCouchbaseSession
.apply(settings, bucketName)(executionContext(executor))
.map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)(
- ExecutionContexts.parasitic
- )
+ ExecutionContexts.parasitic)
.toJava
/**
@@ -52,8 +51,7 @@ object CouchbaseSession {
def create(client: AsyncCluster, bucketName: String, executor: Executor): CompletionStage[CouchbaseSession] =
ScalaDslCouchbaseSession(client, bucketName)(executionContext(executor))
.map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)(
- ExecutionContexts.parasitic
- )
+ ExecutionContexts.parasitic)
.toJava
/**
@@ -68,7 +66,7 @@ object CouchbaseSession {
private def executionContext(executor: Executor): ExecutionContext =
executor match {
case ec: ExecutionContext => ec
- case _ => ExecutionContext.fromExecutor(executor)
+ case _ => ExecutionContext.fromExecutor(executor)
}
/**
@@ -269,7 +267,7 @@ abstract class CouchbaseSession {
* @return a [[java.util.concurrent.CompletionStage]] of `true` if the index was/will be effectively created, `false`
* if the index existed and ignoreIfExist` is true. Completion of the `CompletionStage` does not guarantee the index
* is online and ready to be used.
- **/
+ */
def createIndex(indexName: String, ignoreIfExist: Boolean, fields: AnyRef*): CompletionStage[Boolean]
/**
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSink.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSink.scala
index 144fc420..593f6a01 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSink.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSink.scala
@@ -7,9 +7,9 @@ package akka.stream.alpakka.couchbase.javadsl
import java.util.concurrent.CompletionStage
import akka.stream.alpakka.couchbase._
-import akka.stream.javadsl.{Keep, Sink}
-import akka.{Done, NotUsed}
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import akka.stream.javadsl.{ Keep, Sink }
+import akka.{ Done, NotUsed }
+import com.couchbase.client.java.document.{ Document, JsonDocument }
/**
* Java API: Factory methods for Couchbase sinks.
@@ -20,8 +20,8 @@ object CouchbaseSink {
* Create a sink to update or insert a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def upsert(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[JsonDocument, CompletionStage[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[JsonDocument, CompletionStage[Done]] =
CouchbaseFlow
.upsert(sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
@@ -30,8 +30,8 @@ object CouchbaseSink {
* Create a sink to update or insert a Couchbase document of the given class.
*/
def upsertDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[T, CompletionStage[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[T, CompletionStage[Done]] =
CouchbaseFlow
.upsertDoc[T](sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
@@ -40,8 +40,8 @@ object CouchbaseSink {
* Create a sink to replace a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def replace(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[JsonDocument, CompletionStage[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[JsonDocument, CompletionStage[Done]] =
CouchbaseFlow
.replace(sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
@@ -50,8 +50,8 @@ object CouchbaseSink {
* Create a sink to replace a Couchbase document of the given class.
*/
def replaceDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[T, CompletionStage[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[T, CompletionStage[Done]] =
CouchbaseFlow
.replaceDoc[T](sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
@@ -60,8 +60,8 @@ object CouchbaseSink {
* Create a sink to delete documents from Couchbase by `id`.
*/
def delete(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[String, CompletionStage[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[String, CompletionStage[Done]] =
CouchbaseFlow
.delete(sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore(), Keep.right[NotUsed, CompletionStage[Done]])
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSource.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSource.scala
index 8cac9af1..0750bdbf 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSource.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSource.scala
@@ -5,10 +5,10 @@
package akka.stream.alpakka.couchbase.javadsl
import akka.NotUsed
-import akka.stream.alpakka.couchbase.{scaladsl, CouchbaseSessionSettings}
+import akka.stream.alpakka.couchbase.{ scaladsl, CouchbaseSessionSettings }
import akka.stream.javadsl.Source
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.query.{N1qlQuery, Statement}
+import com.couchbase.client.java.query.{ N1qlQuery, Statement }
/**
* Java API: Factory methods for Couchbase sources.
@@ -19,16 +19,16 @@ object CouchbaseSource {
* Create a source to query Couchbase by statement, emitted as [[com.couchbase.client.java.document.JsonDocument JsonDocument]]s.
*/
def fromStatement(sessionSettings: CouchbaseSessionSettings,
- statement: Statement,
- bucketName: String): Source[JsonObject, NotUsed] =
+ statement: Statement,
+ bucketName: String): Source[JsonObject, NotUsed] =
scaladsl.CouchbaseSource.fromStatement(sessionSettings, statement, bucketName).asJava
/**
* Create a source to query Couchbase by N1QL query, emitted as [[com.couchbase.client.java.document.JsonDocument JsonDocument]]s.
*/
def fromN1qlQuery(sessionSettings: CouchbaseSessionSettings,
- query: N1qlQuery,
- bucketName: String): Source[JsonObject, NotUsed] =
+ query: N1qlQuery,
+ bucketName: String): Source[JsonObject, NotUsed] =
scaladsl.CouchbaseSource.fromN1qlQuery(sessionSettings, query, bucketName).asJava
}
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala
index 71696d05..6eb1d283 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala
@@ -4,13 +4,13 @@
package akka.stream.alpakka.couchbase
-import java.util.concurrent.{CompletionStage, TimeUnit}
+import java.util.concurrent.{ CompletionStage, TimeUnit }
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.annotation.InternalApi
import com.couchbase.client.java.document.Document
import com.couchbase.client.java.env.CouchbaseEnvironment
-import com.couchbase.client.java.{PersistTo, ReplicateTo}
+import com.couchbase.client.java.{ PersistTo, ReplicateTo }
import com.typesafe.config.Config
import scala.jdk.CollectionConverters._
@@ -32,21 +32,21 @@ object CouchbaseWriteSettings {
def apply(): CouchbaseWriteSettings = inMemory
def apply(parallelism: Int,
- replicateTo: ReplicateTo,
- persistTo: PersistTo,
- timeout: FiniteDuration): CouchbaseWriteSettings =
+ replicateTo: ReplicateTo,
+ persistTo: PersistTo,
+ timeout: FiniteDuration): CouchbaseWriteSettings =
new CouchbaseWriteSettings(parallelism, replicateTo, persistTo, timeout)
def create(): CouchbaseWriteSettings = inMemory
def create(parallelism: Int,
- replicateTo: ReplicateTo,
- persistTo: PersistTo,
- timeout: java.time.Duration): CouchbaseWriteSettings =
+ replicateTo: ReplicateTo,
+ persistTo: PersistTo,
+ timeout: java.time.Duration): CouchbaseWriteSettings =
new CouchbaseWriteSettings(parallelism,
- replicateTo,
- persistTo,
- FiniteDuration(timeout.toMillis, TimeUnit.MILLISECONDS))
+ replicateTo,
+ persistTo,
+ FiniteDuration(timeout.toMillis, TimeUnit.MILLISECONDS))
}
@@ -54,9 +54,9 @@ object CouchbaseWriteSettings {
* Configure Couchbase writes.
*/
final class CouchbaseWriteSettings private (val parallelism: Int,
- val replicateTo: ReplicateTo,
- val persistTo: PersistTo,
- val timeout: FiniteDuration) {
+ val replicateTo: ReplicateTo,
+ val persistTo: PersistTo,
+ val timeout: FiniteDuration) {
def withParallelism(parallelism: Int): CouchbaseWriteSettings = copy(parallelism = parallelism)
@@ -76,9 +76,9 @@ final class CouchbaseWriteSettings private (val parallelism: Int,
def withTimeout(timeout: FiniteDuration): CouchbaseWriteSettings = copy(timeout = timeout)
private[this] def copy(parallelism: Int = parallelism,
- replicateTo: ReplicateTo = replicateTo,
- persistTo: PersistTo = persistTo,
- timeout: FiniteDuration = timeout) =
+ replicateTo: ReplicateTo = replicateTo,
+ persistTo: PersistTo = persistTo,
+ timeout: FiniteDuration = timeout) =
new CouchbaseWriteSettings(parallelism, replicateTo, persistTo, timeout)
override def equals(other: Any): Boolean = other match {
@@ -174,8 +174,7 @@ final class CouchbaseSessionSettings private (
val password: String,
val nodes: immutable.Seq[String],
val environment: Option[CouchbaseEnvironment],
- val enrichAsync: CouchbaseSessionSettings => Future[CouchbaseSessionSettings]
-) {
+ val enrichAsync: CouchbaseSessionSettings => Future[CouchbaseSessionSettings]) {
def withUsername(username: String): CouchbaseSessionSettings =
copy(username = username)
@@ -193,18 +192,20 @@ final class CouchbaseSessionSettings private (
def withNodes(nodes: java.util.List[String]): CouchbaseSessionSettings =
copy(nodes = nodes.asScala.toList)
- /** Scala API:
+ /**
+ * Scala API:
* Allows providing an asynchronous method to update the settings.
*/
def withEnrichAsync(value: CouchbaseSessionSettings => Future[CouchbaseSessionSettings]): CouchbaseSessionSettings =
copy(enrichAsync = value)
- /** Java API:
+ /**
+ * Java API:
* Allows providing an asynchronous method to update the settings.
*/
def withEnrichAsyncCs(
- value: java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]]
- ): CouchbaseSessionSettings =
+ value: java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]])
+ : CouchbaseSessionSettings =
copy(enrichAsync = (s: CouchbaseSessionSettings) => value.apply(s).toScala)
def withEnvironment(environment: CouchbaseEnvironment): CouchbaseSessionSettings =
@@ -222,8 +223,8 @@ final class CouchbaseSessionSettings private (
password: String = password,
nodes: immutable.Seq[String] = nodes,
environment: Option[CouchbaseEnvironment] = environment,
- enrichAsync: CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = enrichAsync
- ): CouchbaseSessionSettings =
+ enrichAsync: CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = enrichAsync)
+ : CouchbaseSessionSettings =
new CouchbaseSessionSettings(username, password, nodes, environment, enrichAsync)
override def equals(other: Any): Boolean = other match {
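
A minimal sketch of the four-argument factory reformatted above; the concrete durability values are illustrative choices, not defaults from this commit.

import java.util.concurrent.TimeUnit
import akka.stream.alpakka.couchbase.CouchbaseWriteSettings
import com.couchbase.client.java.{ PersistTo, ReplicateTo }
import scala.concurrent.duration.FiniteDuration

// withParallelism/withTimeout return updated immutable copies, so settings
// can also be refined after construction.
val writeSettings: CouchbaseWriteSettings =
  CouchbaseWriteSettings(parallelism = 4,
    replicateTo = ReplicateTo.ONE,
    persistTo = PersistTo.NONE,
    timeout = FiniteDuration(2, TimeUnit.SECONDS))
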
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseFlow.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseFlow.scala
index 0e570ce5..e872713a 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseFlow.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseFlow.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.couchbase.scaladsl
import akka.NotUsed
import akka.stream.alpakka.couchbase._
import akka.stream.scaladsl.Flow
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
/**
* Scala API: Factory methods for Couchbase flows.
@@ -30,8 +30,8 @@ object CouchbaseFlow {
* Create a flow to query Couchbase by `id` and emit documents of the given class.
*/
def fromId[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- bucketName: String,
- target: Class[T]): Flow[String, T, NotUsed] =
+ bucketName: String,
+ target: Class[T]): Flow[String, T, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
@@ -45,15 +45,14 @@ object CouchbaseFlow {
* Create a flow to update or insert a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def upsert(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[JsonDocument]
- .mapAsync(writeSettings.parallelism)(
- doc => session.flatMap(_.upsert(doc, writeSettings))(materializer.system.dispatcher)
- )
+ .mapAsync(writeSettings.parallelism)(doc =>
+ session.flatMap(_.upsert(doc, writeSettings))(materializer.system.dispatcher))
}
.mapMaterializedValue(_ => NotUsed)
@@ -61,15 +60,14 @@ object CouchbaseFlow {
* Create a flow to update or insert a Couchbase document of the given class.
*/
def upsertDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, T, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, T, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[T]
- .mapAsync(writeSettings.parallelism)(
- doc => session.flatMap(_.upsertDoc(doc, writeSettings))(materializer.system.dispatcher)
- )
+ .mapAsync(writeSettings.parallelism)(doc =>
+ session.flatMap(_.upsertDoc(doc, writeSettings))(materializer.system.dispatcher))
}
.mapMaterializedValue(_ => NotUsed)
@@ -78,23 +76,21 @@ object CouchbaseFlow {
* can be handled in-stream.
*/
def upsertDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[T]
- .mapAsync(writeSettings.parallelism)(
- doc => {
- implicit val executor = materializer.system.dispatcher
- session
- .flatMap(_.upsertDoc(doc, writeSettings))
- .map(_ => CouchbaseWriteSuccess(doc))
- .recover {
- case exception => CouchbaseWriteFailure(doc, exception)
- }
- }
- )
+ .mapAsync(writeSettings.parallelism)(doc => {
+ implicit val executor = materializer.system.dispatcher
+ session
+ .flatMap(_.upsertDoc(doc, writeSettings))
+ .map(_ => CouchbaseWriteSuccess(doc))
+ .recover {
+ case exception => CouchbaseWriteFailure(doc, exception)
+ }
+ })
}
.mapMaterializedValue(_ => NotUsed)
@@ -102,15 +98,14 @@ object CouchbaseFlow {
* Create a flow to replace a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def replace(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[JsonDocument, JsonDocument, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[JsonDocument]
- .mapAsync(writeSettings.parallelism)(
- doc => session.flatMap(_.replace(doc, writeSettings))(materializer.system.dispatcher)
- )
+ .mapAsync(writeSettings.parallelism)(doc =>
+ session.flatMap(_.replace(doc, writeSettings))(materializer.system.dispatcher))
}
.mapMaterializedValue(_ => NotUsed)
@@ -118,15 +113,14 @@ object CouchbaseFlow {
* Create a flow to replace a Couchbase document of the given class.
*/
def replaceDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, T, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, T, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[T]
- .mapAsync(writeSettings.parallelism)(
- doc => session.flatMap(_.replaceDoc(doc, writeSettings))(materializer.system.dispatcher)
- )
+ .mapAsync(writeSettings.parallelism)(doc =>
+ session.flatMap(_.replaceDoc(doc, writeSettings))(materializer.system.dispatcher))
}
.mapMaterializedValue(_ => NotUsed)
@@ -135,23 +129,21 @@ object CouchbaseFlow {
* can be handled in-stream.
*/
def replaceDocWithResult[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[T, CouchbaseWriteResult[T], NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[T]
- .mapAsync(writeSettings.parallelism)(
- doc => {
- implicit val executor = materializer.system.dispatcher
- session
- .flatMap(_.replaceDoc(doc, writeSettings))
- .map(_ => CouchbaseWriteSuccess(doc))
- .recover {
- case exception => CouchbaseWriteFailure(doc, exception)
- }
- }
- )
+ .mapAsync(writeSettings.parallelism)(doc => {
+ implicit val executor = materializer.system.dispatcher
+ session
+ .flatMap(_.replaceDoc(doc, writeSettings))
+ .map(_ => CouchbaseWriteSuccess(doc))
+ .recover {
+ case exception => CouchbaseWriteFailure(doc, exception)
+ }
+ })
}
.mapMaterializedValue(_ => NotUsed)
@@ -159,20 +151,18 @@ object CouchbaseFlow {
* Create a flow to delete documents from Couchbase by `id`. Emits the same `id`.
*/
def delete(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[String, String, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[String, String, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[String]
- .mapAsync(writeSettings.parallelism)(
- id => {
- implicit val executor = materializer.system.dispatcher
- session
- .flatMap(_.remove(id, writeSettings))
- .map(_ => id)
- }
- )
+ .mapAsync(writeSettings.parallelism)(id => {
+ implicit val executor = materializer.system.dispatcher
+ session
+ .flatMap(_.remove(id, writeSettings))
+ .map(_ => id)
+ })
}
.mapMaterializedValue(_ => NotUsed)
@@ -180,23 +170,21 @@ object CouchbaseFlow {
* Create a flow to delete documents from Couchbase by `id` and emit operation outcome containing the same `id`.
*/
def deleteWithResult(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Flow[String, CouchbaseDeleteResult, NotUsed] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Flow[String, CouchbaseDeleteResult, NotUsed] =
Flow
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
Flow[String]
- .mapAsync(writeSettings.parallelism)(
- id => {
- implicit val executor = materializer.system.dispatcher
- session
- .flatMap(_.remove(id, writeSettings))
- .map(_ => CouchbaseDeleteSuccess(id))
- .recover {
- case exception => CouchbaseDeleteFailure(id, exception)
- }
- }
- )
+ .mapAsync(writeSettings.parallelism)(id => {
+ implicit val executor = materializer.system.dispatcher
+ session
+ .flatMap(_.remove(id, writeSettings))
+ .map(_ => CouchbaseDeleteSuccess(id))
+ .recover {
+ case exception => CouchbaseDeleteFailure(id, exception)
+ }
+ })
}
.mapMaterializedValue(_ => NotUsed)
}
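
To make the reformatted flows above concrete, a minimal sketch of the upsert variant; the bucket name "akka", the document contents, and the config-based CouchbaseSessionSettings(system) factory are assumptions.

import akka.Done
import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.{ CouchbaseSessionSettings, CouchbaseWriteSettings }
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseFlow
import akka.stream.scaladsl.{ Sink, Source }
import com.couchbase.client.java.document.JsonDocument
import com.couchbase.client.java.document.json.JsonObject
import scala.concurrent.Future

implicit val system: ActorSystem = ActorSystem("example")

val sessionSettings = CouchbaseSessionSettings(system)
val writeSettings = CouchbaseWriteSettings()

val doc = JsonDocument.create("id-1", JsonObject.create().put("value", "first"))

// upsert emits the written document downstream; deleteWithResult would
// instead emit a CouchbaseDeleteResult so failures can be handled in-stream.
val done: Future[Done] = Source
  .single(doc)
  .via(CouchbaseFlow.upsert(sessionSettings, writeSettings, "akka"))
  .runWith(Sink.ignore)
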
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSession.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSession.scala
index b3679a27..8700bd2a 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSession.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSession.scala
@@ -4,19 +4,19 @@
package akka.stream.alpakka.couchbase.scaladsl
-import akka.annotation.{DoNotInherit, InternalApi}
-import akka.stream.alpakka.couchbase.impl.{CouchbaseSessionImpl, RxUtilities}
-import akka.stream.alpakka.couchbase.javadsl.{CouchbaseSession => JavaDslCouchbaseSession}
-import akka.stream.alpakka.couchbase.{CouchbaseSessionSettings, CouchbaseWriteSettings}
+import akka.annotation.{ DoNotInherit, InternalApi }
+import akka.stream.alpakka.couchbase.impl.{ CouchbaseSessionImpl, RxUtilities }
+import akka.stream.alpakka.couchbase.javadsl.{ CouchbaseSession => JavaDslCouchbaseSession }
+import akka.stream.alpakka.couchbase.{ CouchbaseSessionSettings, CouchbaseWriteSettings }
import akka.stream.scaladsl.Source
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.couchbase.client.java._
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import com.couchbase.client.java.document.{ Document, JsonDocument }
import com.couchbase.client.java.query._
import com.couchbase.client.java.query.util.IndexInfo
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration.FiniteDuration
/**
@@ -31,7 +31,7 @@ object CouchbaseSession {
* the session is closed.
*/
def apply(settings: CouchbaseSessionSettings,
- bucketName: String)(implicit ec: ExecutionContext): Future[CouchbaseSession] =
+ bucketName: String)(implicit ec: ExecutionContext): Future[CouchbaseSession] =
createClusterClient(settings).flatMap(c => openBucket(c, disconnectClusterOnClose = true, bucketName))
/**
@@ -56,8 +56,7 @@ object CouchbaseSession {
*/
@InternalApi
private[couchbase] def createClusterClient(
- settings: CouchbaseSessionSettings
- )(implicit ec: ExecutionContext): Future[AsyncCluster] =
+ settings: CouchbaseSessionSettings)(implicit ec: ExecutionContext): Future[AsyncCluster] =
settings.enriched
.flatMap { enrichedSettings =>
Future(enrichedSettings.environment match {
@@ -69,8 +68,7 @@ object CouchbaseSession {
}
private def openBucket(cluster: AsyncCluster, disconnectClusterOnClose: Boolean, bucketName: String)(
- implicit ec: ExecutionContext
- ): Future[CouchbaseSession] =
+ implicit ec: ExecutionContext): Future[CouchbaseSession] =
RxUtilities
.singleObservableToFuture(cluster.openBucket(bucketName), "openBucket")
.map { bucket =>
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSink.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSink.scala
index 8d8542e0..7d058951 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSink.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSink.scala
@@ -6,8 +6,8 @@ package akka.stream.alpakka.couchbase.scaladsl
import akka.Done
import akka.stream.alpakka.couchbase._
-import akka.stream.scaladsl.{Keep, Sink}
-import com.couchbase.client.java.document.{Document, JsonDocument}
+import akka.stream.scaladsl.{ Keep, Sink }
+import com.couchbase.client.java.document.{ Document, JsonDocument }
import scala.concurrent.Future
@@ -20,16 +20,16 @@ object CouchbaseSink {
* Create a sink to update or insert a Couchbase [[com.couchbase.client.java.document.JsonDocument JsonDocument]].
*/
def upsert(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[JsonDocument, Future[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[JsonDocument, Future[Done]] =
CouchbaseFlow.upsert(sessionSettings, writeSettings, bucketName).toMat(Sink.ignore)(Keep.right)
/**
* Create a sink to update or insert a Couchbase document of the given class.
*/
def upsertDoc[T <: Document[_]](sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[T, Future[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[T, Future[Done]] =
CouchbaseFlow
.upsertDoc(sessionSettings, writeSettings, bucketName)
.toMat(Sink.ignore)(Keep.right)
@@ -38,8 +38,8 @@ object CouchbaseSink {
* Create a sink to delete documents from Couchbase by `id`.
*/
def delete(sessionSettings: CouchbaseSessionSettings,
- writeSettings: CouchbaseWriteSettings,
- bucketName: String): Sink[String, Future[Done]] =
+ writeSettings: CouchbaseWriteSettings,
+ bucketName: String): Sink[String, Future[Done]] =
CouchbaseFlow.delete(sessionSettings, writeSettings, bucketName).toMat(Sink.ignore)(Keep.right)
}
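
The same upsert as the flow variant, but as the sink reformatted above, materializing Future[Done] directly; bucket name and documents are again placeholders.

import akka.Done
import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.{ CouchbaseSessionSettings, CouchbaseWriteSettings }
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSink
import akka.stream.scaladsl.Source
import com.couchbase.client.java.document.JsonDocument
import com.couchbase.client.java.document.json.JsonObject
import scala.concurrent.Future

implicit val system: ActorSystem = ActorSystem("example")

val docs = (1 to 3).map(i => JsonDocument.create(s"id-$i", JsonObject.create().put("value", i)))

// The sink discards elements; completion of Future[Done] signals all writes finished.
val written: Future[Done] =
  Source(docs)
    .runWith(CouchbaseSink.upsert(CouchbaseSessionSettings(system), CouchbaseWriteSettings(), "akka"))
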
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSource.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSource.scala
index 61f801e8..62aadca2 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSource.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/CouchbaseSource.scala
@@ -5,10 +5,10 @@
package akka.stream.alpakka.couchbase.scaladsl
import akka.NotUsed
-import akka.stream.alpakka.couchbase.{CouchbaseSessionRegistry, CouchbaseSessionSettings}
+import akka.stream.alpakka.couchbase.{ CouchbaseSessionRegistry, CouchbaseSessionSettings }
import akka.stream.scaladsl.Source
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.query.{N1qlQuery, Statement}
+import com.couchbase.client.java.query.{ N1qlQuery, Statement }
/**
* Scala API: Factory methods for Couchbase sources.
@@ -19,8 +19,8 @@ object CouchbaseSource {
* Create a source to query Couchbase by statement, emitted as [[com.couchbase.client.java.document.JsonDocument JsonDocument]]s.
*/
def fromStatement(sessionSettings: CouchbaseSessionSettings,
- statement: Statement,
- bucketName: String): Source[JsonObject, NotUsed] =
+ statement: Statement,
+ bucketName: String): Source[JsonObject, NotUsed] =
Source
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
@@ -34,8 +34,8 @@ object CouchbaseSource {
* Create a source to query Couchbase by N1QL query, emitted as [[com.couchbase.client.java.document.JsonDocument JsonDocument]]s.
*/
def fromN1qlQuery(sessionSettings: CouchbaseSessionSettings,
- query: N1qlQuery,
- bucketName: String): Source[JsonObject, NotUsed] =
+ query: N1qlQuery,
+ bucketName: String): Source[JsonObject, NotUsed] =
Source
.fromMaterializer { (materializer, _) =>
val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
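
A brief sketch of fromN1qlQuery, the second factory in this file; the bucket name, the query text, and the config-based settings factory are assumptions.

import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSource
import akka.stream.scaladsl.Sink
import com.couchbase.client.java.query.N1qlQuery

implicit val system: ActorSystem = ActorSystem("example")

// Each result row arrives as a JsonObject.
val query = N1qlQuery.simple("SELECT * FROM akka WHERE type = 'doc'")
val rows = CouchbaseSource
  .fromN1qlQuery(CouchbaseSessionSettings(system), query, "akka")
  .runWith(Sink.seq)
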
diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala
index a77a0030..b026c8b8 100644
--- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala
+++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.couchbase.scaladsl
import java.util.concurrent.CompletionStage
-import akka.actor.{ActorSystem, ClassicActorSystemProvider}
+import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.annotation.InternalApi
import akka.discovery.Discovery
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
@@ -29,8 +29,7 @@ sealed class DiscoverySupport private {
*/
private def readNodes(
serviceName: String,
- lookupTimeout: FiniteDuration
- )(implicit system: ClassicActorSystemProvider): Future[immutable.Seq[String]] = {
+ lookupTimeout: FiniteDuration)(implicit system: ClassicActorSystemProvider): Future[immutable.Seq[String]] = {
implicit val ec = system.classicSystem.dispatcher
val discovery = Discovery(system).discovery
discovery.lookup(serviceName, lookupTimeout).map { resolved =>
@@ -53,8 +52,8 @@ sealed class DiscoverySupport private {
* to be used as Couchbase `nodes`.
*/
def nodes(
- config: Config
- )(implicit system: ClassicActorSystemProvider): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = {
+ config: Config)(
+ implicit system: ClassicActorSystemProvider): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = {
implicit val ec = system.classicSystem.dispatcher
settings =>
readNodes(config)
@@ -64,7 +63,7 @@ sealed class DiscoverySupport private {
}
private[couchbase] def nodes(config: Config,
- system: ActorSystem): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] =
+ system: ActorSystem): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] =
nodes(config)(system)
/**
@@ -73,8 +72,8 @@ sealed class DiscoverySupport private {
@InternalApi
private[couchbase] def getNodes(
config: Config,
- system: ClassicActorSystemProvider
- ): java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]] =
+ system: ClassicActorSystemProvider)
+ : java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]] =
nodes(config)(system).andThen(_.toJava).asJava
/**
@@ -82,8 +81,7 @@ sealed class DiscoverySupport private {
* to be used as Couchbase `nodes`.
*/
def nodes()(
- implicit system: ClassicActorSystemProvider
- ): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] =
+ implicit system: ClassicActorSystemProvider): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] =
nodes(system.classicSystem)
/**
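
A minimal sketch of plugging Akka Discovery into the session settings via the enrichAsync hook reformatted above; the config-based settings factory is assumed.

import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
import akka.stream.alpakka.couchbase.scaladsl.DiscoverySupport

implicit val system: ActorSystem = ActorSystem("example")

// nodes() looks up the configured service name via Akka Discovery and copies
// the resolved addresses into the settings before a session is opened.
val sessionSettings = CouchbaseSessionSettings(system)
  .withEnrichAsync(DiscoverySupport.nodes())
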
diff --git a/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala b/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala
index 587e3485..89cc28a4 100644
--- a/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala
+++ b/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala
@@ -7,13 +7,13 @@ package akka.stream.alpakka.couchbase.testing
import akka.Done
import akka.actor.ActorSystem
import akka.stream.alpakka.couchbase.scaladsl._
-import akka.stream.alpakka.couchbase.{CouchbaseSessionSettings, CouchbaseWriteSettings}
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.alpakka.couchbase.{ CouchbaseSessionSettings, CouchbaseWriteSettings }
+import akka.stream.scaladsl.{ Sink, Source }
import com.couchbase.client.deps.io.netty.buffer.Unpooled
import com.couchbase.client.deps.io.netty.util.CharsetUtil
import com.couchbase.client.java.ReplicateTo
import com.couchbase.client.java.document.json.JsonObject
-import com.couchbase.client.java.document.{BinaryDocument, JsonDocument, RawJsonDocument, StringDocument}
+import com.couchbase.client.java.document.{ BinaryDocument, JsonDocument, RawJsonDocument, StringDocument }
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
@@ -21,7 +21,7 @@ import scala.jdk.CollectionConverters._
import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
-import scala.concurrent.{Await, Future}
+import scala.concurrent.{ Await, Future }
case class TestObject(id: String, value: String)
@@ -29,15 +29,15 @@ trait CouchbaseSupport {
private val log = LoggerFactory.getLogger(classOf[CouchbaseSupport])
- //#init-actor-system
+ // #init-actor-system
implicit val actorSystem: ActorSystem = ActorSystem()
- //#init-actor-system
+ // #init-actor-system
val sampleData = TestObject("First", "First")
val sampleSequence: Seq[TestObject] = sampleData +: Seq[TestObject](TestObject("Second", "Second"),
- TestObject("Third", "Third"),
- TestObject("Fourth", "Fourth"))
+ TestObject("Third", "Third"),
+ TestObject("Fourth", "Fourth"))
val sampleJavaList: java.util.List[TestObject] = sampleSequence.asJava
@@ -78,7 +78,7 @@ trait CouchbaseSupport {
.via(CouchbaseFlow.upsert(sessionSettings, CouchbaseWriteSettings.inMemory, bucketName))
.runWith(Sink.ignore)
Await.result(bulkUpsertResult, 5.seconds)
- //all queries are Eventual Consistent, se we need to wait for index refresh!!
+ // all queries are eventually consistent, so we need to wait for the index refresh!
Thread.sleep(2000)
}
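
The hunks above repeat the same four mechanical changes that recur through the rest of this commit: spaces inside import braces, dangling closing parentheses joined onto the last argument line, continuation arguments re-indented, and a space inserted after `//` comment markers. As a hedged sketch only, a `.scalafmt.conf` along the following lines would produce this style; the option names are standard scalafmt 3.x settings, but the version and exact values are assumptions, not the project's committed configuration:

    # hypothetical .scalafmt.conf reconstructing the style seen in these hunks
    version = 3.6.1                      # assumption: any scalafmt 3.x behaves similarly here
    runner.dialect = scala213
    maxColumn = 120
    spaces.inImportCurlyBraces = true    # {A, B}  ->  { A, B }
    danglingParentheses.preset = false   # closing ")" joins the last argument line
    align.openParenCallSite = false      # continuation args use a fixed indent step
    align.openParenDefnSite = false
    docstrings.style = Asterisk          # assumed to drive the /** ... */ reflow seen below

    # note: the space added after "//" has no dedicated option above; it is
    # assumed to be default comment handling in the scalafmt version used.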
diff --git a/couchbase/src/test/scala/docs/scaladsl/CouchbaseFlowSpec.scala b/couchbase/src/test/scala/docs/scaladsl/CouchbaseFlowSpec.scala
index f91a55d9..2bf8f701 100644
--- a/couchbase/src/test/scala/docs/scaladsl/CouchbaseFlowSpec.scala
+++ b/couchbase/src/test/scala/docs/scaladsl/CouchbaseFlowSpec.scala
@@ -5,22 +5,22 @@
package docs.scaladsl
import akka.Done
-import akka.stream.alpakka.couchbase.{CouchbaseDeleteFailure, CouchbaseDeleteResult}
+import akka.stream.alpakka.couchbase.{ CouchbaseDeleteFailure, CouchbaseDeleteResult }
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseFlow
-import akka.stream.alpakka.couchbase.testing.{CouchbaseSupport, TestObject}
+import akka.stream.alpakka.couchbase.testing.{ CouchbaseSupport, TestObject }
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
import com.couchbase.client.java.error.DocumentDoesNotExistException
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
//#write-settings
import akka.stream.alpakka.couchbase.CouchbaseWriteSettings
-import com.couchbase.client.java.{PersistTo, ReplicateTo}
+import com.couchbase.client.java.{ PersistTo, ReplicateTo }
//#write-settings
import akka.stream.testkit.scaladsl.StreamTestKit._
-import com.couchbase.client.java.document.{BinaryDocument, RawJsonDocument, StringDocument}
+import com.couchbase.client.java.document.{ BinaryDocument, RawJsonDocument, StringDocument }
import scala.collection.immutable
import scala.collection.immutable.Seq
@@ -60,13 +60,13 @@ class CouchbaseFlowSpec
"create custom writeSettings object" in {
- //#write-settings
+ // #write-settings
val writeSettings = CouchbaseWriteSettings()
.withParallelism(3)
.withPersistTo(PersistTo.FOUR)
.withReplicateTo(ReplicateTo.THREE)
.withTimeout(5.seconds)
- //#write-settings
+ // #write-settings
val expectedwriteSettings = CouchbaseWriteSettings(3, ReplicateTo.THREE, PersistTo.FOUR, 5.seconds)
writeSettings shouldEqual expectedwriteSettings
@@ -84,9 +84,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
result.futureValue
@@ -110,9 +108,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsert(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
// #upsert
jsonDocumentUpsert.futureValue
@@ -132,9 +128,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
// #upsert
stringDocumentUpsert.futureValue
@@ -163,9 +157,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings.withParallelism(2),
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
bulkUpsertResult.futureValue
@@ -182,8 +174,7 @@ class CouchbaseFlowSpec
val bulkUpsertResult: Future[Done] = Source(sampleSequence)
.map(toJsonDocument)
.via(
- CouchbaseFlow.upsert(sessionSettings, writeSettings.withParallelism(2), bucketName)
- )
+ CouchbaseFlow.upsert(sessionSettings, writeSettings.withParallelism(2), bucketName))
.runWith(Sink.ignore)
bulkUpsertResult.futureValue
@@ -196,9 +187,7 @@ class CouchbaseFlowSpec
.via(
CouchbaseFlow.fromId(
sessionSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.seq)
// #fromId
@@ -212,9 +201,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings.withParallelism(2),
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
bulkUpsertResult.futureValue
@@ -224,9 +211,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.fromId(
sessionSettings,
bucketName,
- classOf[StringDocument]
- )
- )
+ classOf[StringDocument]))
.runWith(Sink.seq)
resultsAsFuture.futureValue.map(_.id()) should contain.inOrder("First", "Second", "Third", "Fourth")
@@ -239,9 +224,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings.withParallelism(2),
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
bulkUpsertResult.futureValue
@@ -251,9 +234,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.fromId(
sessionSettings,
bucketName,
- classOf[BinaryDocument]
- )
- )
+ classOf[BinaryDocument]))
.runWith(Sink.seq)
resultsAsFuture.futureValue.map(_.id()) shouldBe Seq("First", "Second", "Third", "Fourth")
}
@@ -263,12 +244,11 @@ class CouchbaseFlowSpec
.map(toJsonDocument)
.via(
CouchbaseFlow.upsert(sessionSettings,
- writeSettings
- .withParallelism(2)
- .withPersistTo(PersistTo.THREE)
- .withTimeout(1.seconds),
- bucketName)
- )
+ writeSettings
+ .withParallelism(2)
+ .withPersistTo(PersistTo.THREE)
+ .withTimeout(1.seconds),
+ bucketName))
.runWith(Sink.seq)
bulkUpsertResult.failed.futureValue shouldBe a[com.couchbase.client.java.error.DurabilityException]
@@ -286,11 +266,9 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDoc(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
- //wait til operation completed
+ // wait until the operation completes
upsertFuture.futureValue
// #delete
@@ -301,9 +279,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.delete(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
// #delete
deleteFuture.futureValue
@@ -321,9 +297,7 @@ class CouchbaseFlowSpec
.fromId(
sessionSettings,
bucketName,
- classOf[RawJsonDocument]
- )
- )
+ classOf[RawJsonDocument]))
.runWith(Sink.head)
getFuture.failed.futureValue shouldBe a[NoSuchElementException]
}
@@ -332,23 +306,20 @@ class CouchbaseFlowSpec
val bulkUpsertResult: Future[Done] = Source(sampleSequence)
.map(toRawJsonDocument)
.via(
- CouchbaseFlow.upsertDoc(sessionSettings, writeSettings.withParallelism(2), bucketName)
- )
+ CouchbaseFlow.upsertDoc(sessionSettings, writeSettings.withParallelism(2), bucketName))
.runWith(Sink.ignore)
bulkUpsertResult.futureValue
val deleteFuture: Future[Done] = Source(sampleSequence.map(_.id) :+ "NoneExisting")
.via(
- CouchbaseFlow.delete(sessionSettings, writeSettings.withParallelism(2), bucketName)
- )
+ CouchbaseFlow.delete(sessionSettings, writeSettings.withParallelism(2), bucketName))
.runWith(Sink.ignore)
deleteFuture.failed.futureValue shouldBe a[DocumentDoesNotExistException]
val getFuture: Future[Seq[RawJsonDocument]] =
Source(sampleSequence.map(_.id))
.via(
- CouchbaseFlow.fromId(sessionSettings, bucketName, classOf[RawJsonDocument])
- )
+ CouchbaseFlow.fromId(sessionSettings, bucketName, classOf[RawJsonDocument]))
.runWith(Sink.seq)
getFuture.futureValue shouldBe empty
}
@@ -364,8 +335,7 @@ class CouchbaseFlowSpec
val result: Future[JsonDocument] = Source
.single(id)
.via(
- CouchbaseFlow.fromId(sessionSettings, queryBucketName)
- )
+ CouchbaseFlow.fromId(sessionSettings, queryBucketName))
.runWith(Sink.head)
result.futureValue.id() shouldEqual id
}
@@ -416,9 +386,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.replace(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
// #replace
replaceFuture.futureValue
@@ -432,7 +400,7 @@ class CouchbaseFlowSpec
"replace multiple RawJsonDocuments" in assertAllStagesStopped {
val replaceSequence: Seq[TestObject] = sampleData +: Seq[TestObject](TestObject("Second", "SecondReplace"),
- TestObject("Third", "ThirdReplace"))
+ TestObject("Third", "ThirdReplace"))
upsertSampleData(bucketName)
@@ -442,9 +410,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.replaceDoc(
sessionSettings,
writeSettings.withParallelism(2),
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
bulkReplaceResult.futureValue
@@ -455,9 +421,9 @@ class CouchbaseFlowSpec
.runWith(Sink.seq)
resultsAsFuture.futureValue.map(doc => doc.content().get("value")) should contain.inOrderOnly("First",
- "SecondReplace",
- "ThirdReplace",
- "Fourth")
+ "SecondReplace",
+ "ThirdReplace",
+ "Fourth")
}
"replace RawJsonDocument" in assertAllStagesStopped {
@@ -474,9 +440,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.replaceDoc(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.ignore)
// #replaceDocreplace
@@ -496,12 +460,11 @@ class CouchbaseFlowSpec
.map(toJsonDocument)
.via(
CouchbaseFlow.replace(sessionSettings,
- writeSettings
- .withParallelism(2)
- .withPersistTo(PersistTo.THREE)
- .withTimeout(1.seconds),
- bucketName)
- )
+ writeSettings
+ .withParallelism(2)
+ .withPersistTo(PersistTo.THREE)
+ .withTimeout(1.seconds),
+ bucketName))
.runWith(Sink.seq)
bulkReplaceResult.failed.futureValue shouldBe a[com.couchbase.client.java.error.DurabilityException]
@@ -511,7 +474,7 @@ class CouchbaseFlowSpec
"Couchbase upsert with result" should {
"write documents" in assertAllStagesStopped {
// #upsertDocWithResult
- import akka.stream.alpakka.couchbase.{CouchbaseWriteFailure, CouchbaseWriteResult}
+ import akka.stream.alpakka.couchbase.{ CouchbaseWriteFailure, CouchbaseWriteResult }
val result: Future[immutable.Seq[CouchbaseWriteResult[RawJsonDocument]]] =
Source(sampleSequence)
@@ -520,9 +483,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.upsertDocWithResult(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.seq)
val failedDocs: immutable.Seq[CouchbaseWriteFailure[RawJsonDocument]] = result.futureValue.collect {
@@ -536,18 +497,17 @@ class CouchbaseFlowSpec
}
"expose failures in-stream" in assertAllStagesStopped {
- import akka.stream.alpakka.couchbase.{CouchbaseWriteFailure, CouchbaseWriteResult}
+ import akka.stream.alpakka.couchbase.{ CouchbaseWriteFailure, CouchbaseWriteResult }
val result: Future[immutable.Seq[CouchbaseWriteResult[JsonDocument]]] = Source(sampleSequence)
.map(toJsonDocument)
.via(
CouchbaseFlow.upsertDocWithResult(sessionSettings,
- writeSettings
- .withParallelism(2)
- .withPersistTo(PersistTo.THREE)
- .withTimeout(1.seconds),
- bucketName)
- )
+ writeSettings
+ .withParallelism(2)
+ .withPersistTo(PersistTo.THREE)
+ .withTimeout(1.seconds),
+ bucketName))
.runWith(Sink.seq)
result.futureValue should have size sampleSequence.size
@@ -571,9 +531,7 @@ class CouchbaseFlowSpec
.withParallelism(2)
.withReplicateTo(ReplicateTo.THREE)
.withTimeout(1.seconds),
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.head)
// #deleteWithResult
@@ -584,9 +542,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.deleteWithResult(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.head)
// #deleteWithResult
deleteFuture.failed.futureValue shouldBe a[DocumentDoesNotExistException]
@@ -603,7 +559,7 @@ class CouchbaseFlowSpec
upsertSampleData(bucketName)
// #replaceDocWithResult
- import akka.stream.alpakka.couchbase.{CouchbaseWriteFailure, CouchbaseWriteResult}
+ import akka.stream.alpakka.couchbase.{ CouchbaseWriteFailure, CouchbaseWriteResult }
val result: Future[immutable.Seq[CouchbaseWriteResult[RawJsonDocument]]] =
Source(sampleSequence)
@@ -612,9 +568,7 @@ class CouchbaseFlowSpec
CouchbaseFlow.replaceDocWithResult(
sessionSettings,
writeSettings,
- bucketName
- )
- )
+ bucketName))
.runWith(Sink.seq)
val failedDocs: immutable.Seq[CouchbaseWriteFailure[RawJsonDocument]] = result.futureValue.collect {
@@ -631,18 +585,17 @@ class CouchbaseFlowSpec
cleanAllInBucket(bucketName)
- import akka.stream.alpakka.couchbase.{CouchbaseWriteFailure, CouchbaseWriteResult}
+ import akka.stream.alpakka.couchbase.{ CouchbaseWriteFailure, CouchbaseWriteResult }
val result: Future[immutable.Seq[CouchbaseWriteResult[JsonDocument]]] = Source(sampleSequence)
.map(toJsonDocument)
.via(
CouchbaseFlow.replaceDocWithResult(sessionSettings,
- writeSettings
- .withParallelism(2)
- .withPersistTo(PersistTo.THREE)
- .withTimeout(1.seconds),
- bucketName)
- )
+ writeSettings
+ .withParallelism(2)
+ .withPersistTo(PersistTo.THREE)
+ .withTimeout(1.seconds),
+ bucketName))
.runWith(Sink.seq)
result.futureValue should have size sampleSequence.size
diff --git a/couchbase/src/test/scala/docs/scaladsl/CouchbaseSessionExamplesSpec.scala b/couchbase/src/test/scala/docs/scaladsl/CouchbaseSessionExamplesSpec.scala
index 9267777d..b36dd921 100644
--- a/couchbase/src/test/scala/docs/scaladsl/CouchbaseSessionExamplesSpec.scala
+++ b/couchbase/src/test/scala/docs/scaladsl/CouchbaseSessionExamplesSpec.scala
@@ -12,7 +12,7 @@ import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
@@ -35,7 +35,7 @@ class CouchbaseSessionExamplesSpec
import akka.stream.alpakka.couchbase.CouchbaseSessionRegistry
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
- import com.couchbase.client.java.env.{CouchbaseEnvironment, DefaultCouchbaseEnvironment}
+ import com.couchbase.client.java.env.{ CouchbaseEnvironment, DefaultCouchbaseEnvironment }
// Akka extension (singleton per actor system)
val registry = CouchbaseSessionRegistry(actorSystem)
@@ -81,7 +81,7 @@ class CouchbaseSessionExamplesSpec
implicit val ec: ExecutionContext = actorSystem.dispatcher
// #fromBucket
import com.couchbase.client.java.auth.PasswordAuthenticator
- import com.couchbase.client.java.{Bucket, CouchbaseCluster}
+ import com.couchbase.client.java.{ Bucket, CouchbaseCluster }
val cluster: CouchbaseCluster = CouchbaseCluster.create("localhost")
cluster.authenticate(new PasswordAuthenticator("Administrator", "password"))
diff --git a/couchbase/src/test/scala/docs/scaladsl/CouchbaseSourceSpec.scala b/couchbase/src/test/scala/docs/scaladsl/CouchbaseSourceSpec.scala
index 70adf9e3..9ace5986 100644
--- a/couchbase/src/test/scala/docs/scaladsl/CouchbaseSourceSpec.scala
+++ b/couchbase/src/test/scala/docs/scaladsl/CouchbaseSourceSpec.scala
@@ -4,13 +4,13 @@
package docs.scaladsl
-import akka.stream.alpakka.couchbase.scaladsl.{CouchbaseSession, CouchbaseSource}
+import akka.stream.alpakka.couchbase.scaladsl.{ CouchbaseSession, CouchbaseSource }
import akka.stream.alpakka.couchbase.testing.CouchbaseSupport
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import akka.stream.scaladsl.Sink
import akka.stream.testkit.scaladsl.StreamTestKit._
import com.couchbase.client.java.auth.PasswordAuthenticator
-import com.couchbase.client.java.{Bucket, CouchbaseCluster}
+import com.couchbase.client.java.{ Bucket, CouchbaseCluster }
import com.couchbase.client.java.document.json.JsonObject
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll
@@ -70,8 +70,8 @@ class CouchbaseSourceSpec
"run simple N1QL query" in assertAllStagesStopped {
- //#n1ql
- import com.couchbase.client.java.query.{N1qlParams, N1qlQuery}
+ // #n1ql
+ import com.couchbase.client.java.query.{ N1qlParams, N1qlQuery }
val params = N1qlParams.build.adhoc(false)
val query = N1qlQuery.simple(s"select count(*) from $queryBucketName", params)
@@ -80,7 +80,7 @@ class CouchbaseSourceSpec
CouchbaseSource
.fromN1qlQuery(sessionSettings, query, bucketName)
.runWith(Sink.seq)
- //#n1ql
+ // #n1ql
resultAsFuture.futureValue.head.get("$1") shouldEqual 4
}
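
Comment markers of the form `// #n1ql` above are not ordinary comments: paired `#label` markers delimit regions that the Paradox documentation build lifts into the published docs. As a sketch, assuming sbt-paradox's snippet directive syntax, a docs page would pull the region above with something like:

    @@snip [CouchbaseSourceSpec.scala](/couchbase/src/test/scala/docs/scaladsl/CouchbaseSourceSpec.scala) { #n1ql }

Since extraction keys on the `#label` token rather than on the exact comment text, the space this commit inserts after `//` should leave the published snippets unchanged.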
diff --git a/couchbase/src/test/scala/docs/scaladsl/DiscoverySpec.scala b/couchbase/src/test/scala/docs/scaladsl/DiscoverySpec.scala
index 0ca616e3..22ac2c18 100644
--- a/couchbase/src/test/scala/docs/scaladsl/DiscoverySpec.scala
+++ b/couchbase/src/test/scala/docs/scaladsl/DiscoverySpec.scala
@@ -7,14 +7,14 @@ package docs.scaladsl
import akka.actor.ActorSystem
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import com.couchbase.client.java.document.JsonDocument
-import com.typesafe.config.{Config, ConfigFactory}
+import com.typesafe.config.{ Config, ConfigFactory }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.concurrent.duration._
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
class DiscoverySpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing {
@@ -32,8 +32,8 @@ class DiscoverySpec extends AnyWordSpec with Matchers with BeforeAndAfterAll wit
"a Couchbasesession" should {
"be managed by the registry" in {
// #registry
- import akka.stream.alpakka.couchbase.scaladsl.{CouchbaseSession, DiscoverySupport}
- import akka.stream.alpakka.couchbase.{CouchbaseSessionRegistry, CouchbaseSessionSettings}
+ import akka.stream.alpakka.couchbase.scaladsl.{ CouchbaseSession, DiscoverySupport }
+ import akka.stream.alpakka.couchbase.{ CouchbaseSessionRegistry, CouchbaseSessionSettings }
val registry = CouchbaseSessionRegistry(actorSystem)
@@ -47,7 +47,7 @@ class DiscoverySpec extends AnyWordSpec with Matchers with BeforeAndAfterAll wit
"be created from settings" in {
// #create
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
- import akka.stream.alpakka.couchbase.scaladsl.{CouchbaseSession, DiscoverySupport}
+ import akka.stream.alpakka.couchbase.scaladsl.{ CouchbaseSession, DiscoverySupport }
implicit val ec: ExecutionContext = actorSystem.dispatcher
val sessionSettings = CouchbaseSessionSettings(actorSystem)
diff --git a/csv-bench/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvBench.scala
index 3efd95d9..fe3ddb2c 100644
--- a/csv-bench/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvBench.scala
+++ b/csv-bench/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvBench.scala
@@ -67,8 +67,7 @@ class CsvBench {
"1024", // ~8x smaller than row
"8192", // ~same size as row
"65536" // ~8k larger than row
- )
- )
+ ))
var bsSize: Int = _
var source: Source[ByteString, NotUsed] = _
@@ -132,8 +131,7 @@ object CsvBench {
def main(args: Array[String]): Unit = {
val bench = new CsvBench
bench.parse(
- new Blackhole("Today's password is swordfish. I understand instantiating Blackholes directly is dangerous.")
- )
+ new Blackhole("Today's password is swordfish. I understand instantiating Blackholes directly is dangerous."))
bench.tearDown()
}
}
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvFormatter.scala b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvFormatter.scala
index 0c707e51..d8efe7a2 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvFormatter.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvFormatter.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.csv.impl
-import java.nio.charset.{Charset, StandardCharsets}
+import java.nio.charset.{ Charset, StandardCharsets }
import akka.annotation.InternalApi
import akka.stream.alpakka.csv.scaladsl.CsvQuotingStyle
@@ -16,11 +16,11 @@ import scala.collection.immutable
* Internal API
*/
@InternalApi private[csv] class CsvFormatter(delimiter: Char,
- quoteChar: Char,
- escapeChar: Char,
- endOfLine: String,
- quotingStyle: CsvQuotingStyle,
- charset: Charset = StandardCharsets.UTF_8) {
+ quoteChar: Char,
+ escapeChar: Char,
+ endOfLine: String,
+ quotingStyle: CsvQuotingStyle,
+ charset: Charset = StandardCharsets.UTF_8) {
private[this] val charsetName = charset.name()
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParser.scala b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParser.scala
index 69962dfc..13ca679b 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParser.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParser.scala
@@ -9,7 +9,7 @@ import java.nio.charset.UnsupportedCharsetException
import akka.annotation.InternalApi
import akka.stream.alpakka.csv.MalformedCsvException
import akka.stream.alpakka.csv.scaladsl.ByteOrderMark
-import akka.util.{ByteIterator, ByteString, ByteStringBuilder}
+import akka.util.{ ByteIterator, ByteString, ByteStringBuilder }
import scala.collection.mutable
@@ -38,9 +38,9 @@ import scala.collection.mutable
* INTERNAL API: Use [[akka.stream.alpakka.csv.scaladsl.CsvParsing]] instead.
*/
@InternalApi private[csv] final class CsvParser(delimiter: Byte,
- quoteChar: Byte,
- escapeChar: Byte,
- maximumLineLength: Int) {
+ quoteChar: Byte,
+ escapeChar: Byte,
+ maximumLineLength: Int) {
import CsvParser._
@@ -144,7 +144,8 @@ import scala.collection.mutable
fieldStart = 0
}
- /** FieldBuilder will just cut the required part out of the incoming ByteBuffer
+ /**
+ * FieldBuilder will just cut the required part out of the incoming ByteBuffer
* as long as no escaping is used.
*/
private final class FieldBuilder {
@@ -155,7 +156,8 @@ import scala.collection.mutable
private[this] var useBuilder = false
private[this] var builder: ByteStringBuilder = _
- /** Set up the ByteString builder instead of relying on `ByteString.slice`.
+ /**
+ * Set up the ByteString builder instead of relying on `ByteString.slice`.
*/
@inline def init(): Unit =
if (!useBuilder) {
@@ -176,8 +178,8 @@ import scala.collection.mutable
private[this] def noCharEscaped() =
throw new MalformedCsvException(currentLineNo,
- lineLength,
- s"wrong escaping at $currentLineNo:$lineLength, no character after escape")
+ lineLength,
+ s"wrong escaping at $currentLineNo:$lineLength, no character after escape")
private[this] def checkForByteOrderMark(): Unit =
if (buffer.length >= 2) {
@@ -211,8 +213,7 @@ import scala.collection.mutable
throw new MalformedCsvException(
currentLineNo,
lineLength,
- s"no line end encountered within $maximumLineLength bytes on line $currentLineNo"
- )
+ s"no line end encountered within $maximumLineLength bytes on line $currentLineNo")
val byte = current.head
state match {
case LineStart =>
@@ -317,8 +318,7 @@ import scala.collection.mutable
throw new MalformedCsvException(
currentLineNo,
lineLength,
- s"wrong escaping at $currentLineNo:$lineLength, quote is escaped as ${quoteChar.toChar}${quoteChar.toChar}"
- )
+ s"wrong escaping at $currentLineNo:$lineLength, quote is escaped as ${quoteChar.toChar}${quoteChar.toChar}")
case b =>
fieldBuilder.add(escapeChar)
@@ -412,8 +412,7 @@ import scala.collection.mutable
throw new MalformedCsvException(
currentLineNo,
lineLength,
- s"unclosed quote at end of input $currentLineNo:$lineLength, no matching quote found"
- )
+ s"unclosed quote at end of input $currentLineNo:$lineLength, no matching quote found")
case WithinField =>
columns += fieldBuilder.result(pos)
Some(columns.toList)
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParsingStage.scala b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParsingStage.scala
index 57ee808f..f1c9f0a5 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParsingStage.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvParsingStage.scala
@@ -6,8 +6,8 @@ package akka.stream.alpakka.csv.impl
import akka.annotation.InternalApi
import akka.event.Logging
-import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
-import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
+import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.util.ByteString
import scala.annotation.tailrec
@@ -17,9 +17,9 @@ import scala.util.control.NonFatal
* Internal API: Use [[akka.stream.alpakka.csv.scaladsl.CsvParsing]] instead.
*/
@InternalApi private[csv] class CsvParsingStage(delimiter: Byte,
- quoteChar: Byte,
- escapeChar: Byte,
- maximumLineLength: Int)
+ quoteChar: Byte,
+ escapeChar: Byte,
+ maximumLineLength: Int)
extends GraphStage[FlowShape[ByteString, List[ByteString]]] {
private val in = Inlet[ByteString](Logging.simpleName(this) + ".in")
@@ -49,13 +49,14 @@ import scala.util.control.NonFatal
private def tryPollBuffer() =
try buffer.poll(requireLineEnd = true) match {
- case Some(csvLine) => push(out, csvLine)
- case _ =>
- if (isClosed(in)) {
- emitRemaining()
- completeStage()
- } else pull(in)
- } catch {
+ case Some(csvLine) => push(out, csvLine)
+ case _ =>
+ if (isClosed(in)) {
+ emitRemaining()
+ completeStage()
+ } else pull(in)
+ }
+ catch {
case NonFatal(ex) => failStage(ex)
}
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapJavaStage.scala b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapJavaStage.scala
index 7c14c36f..3240d234 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapJavaStage.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapJavaStage.scala
@@ -6,10 +6,10 @@ package akka.stream.alpakka.csv.impl
import java.nio.charset.Charset
import java.util.stream.Collectors
-import java.{util => ju}
+import java.{ util => ju }
import akka.annotation.InternalApi
import akka.stream._
-import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
import akka.util.ByteString
/**
@@ -22,10 +22,10 @@ import akka.util.ByteString
* @param headerPlaceholder placeholder used when there are more headers than data.
*/
@InternalApi private[csv] abstract class CsvToMapJavaStageBase[V](columnNames: ju.Optional[ju.Collection[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: ju.Optional[V],
- headerPlaceholder: ju.Optional[String])
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: ju.Optional[V],
+ headerPlaceholder: ju.Optional[String])
extends GraphStage[FlowShape[ju.Collection[ByteString], ju.Map[String, V]]] {
override protected def initialAttributes: Attributes = Attributes.name("CsvToMap")
@@ -60,8 +60,7 @@ import akka.util.ByteString
process(elem, zipWithHeaders)
}
}
- }
- )
+ })
private def process(elem: ju.Collection[ByteString], combine: ju.Collection[V] => ju.Map[String, V]) = {
if (headers.isPresent) {
@@ -73,9 +72,10 @@ import akka.util.ByteString
}
}
- setHandler(out, new OutHandler {
- override def onPull(): Unit = pull(in)
- })
+ setHandler(out,
+ new OutHandler {
+ override def onPull(): Unit = pull(in)
+ })
private def zipWithHeaders(elem: ju.Collection[V]): ju.Map[String, V] = {
val map = new ju.HashMap[String, V]()
@@ -126,15 +126,15 @@ import akka.util.ByteString
* Internal API
*/
@InternalApi private[csv] class CsvToMapJavaStage(columnNames: ju.Optional[ju.Collection[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: ju.Optional[ByteString],
- headerPlaceholder: ju.Optional[String])
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: ju.Optional[ByteString],
+ headerPlaceholder: ju.Optional[String])
extends CsvToMapJavaStageBase[ByteString](columnNames,
- charset,
- combineAll,
- customFieldValuePlaceholder,
- headerPlaceholder) {
+ charset,
+ combineAll,
+ customFieldValuePlaceholder,
+ headerPlaceholder) {
override val fieldValuePlaceholder: ByteString = ByteString("")
@@ -146,15 +146,15 @@ import akka.util.ByteString
* Internal API
*/
@InternalApi private[csv] class CsvToMapAsStringsJavaStage(columnNames: ju.Optional[ju.Collection[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: ju.Optional[String],
- headerPlaceholder: ju.Optional[String])
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: ju.Optional[String],
+ headerPlaceholder: ju.Optional[String])
extends CsvToMapJavaStageBase[String](columnNames,
- charset,
- combineAll,
- customFieldValuePlaceholder,
- headerPlaceholder) {
+ charset,
+ combineAll,
+ customFieldValuePlaceholder,
+ headerPlaceholder) {
override val fieldValuePlaceholder: String = ""
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapStage.scala b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapStage.scala
index cc7f0f2f..2366a1f0 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapStage.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/impl/CsvToMapStage.scala
@@ -6,8 +6,8 @@ package akka.stream.alpakka.csv.impl
import java.nio.charset.Charset
import akka.annotation.InternalApi
-import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
-import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
+import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.util.ByteString
import scala.collection.immutable
@@ -23,10 +23,10 @@ import scala.collection.immutable
* @param headerPlaceholder placeholder used when there are more headers than data.
*/
@InternalApi private[csv] abstract class CsvToMapStageBase[V](columnNames: Option[immutable.Seq[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: Option[V],
- headerPlaceholder: Option[String])
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: Option[V],
+ headerPlaceholder: Option[String])
extends GraphStage[FlowShape[immutable.Seq[ByteString], Map[String, V]]] {
override protected def initialAttributes: Attributes = Attributes.name("CsvToMap")
@@ -71,8 +71,8 @@ import scala.collection.immutable
private def combineUsingPlaceholder(elem: immutable.Seq[ByteString]): Headers => Map[String, V] = headers => {
val combined = headers.get
.zipAll(transformElements(elem),
- headerPlaceholder.getOrElse("MissingHeader"),
- customFieldValuePlaceholder.getOrElse(fieldValuePlaceholder))
+ headerPlaceholder.getOrElse("MissingHeader"),
+ customFieldValuePlaceholder.getOrElse(fieldValuePlaceholder))
val filtering: String => Boolean = key =>
headerPlaceholder.map(_.equalsIgnoreCase(key)).fold(key.equalsIgnoreCase("MissingHeader"))(identity)
val missingHeadersContent =
@@ -101,15 +101,15 @@ import scala.collection.immutable
* Internal API
*/
@InternalApi private[csv] class CsvToMapStage(columnNames: Option[immutable.Seq[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: Option[ByteString],
- headerPlaceholder: Option[String])
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: Option[ByteString],
+ headerPlaceholder: Option[String])
extends CsvToMapStageBase[ByteString](columnNames,
- charset,
- combineAll,
- customFieldValuePlaceholder,
- headerPlaceholder) {
+ charset,
+ combineAll,
+ customFieldValuePlaceholder,
+ headerPlaceholder) {
override val fieldValuePlaceholder: ByteString = ByteString("")
@@ -121,11 +121,12 @@ import scala.collection.immutable
* Internal API
*/
@InternalApi private[csv] class CsvToMapAsStringsStage(columnNames: Option[immutable.Seq[String]],
- charset: Charset,
- combineAll: Boolean,
- customFieldValuePlaceholder: Option[String],
- headerPlaceholder: Option[String])
- extends CsvToMapStageBase[String](columnNames, charset, combineAll, customFieldValuePlaceholder, headerPlaceholder) {
+ charset: Charset,
+ combineAll: Boolean,
+ customFieldValuePlaceholder: Option[String],
+ headerPlaceholder: Option[String])
+ extends CsvToMapStageBase[String](columnNames, charset, combineAll, customFieldValuePlaceholder,
+ headerPlaceholder) {
override val fieldValuePlaceholder: String = ""
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvFormatting.scala b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvFormatting.scala
index d3df181e..6ab6b0c5 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvFormatting.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvFormatting.scala
@@ -4,16 +4,17 @@
package akka.stream.alpakka.csv.scaladsl
-import java.nio.charset.{Charset, StandardCharsets}
+import java.nio.charset.{ Charset, StandardCharsets }
import akka.NotUsed
import akka.stream.alpakka.csv.impl.CsvFormatter
-import akka.stream.scaladsl.{Flow, Source}
+import akka.stream.scaladsl.{ Flow, Source }
import akka.util.ByteString
import scala.collection.immutable
-/** Provides CSV formatting flows that convert a sequence of String into their CSV representation
+/**
+ * Provides CSV formatting flows that convert a sequence of String into their CSV representation
* in [[akka.util.ByteString]].
*/
object CsvFormatting {
@@ -39,8 +40,7 @@ object CsvFormatting {
endOfLine: String = "\r\n",
quotingStyle: CsvQuotingStyle = CsvQuotingStyle.Required,
charset: Charset = StandardCharsets.UTF_8,
- byteOrderMark: Option[ByteString] = None
- ): Flow[T, ByteString, NotUsed] = {
+ byteOrderMark: Option[ByteString] = None): Flow[T, ByteString, NotUsed] = {
val formatter =
new CsvFormatter(delimiter, quoteChar, escapeChar, endOfLine, quotingStyle, charset)
byteOrderMark.fold {
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvParsing.scala b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvParsing.scala
index 2a96adaa..27f5063a 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvParsing.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvParsing.scala
@@ -19,12 +19,13 @@ object CsvParsing {
val DoubleQuote: Byte = '"'
val maximumLineLengthDefault: Int = 10 * 1024
- /** Creates CSV parsing flow that reads CSV lines from incoming
+ /**
+ * Creates CSV parsing flow that reads CSV lines from incoming
* [[akka.util.ByteString]] objects.
*/
def lineScanner(delimiter: Byte = Comma,
- quoteChar: Byte = DoubleQuote,
- escapeChar: Byte = Backslash,
- maximumLineLength: Int = maximumLineLengthDefault): Flow[ByteString, List[ByteString], NotUsed] =
+ quoteChar: Byte = DoubleQuote,
+ escapeChar: Byte = Backslash,
+ maximumLineLength: Int = maximumLineLengthDefault): Flow[ByteString, List[ByteString], NotUsed] =
Flow.fromGraph(new CsvParsingStage(delimiter, quoteChar, escapeChar, maximumLineLength))
}
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvQuotingStyle.scala b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvQuotingStyle.scala
index 1b1c0188..f9d13ff5 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvQuotingStyle.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvQuotingStyle.scala
@@ -21,7 +21,7 @@ object CsvQuotingStyle {
/** Java to Scala conversion helper */
def asScala(qs: javadsl.CsvQuotingStyle): CsvQuotingStyle = qs match {
- case javadsl.CsvQuotingStyle.ALWAYS => CsvQuotingStyle.Always
+ case javadsl.CsvQuotingStyle.ALWAYS => CsvQuotingStyle.Always
case javadsl.CsvQuotingStyle.REQUIRED => CsvQuotingStyle.Required
}
diff --git a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvToMap.scala b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvToMap.scala
index 98a24827..b74d8aef 100644
--- a/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvToMap.scala
+++ b/csv/src/main/scala/akka/stream/alpakka/csv/scaladsl/CsvToMap.scala
@@ -4,10 +4,10 @@
package akka.stream.alpakka.csv.scaladsl
-import java.nio.charset.{Charset, StandardCharsets}
+import java.nio.charset.{ Charset, StandardCharsets }
import akka.NotUsed
-import akka.stream.alpakka.csv.impl.{CsvToMapAsStringsStage, CsvToMapStage}
+import akka.stream.alpakka.csv.impl.{ CsvToMapAsStringsStage, CsvToMapStage }
import akka.stream.scaladsl.Flow
import akka.util.ByteString
@@ -21,11 +21,10 @@ object CsvToMap {
def toMap(charset: Charset = StandardCharsets.UTF_8): Flow[List[ByteString], Map[String, ByteString], NotUsed] =
Flow.fromGraph(
new CsvToMapStage(columnNames = None,
- charset,
- combineAll = false,
- customFieldValuePlaceholder = Option.empty,
- headerPlaceholder = Option.empty)
- )
+ charset,
+ combineAll = false,
+ customFieldValuePlaceholder = Option.empty,
+ headerPlaceholder = Option.empty))
/**
* A flow translating incoming [[scala.List]] of [[akka.util.ByteString]] to a map of String keys and values using the stream's first
@@ -35,11 +34,10 @@ object CsvToMap {
def toMapAsStrings(charset: Charset = StandardCharsets.UTF_8): Flow[List[ByteString], Map[String, String], NotUsed] =
Flow.fromGraph(
new CsvToMapAsStringsStage(columnNames = None,
- charset,
- combineAll = false,
- customFieldValuePlaceholder = Option.empty,
- headerPlaceholder = Option.empty)
- )
+ charset,
+ combineAll = false,
+ customFieldValuePlaceholder = Option.empty,
+ headerPlaceholder = Option.empty))
/**
* A flow translating incoming [[scala.List]] of [[akka.util.ByteString]] to a map of String and ByteString
@@ -52,15 +50,13 @@ object CsvToMap {
def toMapCombineAll(
charset: Charset = StandardCharsets.UTF_8,
customFieldValuePlaceholder: Option[ByteString] = None,
- headerPlaceholder: Option[String] = None
- ): Flow[List[ByteString], Map[String, ByteString], NotUsed] =
+ headerPlaceholder: Option[String] = None): Flow[List[ByteString], Map[String, ByteString], NotUsed] =
Flow.fromGraph(
new CsvToMapStage(columnNames = None,
- charset,
- combineAll = true,
- customFieldValuePlaceholder = customFieldValuePlaceholder,
- headerPlaceholder = headerPlaceholder)
- )
+ charset,
+ combineAll = true,
+ customFieldValuePlaceholder = customFieldValuePlaceholder,
+ headerPlaceholder = headerPlaceholder))
/**
* A flow translating incoming [[scala.List]] of [[akka.util.ByteString]] to a map of String keys and values
@@ -73,15 +69,13 @@ object CsvToMap {
def toMapAsStringsCombineAll(
charset: Charset = StandardCharsets.UTF_8,
customFieldValuePlaceholder: Option[String] = None,
- headerPlaceholder: Option[String] = None
- ): Flow[List[ByteString], Map[String, String], NotUsed] =
+ headerPlaceholder: Option[String] = None): Flow[List[ByteString], Map[String, String], NotUsed] =
Flow.fromGraph(
new CsvToMapAsStringsStage(columnNames = None,
- charset,
- combineAll = true,
- customFieldValuePlaceholder = customFieldValuePlaceholder,
- headerPlaceholder = headerPlaceholder)
- )
+ charset,
+ combineAll = true,
+ customFieldValuePlaceholder = customFieldValuePlaceholder,
+ headerPlaceholder = headerPlaceholder))
/**
* A flow translating incoming [[scala.List]] of [[akka.util.ByteString]] to a map of String and ByteString using the given headers
@@ -91,11 +85,10 @@ object CsvToMap {
def withHeaders(headers: String*): Flow[List[ByteString], Map[String, ByteString], NotUsed] =
Flow.fromGraph(
new CsvToMapStage(Some(headers.toList),
- StandardCharsets.UTF_8,
- combineAll = false,
- customFieldValuePlaceholder = Option.empty,
- headerPlaceholder = Option.empty)
- )
+ StandardCharsets.UTF_8,
+ combineAll = false,
+ customFieldValuePlaceholder = Option.empty,
+ headerPlaceholder = Option.empty))
/**
* A flow translating incoming [[scala.List]] of [[akka.util.ByteString]] to a map of String keys and values using the given headers
@@ -105,13 +98,11 @@ object CsvToMap {
*/
def withHeadersAsStrings(
charset: Charset,
- headers: String*
- ): Flow[List[ByteString], Map[String, String], NotUsed] =
+ headers: String*): Flow[List[ByteString], Map[String, String], NotUsed] =
Flow.fromGraph(
new CsvToMapAsStringsStage(Some(headers.toList),
- charset,
- combineAll = false,
- customFieldValuePlaceholder = Option.empty,
- headerPlaceholder = Option.empty)
- )
+ charset,
+ combineAll = false,
+ customFieldValuePlaceholder = Option.empty,
+ headerPlaceholder = Option.empty))
}
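
The two stages reformatted above compose into the usual parse-then-map pipeline. A minimal, self-contained sketch mirroring the usage in the specs below (the sample input line is illustrative):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
    import akka.stream.scaladsl.{ Sink, Source }
    import akka.util.ByteString

    object CsvPipelineExample extends App {
      implicit val system: ActorSystem = ActorSystem()

      Source
        .single(ByteString("eins,zwei,drei\n11,12,13\n"))
        .via(CsvParsing.lineScanner())   // ByteString chunks -> List[ByteString] per CSV line
        .via(CsvToMap.toMapAsStrings())  // first line becomes the header row
        .runWith(Sink.foreach(println))  // Map(eins -> 11, zwei -> 12, drei -> 13)
        .onComplete(_ => system.terminate())(system.dispatcher)
    }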
diff --git a/csv/src/test/scala/akka/stream/alpakka/csv/CsvParserSpec.scala b/csv/src/test/scala/akka/stream/alpakka/csv/CsvParserSpec.scala
index 3eff5afc..852bab1f 100644
--- a/csv/src/test/scala/akka/stream/alpakka/csv/CsvParserSpec.scala
+++ b/csv/src/test/scala/akka/stream/alpakka/csv/CsvParserSpec.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.csv
-import java.nio.charset.{StandardCharsets, UnsupportedCharsetException}
+import java.nio.charset.{ StandardCharsets, UnsupportedCharsetException }
import akka.stream.alpakka.csv.impl.CsvParser
import akka.stream.alpakka.csv.scaladsl.ByteOrderMark
@@ -93,7 +93,7 @@ class CsvParserSpec extends AnyWordSpec with Matchers with OptionValues with Log
"parse double quote chars within quotes inte one quoute at end of value" in {
expectInOut("\"Venture \"\"Extended Edition\"\"\",\"\",4900.00\n",
- List("Venture \"Extended Edition\"", "", "4900.00"))
+ List("Venture \"Extended Edition\"", "", "4900.00"))
}
"parse double escape chars into one escape char" in {
@@ -298,8 +298,8 @@ class CsvParserSpec extends AnyWordSpec with Matchers with OptionValues with Log
"read values with different separator" in {
expectInOut("$Foo $#$Bar $#$Baz $\n", List("Foo ", "Bar ", "Baz "))(delimiter = '#',
- quoteChar = '$',
- escapeChar = '\\')
+ quoteChar = '$',
+ escapeChar = '\\')
}
"fail on a very 'long' line" in {
@@ -348,17 +348,17 @@ class CsvParserSpec extends AnyWordSpec with Matchers with OptionValues with Log
}
def expectInOut(in: String, expected: List[String]*)(implicit delimiter: Byte = ',',
- quoteChar: Byte = '"',
- escapeChar: Byte = '\\',
- requireLineEnd: Boolean = true): Unit = {
+ quoteChar: Byte = '"',
+ escapeChar: Byte = '\\',
+ requireLineEnd: Boolean = true): Unit = {
val bsIn = ByteString(in)
expectBsInOut(bsIn, expected: _*)(delimiter, quoteChar, escapeChar, requireLineEnd)
}
def expectBsInOut(bsIn: ByteString, expected: List[String]*)(implicit delimiter: Byte = ',',
- quoteChar: Byte = '"',
- escapeChar: Byte = '\\',
- requireLineEnd: Boolean = true): Unit = {
+ quoteChar: Byte = '"',
+ escapeChar: Byte = '\\',
+ requireLineEnd: Boolean = true): Unit = {
val parser = new CsvParser(delimiter, quoteChar, escapeChar, maximumLineLength)
parser.offer(bsIn)
expected.foreach { out =>
diff --git a/csv/src/test/scala/docs/scaladsl/CsvFormattingSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvFormattingSpec.scala
index fa1540c3..a8fed803 100644
--- a/csv/src/test/scala/docs/scaladsl/CsvFormattingSpec.scala
+++ b/csv/src/test/scala/docs/scaladsl/CsvFormattingSpec.scala
@@ -6,7 +6,7 @@ package docs.scaladsl
import java.nio.charset.StandardCharsets
-import akka.stream.scaladsl.{Flow, Sink, Source}
+import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.util.ByteString
@@ -16,7 +16,7 @@ class CsvFormattingSpec extends CsvSpec {
def documentation(): Unit = {
// #flow-type
- import akka.stream.alpakka.csv.scaladsl.{CsvFormatting, CsvQuotingStyle}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvFormatting, CsvQuotingStyle }
// #flow-type
import CsvFormatting._
@@ -59,7 +59,7 @@ class CsvFormattingSpec extends CsvSpec {
"include Byte Order Mark" in assertAllStagesStopped {
// #formatting-bom
- import akka.stream.alpakka.csv.scaladsl.{ByteOrderMark, CsvFormatting}
+ import akka.stream.alpakka.csv.scaladsl.{ ByteOrderMark, CsvFormatting }
// #formatting-bom
val fut =
@@ -72,8 +72,7 @@ class CsvFormattingSpec extends CsvSpec {
// #formatting-bom
// format: on
fut.futureValue should be(
- List(ByteOrderMark.UTF_8, ByteString("eins,zwei,drei\r\n"), ByteString("uno,dos,tres\r\n"))
- )
+ List(ByteOrderMark.UTF_8, ByteString("eins,zwei,drei\r\n"), ByteString("uno,dos,tres\r\n")))
}
}
diff --git a/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala
index 23046034..605ef953 100644
--- a/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala
+++ b/csv/src/test/scala/docs/scaladsl/CsvParsingSpec.scala
@@ -7,10 +7,10 @@ package docs.scaladsl
import java.nio.file.Paths
import akka.NotUsed
-import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
-import akka.stream.scaladsl.{FileIO, Flow, Keep, Sink, Source}
+import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
+import akka.stream.scaladsl.{ FileIO, Flow, Keep, Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
-import akka.stream.testkit.scaladsl.{TestSink, TestSource}
+import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.util.ByteString
import scala.collection.immutable.Seq
@@ -111,8 +111,7 @@ class CsvParsingSpec extends CsvSpec {
val input = Seq(
"eins,zw",
"ei,drei\nuno",
- ",dos,tres\n"
- ).map(ByteString(_))
+ ",dos,tres\n").map(ByteString(_))
val fut = Source.apply(input).via(CsvParsing.lineScanner()).map(_.map(_.utf8String)).runWith(Sink.seq)
val res = fut.futureValue
res.head should be(List("eins", "zwei", "drei"))
@@ -189,18 +188,14 @@ class CsvParsingSpec extends CsvSpec {
"Make" -> "Ford",
"Model" -> "E350",
"Description" -> "ac, abs, moon",
- "Price" -> "3000.00"
- )
- )
+ "Price" -> "3000.00"))
res(1) should contain allElementsOf (
Map(
"Year" -> "1999",
"Make" -> "Chevy",
"Model" -> "Venture \"Extended Edition\"",
"Description" -> "",
- "Price" -> "4900.00"
- )
- )
+ "Price" -> "4900.00"))
res(2) should contain allElementsOf (
Map(
"Year" -> "1996",
@@ -208,45 +203,35 @@ class CsvParsingSpec extends CsvSpec {
"Model" -> "Grand Cherokee",
"Description" -> """MUST SELL!
|air, moon roof, loaded""".stripMargin,
- "Price" -> "4799.00"
- )
- )
+ "Price" -> "4799.00"))
res(3) should contain allElementsOf (
Map(
"Year" -> "1999",
"Make" -> "Chevy",
"Model" -> "Venture \"Extended Edition, Very Large\"",
"Description" -> "",
- "Price" -> "5000.00"
- )
- )
+ "Price" -> "5000.00"))
res(4) should contain allElementsOf (
Map(
"Year" -> "",
"Make" -> "",
"Model" -> "Venture \"Extended Edition\"",
"Description" -> "",
- "Price" -> "4900.00"
- )
- )
+ "Price" -> "4900.00"))
res(5) should contain allElementsOf (
Map(
"Year" -> "1995",
"Make" -> "VW",
"Model" -> "Golf \"GTE\"",
"Description" -> "",
- "Price" -> "5000.00"
- )
- )
+ "Price" -> "5000.00"))
res(6) should contain allElementsOf (
Map(
"Year" -> "1996",
"Make" -> "VW",
"Model" -> "Golf GTE",
"Description" -> "",
- "Price" -> "5000.00"
- )
- )
+ "Price" -> "5000.00"))
}
}
}
diff --git a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
index d5eec85c..0e7d7469 100644
--- a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
+++ b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
@@ -8,7 +8,7 @@ import akka.actor.ActorSystem
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
+import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
diff --git a/csv/src/test/scala/docs/scaladsl/CsvToMapSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvToMapSpec.scala
index 0838114a..ef693dec 100644
--- a/csv/src/test/scala/docs/scaladsl/CsvToMapSpec.scala
+++ b/csv/src/test/scala/docs/scaladsl/CsvToMapSpec.scala
@@ -7,8 +7,8 @@ package docs.scaladsl
import java.nio.charset.StandardCharsets
import akka.NotUsed
-import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
-import akka.stream.scaladsl.{Flow, Sink, Source}
+import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
+import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
import akka.util.ByteString
@@ -54,7 +54,7 @@ class CsvToMapSpec extends CsvSpec {
"CSV to Map" should {
"parse header line and data line into map" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -77,9 +77,7 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"), "zwei" -> ByteString("12"), "drei" -> ByteString("13")),
- Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))
- )
- )
+ Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))))
// #header-line
}
@@ -105,8 +103,7 @@ class CsvToMapSpec extends CsvSpec {
.via(CsvToMap.toMap())
.runWith(Sink.head)
future.futureValue should be(
- Map("eins" -> ByteString("1"), "zwei" -> ByteString("2"), "drei" -> ByteString("3"))
- )
+ Map("eins" -> ByteString("1"), "zwei" -> ByteString("2"), "drei" -> ByteString("3")))
}
"parse header line and decode data line" in assertAllStagesStopped {
@@ -131,15 +128,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "drei" -> "13"),
- Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")))
// #header-line
}
"use column names and data line into map" in assertAllStagesStopped {
// #column-names
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #column-names
val future =
@@ -162,9 +157,7 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"), "zwei" -> ByteString("12"), "drei" -> ByteString("13")),
- Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))
- )
- )
+ Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))))
// #column-names
}
@@ -189,15 +182,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "drei" -> "13"),
- Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")))
// #column-names
}
"parse header and decode data line. Be OK with more headers column than data (including the header in the result)" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -220,15 +211,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "drei" -> "13", "vier" -> "", "fünt" -> ""),
- Map("eins" -> "21", "zwei" -> "22", "drei" -> "23", "vier" -> "", "fünt" -> "")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "drei" -> "23", "vier" -> "", "fünt" -> "")))
// #header-line
}
"parse header and decode data line. Be OK when there are more data than header column, set a default header in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -251,15 +240,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "drei" -> "13", "MissingHeader0" -> "14"),
- Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "drei" -> "23")))
// #header-line
}
"parse header and decode data line. Be OK when there are more data than header column, set the user configured header in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -282,15 +269,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "MyCustomHeader0" -> "13"),
- Map("eins" -> "21", "zwei" -> "22", "MyCustomHeader0" -> "")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "MyCustomHeader0" -> "")))
// #header-line
}
"parse header and decode data line. Be OK when there are more headers than data column, set the user configured field value in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -313,16 +298,14 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> "11", "zwei" -> "12", "drei" -> "13", "fünt" -> "missing"),
- Map("eins" -> "21", "zwei" -> "22", "drei" -> "23", "fünt" -> "missing")
- )
- )
+ Map("eins" -> "21", "zwei" -> "22", "drei" -> "23", "fünt" -> "missing")))
// #header-line
}
}
"be OK with more headers column than data (including the header in the result)" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -345,23 +328,21 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"),
- "zwei" -> ByteString("12"),
- "drei" -> ByteString("13"),
- "vier" -> ByteString(""),
- "fünt" -> ByteString("")),
+ "zwei" -> ByteString("12"),
+ "drei" -> ByteString("13"),
+ "vier" -> ByteString(""),
+ "fünt" -> ByteString("")),
Map("eins" -> ByteString("21"),
- "zwei" -> ByteString("22"),
- "drei" -> ByteString("23"),
- "vier" -> ByteString(""),
- "fünt" -> ByteString(""))
- )
- )
+ "zwei" -> ByteString("22"),
+ "drei" -> ByteString("23"),
+ "vier" -> ByteString(""),
+ "fünt" -> ByteString(""))))
// #header-line
}
"be OK when there are more data than header column, set a default header in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -384,19 +365,17 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"),
- "zwei" -> ByteString("12"),
- "drei" -> ByteString("13"),
- "MissingHeader0" -> ByteString("14"),
- "MissingHeader1" -> ByteString("15")),
- Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))
- )
- )
+ "zwei" -> ByteString("12"),
+ "drei" -> ByteString("13"),
+ "MissingHeader0" -> ByteString("14"),
+ "MissingHeader1" -> ByteString("15")),
+ Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "drei" -> ByteString("23"))))
// #header-line
}
"be OK when there are more data than header column, set the user configured header in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -419,15 +398,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"), "zwei" -> ByteString("12"), "MyCustomHeader0" -> ByteString("13")),
- Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "MyCustomHeader0" -> ByteString(""))
- )
- )
+ Map("eins" -> ByteString("21"), "zwei" -> ByteString("22"), "MyCustomHeader0" -> ByteString(""))))
// #header-line
}
"be OK when there are more headers than data column, set the user configured field value in the result" in assertAllStagesStopped {
// #header-line
- import akka.stream.alpakka.csv.scaladsl.{CsvParsing, CsvToMap}
+ import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
// #header-line
val future =
@@ -450,15 +427,13 @@ class CsvToMapSpec extends CsvSpec {
result should be(
Seq(
Map("eins" -> ByteString("11"),
- "zwei" -> ByteString("12"),
- "drei" -> ByteString("13"),
- "fünt" -> ByteString("missing")),
+ "zwei" -> ByteString("12"),
+ "drei" -> ByteString("13"),
+ "fünt" -> ByteString("missing")),
Map("eins" -> ByteString("21"),
- "zwei" -> ByteString("22"),
- "drei" -> ByteString(""),
- "fünt" -> ByteString("missing"))
- )
- )
+ "zwei" -> ByteString("22"),
+ "drei" -> ByteString(""),
+ "fünt" -> ByteString("missing"))))
// #header-line
}
}
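For readers unfamiliar with the API these tests exercise, a minimal usage
sketch of CsvToMap (not part of this diff; the system name and sample data
are illustrative):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.csv.scaladsl.{ CsvParsing, CsvToMap }
    import akka.stream.scaladsl.{ Sink, Source }
    import akka.util.ByteString

    object CsvToMapSketch extends App {
      implicit val system: ActorSystem = ActorSystem("csv-sketch")
      import system.dispatcher

      // The first CSV line becomes the headers; every following line
      // is emitted as a Map keyed by those headers.
      Source.single(ByteString("eins,zwei,drei\n11,12,13\n21,22,23\n"))
        .via(CsvParsing.lineScanner())
        .via(CsvToMap.toMapAsStrings())
        .runWith(Sink.seq)
        .map(println) // Vector(Map(eins -> 11, ...), Map(eins -> 21, ...))
        .andThen { case _ => system.terminate() }
    }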
diff --git a/doc-examples/src/test/scala/akka/stream/alpakka/eip/scaladsl/PassThroughExamples.scala b/doc-examples/src/test/scala/akka/stream/alpakka/eip/scaladsl/PassThroughExamples.scala
index 78dcd393..8a6ef83c 100644
--- a/doc-examples/src/test/scala/akka/stream/alpakka/eip/scaladsl/PassThroughExamples.scala
+++ b/doc-examples/src/test/scala/akka/stream/alpakka/eip/scaladsl/PassThroughExamples.scala
@@ -7,11 +7,11 @@ package akka.stream.alpakka.eip.scaladsl
import akka.NotUsed
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer.DrainingControl
-import akka.kafka.scaladsl.{Committer, Consumer}
-import akka.kafka.{CommitterSettings, ConsumerMessage, ConsumerSettings, Subscriptions}
+import akka.kafka.scaladsl.{ Committer, Consumer }
+import akka.kafka.{ CommitterSettings, ConsumerMessage, ConsumerSettings, Subscriptions }
import akka.stream.scaladsl._
-import akka.stream.{FlowShape, Graph}
-import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
+import akka.stream.{ FlowShape, Graph }
+import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
@@ -24,8 +24,8 @@ class PassThroughExamples extends AnyWordSpec with BeforeAndAfterAll with Matche
"PassThroughFlow" should {
" original message is maintained " in {
- //#PassThroughWithKeep
- //Sample Source
+ // #PassThroughWithKeep
+ // Sample Source
val source = Source(List(1, 2, 3))
// Pass through this flow maintaining the original message
@@ -37,16 +37,16 @@ class PassThroughExamples extends AnyWordSpec with BeforeAndAfterAll with Matche
.via(PassThroughFlow(passThroughMe, Keep.right))
.runWith(Sink.seq)
- //Verify results
+ // Verify results
ret.futureValue should be(Vector(1, 2, 3))
- //#PassThroughWithKeep
+ // #PassThroughWithKeep
}
" original message and pass through flow output are returned " in {
- //#PassThroughTuple
- //Sample Source
+ // #PassThroughTuple
+ // Sample Source
val source = Source(List(1, 2, 3))
// Pass through this flow maintaining the original message
@@ -58,9 +58,9 @@ class PassThroughExamples extends AnyWordSpec with BeforeAndAfterAll with Matche
.via(PassThroughFlow(passThroughMe))
.runWith(Sink.seq)
- //Verify results
+ // Verify results
ret.futureValue should be(Vector((10, 1), (20, 2), (30, 3)))
- //#PassThroughTuple
+ // #PassThroughTuple
}
}
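The PassThroughFlow used in these tests is defined further up in the same
file; condensed, the wiring is a Broadcast/Zip pair. A standalone sketch of
that pattern (plain Akka Streams, not this repository's helper):

    import akka.NotUsed
    import akka.stream.FlowShape
    import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Zip }

    object PassThroughSketch {
      def apply[A, T](processing: Flow[A, T, NotUsed]): Flow[A, (T, A), NotUsed] =
        Flow.fromGraph(GraphDSL.create() { implicit b =>
          import GraphDSL.Implicits._
          val broadcast = b.add(Broadcast[A](2))
          val zip = b.add(Zip[T, A]())
          // one copy runs through the processing flow, the other bypasses it
          broadcast.out(0) ~> processing ~> zip.in0
          broadcast.out(1) ~> zip.in1
          FlowShape(broadcast.in, zip.out)
        })
    }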
diff --git a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala
index c0860656..81a5d1f5 100644
--- a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala
+++ b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala
@@ -29,8 +29,7 @@ import scala.concurrent.Future
* @tparam Out dynamodb response type
*/
sealed class DynamoDbOp[In <: DynamoDbRequest, Out <: DynamoDbResponse](
- sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out]
-) {
+ sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out]) {
def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] = sdkExecute(client)(request).toScala
}
@@ -45,8 +44,7 @@ sealed class DynamoDbOp[In <: DynamoDbRequest, Out <: DynamoDbResponse](
*/
sealed class DynamoDbPaginatedOp[In <: DynamoDbRequest, Out <: DynamoDbResponse, Pub <: SdkPublisher[Out]](
sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out],
- sdkPublisher: DynamoDbAsyncClient => In => Pub
-) extends DynamoDbOp[In, Out](sdkExecute) {
+ sdkPublisher: DynamoDbAsyncClient => In => Pub) extends DynamoDbOp[In, Out](sdkExecute) {
def publisher(request: In)(implicit client: DynamoDbAsyncClient): Publisher[Out] = sdkPublisher(client)(request)
}
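Conceptually, each DynamoDbOp just adapts one SDK method into a Future-based
call. A standalone re-creation of that adapter (not the sealed library class,
which can only be instantiated inside its package):

    import java.util.concurrent.CompletableFuture
    import scala.compat.java8.FutureConverters._
    import scala.concurrent.Future
    import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
    import software.amazon.awssdk.services.dynamodb.model.{ ListTablesRequest, ListTablesResponse }

    class SdkOp[In, Out](sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out]) {
      def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] =
        sdkExecute(client)(request).toScala
    }

    // wired the same way the library wires ListTables:
    val listTablesOp =
      new SdkOp[ListTablesRequest, ListTablesResponse](client => req => client.listTables(req))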
diff --git a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/javadsl/DynamoDb.scala b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/javadsl/DynamoDb.scala
index fd3c0306..e5f0886a 100644
--- a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/javadsl/DynamoDb.scala
+++ b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/javadsl/DynamoDb.scala
@@ -10,11 +10,11 @@ import akka.NotUsed
import akka.actor.ClassicActorSystemProvider
import akka.annotation.ApiMayChange
import akka.stream.Materializer
-import akka.stream.alpakka.dynamodb.{scaladsl, DynamoDbOp, DynamoDbPaginatedOp}
-import akka.stream.javadsl.{Flow, FlowWithContext, Sink, Source}
+import akka.stream.alpakka.dynamodb.{ scaladsl, DynamoDbOp, DynamoDbPaginatedOp }
+import akka.stream.javadsl.{ Flow, FlowWithContext, Sink, Source }
import software.amazon.awssdk.core.async.SdkPublisher
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
-import software.amazon.awssdk.services.dynamodb.model.{DynamoDbRequest, DynamoDbResponse}
+import software.amazon.awssdk.services.dynamodb.model.{ DynamoDbRequest, DynamoDbResponse }
import scala.util.Try
@@ -29,8 +29,8 @@ object DynamoDb {
* @param parallelism maximum number of in-flight requests at any given time
*/
def flow[In <: DynamoDbRequest, Out <: DynamoDbResponse](client: DynamoDbAsyncClient,
- operation: DynamoDbOp[In, Out],
- parallelism: Int): Flow[In, Out, NotUsed] =
+ operation: DynamoDbOp[In, Out],
+ parallelism: Int): Flow[In, Out, NotUsed] =
scaladsl.DynamoDb.flow(parallelism)(client, operation).asJava
/**
@@ -47,19 +47,16 @@ object DynamoDb {
def flowWithContext[In <: DynamoDbRequest, Out <: DynamoDbResponse, Ctx](
client: DynamoDbAsyncClient,
operation: DynamoDbOp[In, Out],
- parallelism: Int
- ): FlowWithContext[In, Ctx, Try[Out], Ctx, NotUsed] =
+ parallelism: Int): FlowWithContext[In, Ctx, Try[Out], Ctx, NotUsed] =
scaladsl.DynamoDb.flowWithContext[In, Out, Ctx](parallelism)(client, operation).asJava
/**
* Create a Source that will emit potentially multiple responses for a given request.
- *
*/
def source[In <: DynamoDbRequest, Out <: DynamoDbResponse, Pub <: SdkPublisher[Out]](
client: DynamoDbAsyncClient,
operation: DynamoDbPaginatedOp[In, Out, Pub],
- request: In
- ): Source[Out, NotUsed] =
+ request: In): Source[Out, NotUsed] =
scaladsl.DynamoDb.source(request)(client, operation).asJava
/**
@@ -69,8 +66,7 @@ object DynamoDb {
*/
def flowPaginated[In <: DynamoDbRequest, Out <: DynamoDbResponse](
client: DynamoDbAsyncClient,
- operation: DynamoDbPaginatedOp[In, Out, _]
- ): Flow[In, Out, NotUsed] =
+ operation: DynamoDbPaginatedOp[In, Out, _]): Flow[In, Out, NotUsed] =
scaladsl.DynamoDb.flowPaginated()(client, operation).asJava
/**
@@ -79,9 +75,9 @@ object DynamoDb {
*/
@deprecated("pass in the actor system instead of the materializer", "3.0.0")
def single[In <: DynamoDbRequest, Out <: DynamoDbResponse](client: DynamoDbAsyncClient,
- operation: DynamoDbOp[In, Out],
- request: In,
- mat: Materializer): CompletionStage[Out] =
+ operation: DynamoDbOp[In, Out],
+ request: In,
+ mat: Materializer): CompletionStage[Out] =
single(client, operation, request, mat.system)
/**
@@ -91,8 +87,7 @@ object DynamoDb {
client: DynamoDbAsyncClient,
operation: DynamoDbOp[In, Out],
request: In,
- system: ClassicActorSystemProvider
- ): CompletionStage[Out] = {
+ system: ClassicActorSystemProvider): CompletionStage[Out] = {
val sink: Sink[Out, CompletionStage[Out]] = Sink.head()
Source.single(request).via(flow(client, operation, 1)).runWith(sink, system)
}
diff --git a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala
index 366d0e0c..e7fe2036 100644
--- a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala
+++ b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala
@@ -9,13 +9,13 @@ import akka.actor.ClassicActorSystemProvider
import akka.dispatch.ExecutionContexts
import scala.annotation.implicitNotFound
-import akka.stream.alpakka.dynamodb.{DynamoDbOp, DynamoDbPaginatedOp}
-import akka.stream.scaladsl.{Flow, FlowWithContext, Sink, Source}
+import akka.stream.alpakka.dynamodb.{ DynamoDbOp, DynamoDbPaginatedOp }
+import akka.stream.scaladsl.{ Flow, FlowWithContext, Sink, Source }
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
import software.amazon.awssdk.services.dynamodb.model._
import scala.concurrent.Future
-import scala.util.{Failure, Success, Try}
+import scala.util.{ Failure, Success, Try }
/**
* Factory of DynamoDb Akka Stream operators.
@@ -28,8 +28,7 @@ object DynamoDb {
* @param parallelism maximum number of in-flight requests at any given time
*/
def flow[In <: DynamoDbRequest, Out <: DynamoDbResponse](
- parallelism: Int
- )(implicit client: DynamoDbAsyncClient, operation: DynamoDbOp[In, Out]): Flow[In, Out, NotUsed] =
+ parallelism: Int)(implicit client: DynamoDbAsyncClient, operation: DynamoDbOp[In, Out]): Flow[In, Out, NotUsed] =
Flow[In].mapAsync(parallelism)(operation.execute(_))
/**
@@ -43,9 +42,8 @@ object DynamoDb {
* @tparam Ctx context (or pass-through)
*/
def flowWithContext[In <: DynamoDbRequest, Out <: DynamoDbResponse, Ctx](
- parallelism: Int
- )(implicit client: DynamoDbAsyncClient,
- operation: DynamoDbOp[In, Out]): FlowWithContext[In, Ctx, Try[Out], Ctx, NotUsed] =
+ parallelism: Int)(implicit client: DynamoDbAsyncClient,
+ operation: DynamoDbOp[In, Out]): FlowWithContext[In, Ctx, Try[Out], Ctx, NotUsed] =
FlowWithContext.fromTuples(
Flow[(In, Ctx)]
.mapAsync(parallelism) {
@@ -54,15 +52,14 @@ object DynamoDb {
.execute(in)
.map[(Try[Out], Ctx)](res => (Success(res), ctx))(ExecutionContexts.parasitic)
.recover { case t => (Failure(t), ctx) }(ExecutionContexts.parasitic)
- }
- )
+ })
/**
* Create a Source that will emit potentially multiple responses for a given request.
*/
def source[In <: DynamoDbRequest, Out <: DynamoDbResponse](
- request: In
- )(implicit client: DynamoDbAsyncClient, operation: DynamoDbPaginatedOp[In, Out, _]): Source[Out, NotUsed] =
+ request: In)(
+ implicit client: DynamoDbAsyncClient, operation: DynamoDbPaginatedOp[In, Out, _]): Source[Out, NotUsed] =
Source.fromPublisher(operation.publisher(request))
/**
@@ -72,19 +69,16 @@ object DynamoDb {
*/
def flowPaginated[In <: DynamoDbRequest, Out <: DynamoDbResponse]()(
implicit client: DynamoDbAsyncClient,
- operation: DynamoDbPaginatedOp[In, Out, _]
- ): Flow[In, Out, NotUsed] = Flow[In].flatMapConcat(source(_))
+ operation: DynamoDbPaginatedOp[In, Out, _]): Flow[In, Out, NotUsed] = Flow[In].flatMapConcat(source(_))
/**
* Create a Future that will be completed with a response to a given request.
*/
@implicitNotFound(
- "a `ClassicActorSystemProvider` is a classic or new API actor system, provide this instead of a `Materializer`"
- )
+ "a `ClassicActorSystemProvider` is a classic or new API actor system, provide this instead of a `Materializer`")
def single[In <: DynamoDbRequest, Out <: DynamoDbResponse](
- request: In
- )(implicit client: DynamoDbAsyncClient,
- operation: DynamoDbOp[In, Out],
- system: ClassicActorSystemProvider): Future[Out] =
+ request: In)(implicit client: DynamoDbAsyncClient,
+ operation: DynamoDbOp[In, Out],
+ system: ClassicActorSystemProvider): Future[Out] =
Source.single(request).via(flow(1)).runWith(Sink.head)
}
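A short sketch of driving these operators end to end (assumes a DynamoDB
endpoint reachable with default credentials; not part of this diff):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.dynamodb.DynamoDbOp._
    import akka.stream.alpakka.dynamodb.scaladsl.DynamoDb
    import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
    import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest

    implicit val system: ActorSystem = ActorSystem("ddb-sketch")
    implicit val client: DynamoDbAsyncClient = DynamoDbAsyncClient.create()

    // `single` pushes one request through `flow(1)` and yields the response:
    DynamoDb
      .single(ListTablesRequest.builder().build())
      .foreach(response => println(response.tableNames))(system.dispatcher)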
diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala
index 86e57e1a..e71424a0 100644
--- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala
+++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala
@@ -13,7 +13,7 @@ import com.github.matsluni.akkahttpspi.AkkaHttpClient
import org.scalatest._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpecLike
-import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
import software.amazon.awssdk.services.dynamodb.model.TableStatus
@@ -114,7 +114,7 @@ class ItemSpec extends TestKit(ActorSystem("ItemSpec")) with AsyncWordSpecLike w
for {
_ <- DynamoDb.single(deleteTableRequest)
list <- DynamoDb.single(listTablesRequest)
- } yield list.tableNames.asScala should not contain (tableName)
+ } yield list.tableNames.asScala should not contain tableName
}
}
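The matcher change in these specs is purely stylistic: with the infix form of
`not contain`, the parentheses around the expected element are redundant. A
standalone sketch:

    import org.scalatest.matchers.should.Matchers._

    val tableNames = Seq("alpha", "beta")
    tableNames should not contain "gamma" // same check, no parentheses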
diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala
index fafc2c2d..2da2a374 100644
--- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala
+++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala
@@ -11,7 +11,7 @@ import akka.stream.alpakka.dynamodb.scaladsl.DynamoDb
import akka.testkit.TestKit
import com.github.matsluni.akkahttpspi.AkkaHttpClient
import org.scalatest.BeforeAndAfterAll
-import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
@@ -80,7 +80,7 @@ class TableSpec extends TestKit(ActorSystem("TableSpec")) with AsyncWordSpecLike
for {
_ <- DynamoDb.single(deleteTableRequest)
list <- DynamoDb.single(listTablesRequest)
- } yield list.tableNames.asScala should not contain (tableName)
+ } yield list.tableNames.asScala should not contain tableName
}
}
diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala
index 91093b3a..328e5b30 100644
--- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala
+++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala
@@ -19,16 +19,14 @@ trait TestOps {
def N(n: Int) = AttributeValue.builder().n(n.toString).build()
def keyMap(hash: String, sort: Int): Map[String, AttributeValue] = Map(
keyCol -> S(hash),
- sortCol -> N(sort)
- )
+ sortCol -> N(sort))
def keyEQ(hash: String): Map[String, Condition] = Map(
keyCol -> Condition
.builder()
.comparisonOperator(ComparisonOperator.EQ)
.attributeValueList(S(hash))
- .build()
- )
+ .build())
object common {
val listTablesRequest = ListTablesRequest.builder().build()
@@ -38,15 +36,12 @@ trait TestOps {
.tableName(tableName)
.keySchema(
KeySchemaElement.builder().attributeName(keyCol).keyType(KeyType.HASH).build(),
- KeySchemaElement.builder().attributeName(sortCol).keyType(KeyType.RANGE).build()
- )
+ KeySchemaElement.builder().attributeName(sortCol).keyType(KeyType.RANGE).build())
.attributeDefinitions(
AttributeDefinition.builder().attributeName(keyCol).attributeType(ScalarAttributeType.S).build(),
- AttributeDefinition.builder().attributeName(sortCol).attributeType(ScalarAttributeType.N).build()
- )
+ AttributeDefinition.builder().attributeName(sortCol).attributeType(ScalarAttributeType.N).build())
.provisionedThroughput(
- ProvisionedThroughput.builder().readCapacityUnits(10L).writeCapacityUnits(10L).build()
- )
+ ProvisionedThroughput.builder().readCapacityUnits(10L).writeCapacityUnits(10L).build())
.build()
val describeTableRequest = DescribeTableRequest.builder().tableName(tableName).build()
@@ -93,10 +88,7 @@ abstract class ItemSpecOps extends TestOps {
WriteRequest
.builder()
.putRequest(PutRequest.builder().item((keyMap("B", 1) + ("data" -> S(test5Data))).asJava).build())
- .build()
- ).asJava
- ).asJava
- )
+ .build()).asJava).asJava)
.build()
def batchWriteLargeItemRequest(from: Int, to: Int) =
@@ -110,12 +102,10 @@ abstract class ItemSpecOps extends TestOps {
WriteRequest
.builder()
.putRequest(
- PutRequest.builder().item((keyMap(i.toString, i) + ("data1" -> S("0123456789" * 39000))).asJava).build()
- )
+ PutRequest.builder().item((keyMap(i.toString, i) + ("data1" -> S(
+ "0123456789" * 39000))).asJava).build())
.build()
- }.asJava
- ).asJava
- )
+ }.asJava).asJava)
.build()
def batchGetLargeItemRequest(from: Int, to: Int) =
@@ -132,9 +122,7 @@ abstract class ItemSpecOps extends TestOps {
}.asJava
}
.attributesToGet("data1")
- .build()
- ).asJava
- )
+ .build()).asJava)
.build()
def batchGetItemRequest(items: java.util.Map[String, KeysAndAttributes]) =
@@ -177,9 +165,7 @@ abstract class ItemSpecOps extends TestOps {
TransactWriteItem
.builder()
.put(Put.builder().tableName(tableName).item((keyMap("C", 1) + ("data" -> S(test8Data))).asJava).build())
- .build()
- ).asJava
- )
+ .build()).asJava)
.build()
val transactGetItemsRequest = TransactGetItemsRequest
@@ -187,9 +173,8 @@ abstract class ItemSpecOps extends TestOps {
.transactItems(
List(
TransactGetItem.builder().get(Get.builder().tableName(tableName).key(keyMap("C", 0).asJava).build()).build(),
- TransactGetItem.builder().get(Get.builder().tableName(tableName).key(keyMap("C", 1).asJava).build()).build()
- ).asJava
- )
+ TransactGetItem.builder().get(Get.builder().tableName(tableName).key(keyMap("C",
+ 1).asJava).build()).build()).asJava)
.build()
val transactDeleteItemsRequest = TransactWriteItemsRequest
@@ -203,9 +188,7 @@ abstract class ItemSpecOps extends TestOps {
TransactWriteItem
.builder()
.delete(Delete.builder().tableName(tableName).key(keyMap("C", 1).asJava).build())
- .build()
- ).asJava
- )
+ .build()).asJava)
.build()
val deleteTableRequest = common.deleteTableRequest
@@ -229,8 +212,7 @@ object TableSpecOps extends TestOps {
.builder()
.tableName(tableName)
.provisionedThroughput(
- ProvisionedThroughput.builder().writeCapacityUnits(newMaxLimit).readCapacityUnits(newMaxLimit).build()
- )
+ ProvisionedThroughput.builder().writeCapacityUnits(newMaxLimit).readCapacityUnits(newMaxLimit).build())
.build()
val describeTimeToLiveRequest = DescribeTimeToLiveRequest.builder().build()
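For orientation, the helpers above feed AWS SDK v2 request builders. A small
sketch of the same pattern (the table and attribute names are placeholders):

    import scala.jdk.CollectionConverters._
    import software.amazon.awssdk.services.dynamodb.model.{ AttributeValue, GetItemRequest }

    def S(s: String) = AttributeValue.builder().s(s).build()
    def N(n: Int) = AttributeValue.builder().n(n.toString).build()

    val key = Map("hashKey" -> S("A"), "sortKey" -> N(0))
    val getItem = GetItemRequest.builder()
      .tableName("alpakka-test")
      .key(key.asJava)
      .build()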
diff --git a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
index 10ac506f..98f863c8 100644
--- a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
+++ b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
@@ -8,22 +8,22 @@ import java.net.URI
import akka.NotUsed
import akka.stream.alpakka.testkit.scaladsl.LogCapturing
-import akka.stream.scaladsl.{FlowWithContext, SourceWithContext}
+import akka.stream.scaladsl.{ FlowWithContext, SourceWithContext }
-import scala.util.{Failure, Success, Try}
+import scala.util.{ Failure, Success, Try }
//#init-client
import akka.actor.ActorSystem
//#init-client
import akka.stream.alpakka.dynamodb.DynamoDbOp._
import akka.stream.alpakka.dynamodb.scaladsl._
-import akka.stream.scaladsl.{Sink, Source}
+import akka.stream.scaladsl.{ Sink, Source }
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll
//#init-client
import com.github.matsluni.akkahttpspi.AkkaHttpClient
-import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
@@ -46,7 +46,7 @@ class ExampleSpec
override implicit val patienceConfig: PatienceConfig = PatienceConfig(5.seconds, 100.millis)
- //#init-client
+ // #init-client
// Don't encode credentials in your source code!
// see https://doc.akka.io/docs/alpakka/current/aws-shared-configuration.html
@@ -59,14 +59,14 @@ class ExampleSpec
// Possibility to configure the retry policy
// see https://doc.akka.io/docs/alpakka/current/aws-shared-configuration.html
// .overrideConfiguration(...)
- //#init-client
+ // #init-client
.endpointOverride(new URI("http://localhost:8001/"))
- //#init-client
+ // #init-client
.build()
system.registerOnTermination(client.close())
- //#init-client
+ // #init-client
override def afterAll(): Unit = {
client.close();
@@ -105,8 +105,7 @@ class ExampleSpec
val source: SourceWithContext[PutItemRequest, SomeContext, NotUsed] = // ???
// #withContext
SourceWithContext.fromTuples(
- Source.single(PutItemRequest.builder().build() -> SomeContext())
- )
+ Source.single(PutItemRequest.builder().build() -> SomeContext()))
// #withContext
@@ -116,7 +115,7 @@ class ExampleSpec
val writtenSource: SourceWithContext[PutItemResponse, SomeContext, NotUsed] = source
.via(flow)
.map {
- case Success(response) => response
+ case Success(response) => response
case Failure(exception) => throw exception
}
// #withContext
@@ -128,8 +127,7 @@ class ExampleSpec
(for {
create <- DynamoDb.single(CreateTableRequest.builder().tableName("testTable").build())
describe <- DynamoDb.single(
- DescribeTableRequest.builder().tableName(create.tableDescription.tableName).build()
- )
+ DescribeTableRequest.builder().tableName(create.tableDescription.tableName).build())
} yield describe.table.itemCount).failed.futureValue
}
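A condensed sketch of the with-context pattern this spec documents: a
correlation value rides alongside each request, and failures surface as Try
values instead of failing the stream (client setup as in the spec; the table
name and correlation id are placeholders):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.dynamodb.DynamoDbOp._
    import akka.stream.alpakka.dynamodb.scaladsl.DynamoDb
    import akka.stream.scaladsl.{ Sink, Source }
    import scala.util.{ Failure, Success }
    import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
    import software.amazon.awssdk.services.dynamodb.model.{ PutItemRequest, PutItemResponse }

    implicit val system: ActorSystem = ActorSystem("ctx-sketch")
    implicit val client: DynamoDbAsyncClient = DynamoDbAsyncClient.create()

    Source(List(PutItemRequest.builder().tableName("testTable").build() -> "req-1"))
      .via(DynamoDb.flowWithContext[PutItemRequest, PutItemResponse, String](parallelism = 1).asFlow)
      .runWith(Sink.foreach {
        case (Success(_), id) => println(s"$id succeeded")
        case (Failure(e), id) => println(s"$id failed: $e")
      })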
diff --git a/dynamodb/src/test/scala/docs/scaladsl/RetrySpec.scala b/dynamodb/src/test/scala/docs/scaladsl/RetrySpec.scala
index 70af732c..f1203a61 100644
--- a/dynamodb/src/test/scala/docs/scaladsl/RetrySpec.scala
+++ b/dynamodb/src/test/scala/docs/scaladsl/RetrySpec.scala
@@ -9,7 +9,7 @@ import akka.stream.alpakka.testkit.scaladsl.LogCapturing
import akka.testkit.TestKit
import com.github.matsluni.akkahttpspi.AkkaHttpClient
import org.scalatest.BeforeAndAfterAll
-import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCredentialsProvider}
+import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
// #awsRetryConfiguration
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration
import software.amazon.awssdk.core.internal.retry.SdkDefaultRetrySetting
@@ -46,10 +46,8 @@ class RetrySpec
.throttlingBackoffStrategy(BackoffStrategy.defaultThrottlingStrategy)
.numRetries(SdkDefaultRetrySetting.defaultMaxAttempts)
.retryCondition(RetryCondition.defaultRetryCondition)
- .build
- )
- .build()
- )
+ .build)
+ .build())
// #awsRetryConfiguration
.build()
// #clientRetryConfig
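The snippet above configures the SDK's retry behaviour; stripped of the docs
markers, the same override reads as follows (a sketch with a made-up retry
count):

    import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration
    import software.amazon.awssdk.core.retry.RetryPolicy
    import software.amazon.awssdk.core.retry.backoff.BackoffStrategy
    import software.amazon.awssdk.core.retry.conditions.RetryCondition

    val overrideConfig: ClientOverrideConfiguration =
      ClientOverrideConfiguration
        .builder()
        .retryPolicy(
          RetryPolicy.builder()
            .backoffStrategy(BackoffStrategy.defaultStrategy())
            .throttlingBackoffStrategy(BackoffStrategy.defaultThrottlingStrategy())
            .retryCondition(RetryCondition.defaultRetryCondition())
            .numRetries(10)
            .build())
        .build()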
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala
index d0bf6cd2..f1078e3e 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.elasticsearch
-import akka.http.scaladsl.{ConnectionContext, HttpsConnectionContext}
+import akka.http.scaladsl.{ ConnectionContext, HttpsConnectionContext }
import akka.http.scaladsl.model.HttpHeader
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.japi.Util
@@ -18,8 +18,7 @@ final class ElasticsearchConnectionSettings private (
val username: Option[String],
val password: Option[String],
val headers: List[HttpHeader],
- val connectionContext: Option[HttpsConnectionContext]
-) {
+ val connectionContext: Option[HttpsConnectionContext]) {
def withBaseUrl(value: String): ElasticsearchConnectionSettings = copy(baseUrl = value)
@@ -57,23 +56,20 @@ final class ElasticsearchConnectionSettings private (
@deprecated("prefer ElasticsearchConnectionSettings.withSSLContext", "3.1.0")
@Deprecated
def withConnectionContext(
- connectionContext: akka.http.javadsl.HttpsConnectionContext
- ): ElasticsearchConnectionSettings = {
+ connectionContext: akka.http.javadsl.HttpsConnectionContext): ElasticsearchConnectionSettings = {
val scalaContext = new HttpsConnectionContext(
connectionContext.getSslContext,
None,
OptionConverters.toScala(connectionContext.getEnabledCipherSuites).map(Util.immutableSeq(_)),
OptionConverters.toScala(connectionContext.getEnabledProtocols).map(Util.immutableSeq(_)),
OptionConverters.toScala(connectionContext.getClientAuth),
- OptionConverters.toScala(connectionContext.getSslParameters)
- )
+ OptionConverters.toScala(connectionContext.getSslParameters))
copy(connectionContext = Option(scalaContext))
}
def withSSLContext(
- sslContext: SSLContext
- ): ElasticsearchConnectionSettings = {
+ sslContext: SSLContext): ElasticsearchConnectionSettings = {
copy(connectionContext = Option(ConnectionContext.httpsClient(sslContext)))
}
@@ -84,18 +80,16 @@ final class ElasticsearchConnectionSettings private (
username: Option[String] = username,
password: Option[String] = password,
headers: List[HttpHeader] = headers,
- connectionContext: Option[HttpsConnectionContext] = connectionContext
- ): ElasticsearchConnectionSettings =
+ connectionContext: Option[HttpsConnectionContext] = connectionContext): ElasticsearchConnectionSettings =
new ElasticsearchConnectionSettings(baseUrl = baseUrl,
- username = username,
- password = password,
- headers = headers,
- connectionContext = connectionContext)
+ username = username,
+ password = password,
+ headers = headers,
+ connectionContext = connectionContext)
override def toString =
- s"""ElasticsearchConnectionSettings(baseUrl=$baseUrl,username=$username,password=${password.fold("")(
- _ => "***"
- )},headers=${headers.mkString(";")},connectionContext=$connectionContext)"""
+ s"""ElasticsearchConnectionSettings(baseUrl=$baseUrl,username=$username,password=${password.fold("")(_ =>
+ "***")},headers=${headers.mkString(";")},connectionContext=$connectionContext)"""
}
object ElasticsearchConnectionSettings {
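Typical assembly of these settings ties the SSL hook added above together
with the credential setter this class also provides. A sketch (URL and
credentials are placeholders):

    import javax.net.ssl.SSLContext
    import akka.stream.alpakka.elasticsearch.ElasticsearchConnectionSettings

    val connectionSettings =
      ElasticsearchConnectionSettings("https://localhost:9200")
        .withCredentials("elastic", "changeme")
        .withSSLContext(SSLContext.getDefault)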
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchSourceSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchSourceSettings.scala
index 0215fa2f..e1394d81 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchSourceSettings.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchSourceSettings.scala
@@ -10,29 +10,28 @@ import scala.concurrent.duration.FiniteDuration
/**
* Configure Elasticsearch sources.
- *
*/
final class ElasticsearchSourceSettings private (connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- includeDocumentVersion: Boolean,
- scrollDuration: FiniteDuration,
- apiVersion: ApiVersion)
+ bufferSize: Int,
+ includeDocumentVersion: Boolean,
+ scrollDuration: FiniteDuration,
+ apiVersion: ApiVersion)
extends SourceSettingsBase[ApiVersion, ElasticsearchSourceSettings](connection,
- bufferSize,
- includeDocumentVersion,
- scrollDuration,
- apiVersion) {
+ bufferSize,
+ includeDocumentVersion,
+ scrollDuration,
+ apiVersion) {
protected override def copy(connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- includeDocumentVersion: Boolean,
- scrollDuration: FiniteDuration,
- apiVersion: ApiVersion): ElasticsearchSourceSettings =
+ bufferSize: Int,
+ includeDocumentVersion: Boolean,
+ scrollDuration: FiniteDuration,
+ apiVersion: ApiVersion): ElasticsearchSourceSettings =
new ElasticsearchSourceSettings(connection = connection,
- bufferSize = bufferSize,
- includeDocumentVersion = includeDocumentVersion,
- scrollDuration = scrollDuration,
- apiVersion = apiVersion)
+ bufferSize = bufferSize,
+ includeDocumentVersion = includeDocumentVersion,
+ scrollDuration = scrollDuration,
+ apiVersion = apiVersion)
override def toString =
s"""ElasticsearchSourceSettings(connection=$connection,bufferSize=$bufferSize,includeDocumentVersion=$includeDocumentVersion,scrollDuration=$scrollDuration,apiVersion=$apiVersion)"""
@@ -44,16 +43,16 @@ object ElasticsearchSourceSettings {
/** Scala API */
def apply(connection: ElasticsearchConnectionSettings): ElasticsearchSourceSettings =
new ElasticsearchSourceSettings(connection,
- 10,
- includeDocumentVersion = false,
- FiniteDuration(5, TimeUnit.MINUTES),
- ApiVersion.V7)
+ 10,
+ includeDocumentVersion = false,
+ FiniteDuration(5, TimeUnit.MINUTES),
+ ApiVersion.V7)
/** Java API */
def create(connection: ElasticsearchConnectionSettings): ElasticsearchSourceSettings =
new ElasticsearchSourceSettings(connection,
- 10,
- includeDocumentVersion = false,
- FiniteDuration(5, TimeUnit.MINUTES),
- ApiVersion.V7)
+ 10,
+ includeDocumentVersion = false,
+ FiniteDuration(5, TimeUnit.MINUTES),
+ ApiVersion.V7)
}
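The apply/create factories above default the buffer size to 10 and the
scroll keep-alive to five minutes. A sketch of overriding them (connection
values are placeholders):

    import java.util.concurrent.TimeUnit
    import scala.concurrent.duration.FiniteDuration
    import akka.stream.alpakka.elasticsearch.{ ApiVersion, ElasticsearchConnectionSettings, ElasticsearchSourceSettings }

    val sourceSettings =
      ElasticsearchSourceSettings(ElasticsearchConnectionSettings("http://localhost:9200"))
        .withBufferSize(100)
        .withScrollDuration(FiniteDuration(5, TimeUnit.MINUTES))
        .withApiVersion(ApiVersion.V7)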
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala
index 07979653..b51da675 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala
@@ -37,8 +37,8 @@ object RetryAtFixedRate {
}
final class RetryWithBackoff(_maxRetries: Int,
- _minBackoff: scala.concurrent.duration.FiniteDuration,
- _maxBackoff: scala.concurrent.duration.FiniteDuration)
+ _minBackoff: scala.concurrent.duration.FiniteDuration,
+ _maxBackoff: scala.concurrent.duration.FiniteDuration)
extends RetryLogic {
override val maxRetries: Int = _maxRetries
override val minBackoff: scala.concurrent.duration.FiniteDuration = _minBackoff
@@ -48,8 +48,8 @@ final class RetryWithBackoff(_maxRetries: Int,
object RetryWithBackoff {
def apply(maxRetries: Int,
- minBackoff: scala.concurrent.duration.FiniteDuration,
- maxBackoff: scala.concurrent.duration.FiniteDuration): RetryWithBackoff =
+ minBackoff: scala.concurrent.duration.FiniteDuration,
+ maxBackoff: scala.concurrent.duration.FiniteDuration): RetryWithBackoff =
new RetryWithBackoff(maxRetries, minBackoff, maxBackoff)
def create(maxRetries: Int, minBackoff: java.time.Duration, maxBackoff: java.time.Duration): RetryWithBackoff =
@@ -60,24 +60,24 @@ object RetryWithBackoff {
* Configure Elasticsearch sinks and flows.
*/
final class ElasticsearchWriteSettings private (connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- retryLogic: RetryLogic,
- versionType: Option[String],
- apiVersion: ApiVersion,
- allowExplicitIndex: Boolean)
+ bufferSize: Int,
+ retryLogic: RetryLogic,
+ versionType: Option[String],
+ apiVersion: ApiVersion,
+ allowExplicitIndex: Boolean)
extends WriteSettingsBase[ApiVersion, ElasticsearchWriteSettings](connection,
- bufferSize,
- retryLogic,
- versionType,
- apiVersion,
- allowExplicitIndex) {
+ bufferSize,
+ retryLogic,
+ versionType,
+ apiVersion,
+ allowExplicitIndex) {
protected override def copy(connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- retryLogic: RetryLogic,
- versionType: Option[String],
- apiVersion: ApiVersion,
- allowExplicitIndex: Boolean): ElasticsearchWriteSettings =
+ bufferSize: Int,
+ retryLogic: RetryLogic,
+ versionType: Option[String],
+ apiVersion: ApiVersion,
+ allowExplicitIndex: Boolean): ElasticsearchWriteSettings =
new ElasticsearchWriteSettings(connection, bufferSize, retryLogic, versionType, apiVersion, allowExplicitIndex)
override def toString: String =
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchSourceSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchSourceSettings.scala
index 72353e28..846c573f 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchSourceSettings.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchSourceSettings.scala
@@ -9,28 +9,27 @@ import scala.concurrent.duration.FiniteDuration
/**
* Configure Opensearch sources.
- *
*/
final class OpensearchSourceSettings private (connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- includeDocumentVersion: Boolean,
- scrollDuration: FiniteDuration,
- apiVersion: OpensearchApiVersion)
+ bufferSize: Int,
+ includeDocumentVersion: Boolean,
+ scrollDuration: FiniteDuration,
+ apiVersion: OpensearchApiVersion)
extends SourceSettingsBase[OpensearchApiVersion, OpensearchSourceSettings](connection,
- bufferSize,
- includeDocumentVersion,
- scrollDuration,
- apiVersion) {
+ bufferSize,
+ includeDocumentVersion,
+ scrollDuration,
+ apiVersion) {
protected override def copy(connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- includeDocumentVersion: Boolean,
- scrollDuration: FiniteDuration,
- apiVersion: OpensearchApiVersion): OpensearchSourceSettings =
+ bufferSize: Int,
+ includeDocumentVersion: Boolean,
+ scrollDuration: FiniteDuration,
+ apiVersion: OpensearchApiVersion): OpensearchSourceSettings =
new OpensearchSourceSettings(connection = connection,
- bufferSize = bufferSize,
- includeDocumentVersion = includeDocumentVersion,
- scrollDuration = scrollDuration,
- apiVersion = apiVersion)
+ bufferSize = bufferSize,
+ includeDocumentVersion = includeDocumentVersion,
+ scrollDuration = scrollDuration,
+ apiVersion = apiVersion)
override def toString =
s"""OpensearchSourceSettings(connection=$connection,bufferSize=$bufferSize,includeDocumentVersion=$includeDocumentVersion,scrollDuration=$scrollDuration,apiVersion=$apiVersion)"""
@@ -42,16 +41,16 @@ object OpensearchSourceSettings {
/** Scala API */
def apply(connection: ElasticsearchConnectionSettings): OpensearchSourceSettings =
new OpensearchSourceSettings(connection,
- 10,
- includeDocumentVersion = false,
- FiniteDuration(5, TimeUnit.MINUTES),
- OpensearchApiVersion.V1)
+ 10,
+ includeDocumentVersion = false,
+ FiniteDuration(5, TimeUnit.MINUTES),
+ OpensearchApiVersion.V1)
/** Java API */
def create(connection: ElasticsearchConnectionSettings): OpensearchSourceSettings =
new OpensearchSourceSettings(connection,
- 10,
- includeDocumentVersion = false,
- FiniteDuration(5, TimeUnit.MINUTES),
- OpensearchApiVersion.V1)
+ 10,
+ includeDocumentVersion = false,
+ FiniteDuration(5, TimeUnit.MINUTES),
+ OpensearchApiVersion.V1)
}
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchWriteSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchWriteSettings.scala
index 54b6d4fe..cb17923a 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchWriteSettings.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/OpensearchWriteSettings.scala
@@ -8,24 +8,24 @@ package akka.stream.alpakka.elasticsearch
* Configure Opensearch sinks and flows.
*/
final class OpensearchWriteSettings private (connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- retryLogic: RetryLogic,
- versionType: Option[String],
- apiVersion: OpensearchApiVersion,
- allowExplicitIndex: Boolean)
+ bufferSize: Int,
+ retryLogic: RetryLogic,
+ versionType: Option[String],
+ apiVersion: OpensearchApiVersion,
+ allowExplicitIndex: Boolean)
extends WriteSettingsBase[OpensearchApiVersion, OpensearchWriteSettings](connection,
- bufferSize,
- retryLogic,
- versionType,
- apiVersion,
- allowExplicitIndex) {
+ bufferSize,
+ retryLogic,
+ versionType,
+ apiVersion,
+ allowExplicitIndex) {
protected override def copy(connection: ElasticsearchConnectionSettings,
- bufferSize: Int,
- retryLogic: RetryLogic,
- versionType: Option[String],
- apiVersion: OpensearchApiVersion,
- allowExplicitIndex: Boolean): OpensearchWriteSettings =
+ bufferSize: Int,
+ retryLogic: RetryLogic,
+ versionType: Option[String],
+ apiVersion: OpensearchApiVersion,
+ allowExplicitIndex: Boolean): OpensearchWriteSettings =
new OpensearchWriteSettings(connection, bufferSize, retryLogic, versionType, apiVersion, allowExplicitIndex)
override def toString: String =
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala
index 45204630..6ed80da6 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala
@@ -15,8 +15,8 @@ import scala.compat.java8.OptionConverters._
* [[akka.stream.alpakka.elasticsearch.testkit.MessageFactory]].
*/
final class ReadResult[T] @InternalApi private[elasticsearch] (val id: String,
- val source: T,
- val version: Option[Long]) {
+ val source: T,
+ val version: Option[Long]) {
/** Java API */
def getVersion: java.util.Optional[Long] = version.asJava
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala
index 123eafdf..1d67faa9 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala
@@ -13,15 +13,13 @@ import scala.concurrent.duration.FiniteDuration
/**
* Configure Elasticsearch/OpenSearch sources.
- *
*/
abstract class SourceSettingsBase[Version <: ApiVersionBase, S <: SourceSettingsBase[Version, S]] private[alpakka] (
val connection: ElasticsearchConnectionSettings,
val bufferSize: Int,
val includeDocumentVersion: Boolean,
val scrollDuration: FiniteDuration,
- val apiVersion: Version
-) { this: S =>
+ val apiVersion: Version) { this: S =>
def withConnection(value: ElasticsearchConnectionSettings): S = copy(connection = value)
def withBufferSize(value: Int): S = copy(bufferSize = value)
@@ -43,22 +41,22 @@ abstract class SourceSettingsBase[Version <: ApiVersionBase, S <: SourceSettings
def scroll: String = {
val scrollString = scrollDuration.unit match {
- case TimeUnit.DAYS => "d"
- case TimeUnit.HOURS => "h"
- case TimeUnit.MINUTES => "m"
- case TimeUnit.SECONDS => "s"
+ case TimeUnit.DAYS => "d"
+ case TimeUnit.HOURS => "h"
+ case TimeUnit.MINUTES => "m"
+ case TimeUnit.SECONDS => "s"
case TimeUnit.MILLISECONDS => "ms"
case TimeUnit.MICROSECONDS => "micros"
- case TimeUnit.NANOSECONDS => "nanos"
+ case TimeUnit.NANOSECONDS => "nanos"
}
s"${scrollDuration.length}$scrollString"
}
protected def copy(connection: ElasticsearchConnectionSettings = connection,
- bufferSize: Int = bufferSize,
- includeDocumentVersion: Boolean = includeDocumentVersion,
- scrollDuration: FiniteDuration = scrollDuration,
- apiVersion: Version = apiVersion): S;
+ bufferSize: Int = bufferSize,
+ includeDocumentVersion: Boolean = includeDocumentVersion,
+ scrollDuration: FiniteDuration = scrollDuration,
+ apiVersion: Version = apiVersion): S;
}
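The `scroll` method above renders the keep-alive in Elasticsearch time-unit
notation. A standalone re-creation of that mapping:

    import java.util.concurrent.TimeUnit
    import scala.concurrent.duration.FiniteDuration

    def scrollKeepAlive(d: FiniteDuration): String = {
      val unit = d.unit match {
        case TimeUnit.DAYS         => "d"
        case TimeUnit.HOURS        => "h"
        case TimeUnit.MINUTES      => "m"
        case TimeUnit.SECONDS      => "s"
        case TimeUnit.MILLISECONDS => "ms"
        case TimeUnit.MICROSECONDS => "micros"
        case TimeUnit.NANOSECONDS  => "nanos"
      }
      s"${d.length}$unit"
    }

    assert(scrollKeepAlive(FiniteDuration(5, TimeUnit.MINUTES)) == "5m")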
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala
index 2b17533d..65ec975f 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala
@@ -32,23 +32,23 @@ private[elasticsearch] object Operation {
}
final class WriteMessage[T, PT] private (val operation: Operation,
- val id: Option[String],
- val source: Option[T],
- val passThrough: PT = NotUsed,
- val version: Option[Long] = None,
- val indexName: Option[String] = None,
- val customMetadata: Map[String, java.lang.String] = Map.empty) {
+ val id: Option[String],
+ val source: Option[T],
+ val passThrough: PT = NotUsed,
+ val version: Option[Long] = None,
+ val indexName: Option[String] = None,
+ val customMetadata: Map[String, java.lang.String] = Map.empty) {
def withSource(value: T): WriteMessage[T, PT] = copy(source = Option(value))
def withPassThrough[PT2](value: PT2): WriteMessage[T, PT2] =
new WriteMessage[T, PT2](operation = operation,
- id = id,
- source = source,
- value,
- version = version,
- indexName = indexName,
- customMetadata = customMetadata)
+ id = id,
+ source = source,
+ value,
+ version = version,
+ indexName = indexName,
+ customMetadata = customMetadata)
def withVersion(value: Long): WriteMessage[T, PT] = copy(version = Option(value))
def withIndexName(value: String): WriteMessage[T, PT] = copy(indexName = Option(value))
@@ -67,19 +67,19 @@ final class WriteMessage[T, PT] private (val operation: Operation,
this.copy(customMetadata = metadata.asScala.toMap)
private def copy(operation: Operation = operation,
- id: Option[String] = id,
- source: Option[T] = source,
- passThrough: PT = passThrough,
- version: Option[Long] = version,
- indexName: Option[String] = indexName,
- customMetadata: Map[String, String] = customMetadata): WriteMessage[T, PT] =
+ id: Option[String] = id,
+ source: Option[T] = source,
+ passThrough: PT = passThrough,
+ version: Option[Long] = version,
+ indexName: Option[String] = indexName,
+ customMetadata: Map[String, String] = customMetadata): WriteMessage[T, PT] =
new WriteMessage[T, PT](operation = operation,
- id = id,
- source = source,
- passThrough = passThrough,
- version = version,
- indexName = indexName,
- customMetadata = customMetadata)
+ id = id,
+ source = source,
+ passThrough = passThrough,
+ version = version,
+ indexName = indexName,
+ customMetadata = customMetadata)
override def toString =
s"""WriteMessage(operation=$operation,id=$id,source=$source,passThrough=$passThrough,version=$version,indexName=$indexName,customMetadata=$customMetadata)"""
@@ -138,8 +138,8 @@ object WriteMessage {
* [[akka.stream.alpakka.elasticsearch.testkit.MessageFactory]].
*/
final class WriteResult[T2, C2] @InternalApi private[elasticsearch] (val message: WriteMessage[T2, C2],
- /** JSON structure of the Elasticsearch error. */
- val error: Option[String]) {
+ /** JSON structure of the Elasticsearch error. */
+ val error: Option[String]) {
val success: Boolean = error.isEmpty
/** Java API: JSON structure of the Elasticsearch error. */
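Messages of this type are normally built through the factory methods on the
companion object rather than the private constructor. A sketch (the document
type and pass-through value are illustrative):

    import akka.stream.alpakka.elasticsearch.WriteMessage

    case class Book(title: String)

    val index = WriteMessage.createIndexMessage("1", Book("Akka in Action"))
    val upsert = WriteMessage
      .createUpsertMessage("2", Book("Effective Akka"))
      .withPassThrough("kafka-offset-42")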
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteSettingsBase.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteSettingsBase.scala
index ac0d37a4..091d9c9a 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteSettingsBase.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteSettingsBase.scala
@@ -16,8 +16,7 @@ abstract class WriteSettingsBase[Version <: ApiVersionBase, W <: WriteSettingsBa
val retryLogic: RetryLogic,
val versionType: Option[String],
val apiVersion: Version,
- val allowExplicitIndex: Boolean
-) { this: W =>
+ val allowExplicitIndex: Boolean) { this: W =>
def withConnection(value: ElasticsearchConnectionSettings): W = copy(connection = value)
@@ -34,9 +33,9 @@ abstract class WriteSettingsBase[Version <: ApiVersionBase, W <: WriteSettingsBa
def withAllowExplicitIndex(value: Boolean): W = copy(allowExplicitIndex = value)
protected def copy(connection: ElasticsearchConnectionSettings = connection,
- bufferSize: Int = bufferSize,
- retryLogic: RetryLogic = retryLogic,
- versionType: Option[String] = versionType,
- apiVersion: Version = apiVersion,
- allowExplicitIndex: Boolean = allowExplicitIndex): W;
+ bufferSize: Int = bufferSize,
+ retryLogic: RetryLogic = retryLogic,
+ versionType: Option[String] = versionType,
+ apiVersion: Version = apiVersion,
+ allowExplicitIndex: Boolean = allowExplicitIndex): W;
}
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchApi.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchApi.scala
index 9985c7b7..9719fe57 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchApi.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchApi.scala
@@ -15,16 +15,14 @@ import scala.concurrent.Future
@InternalApi private[impl] object ElasticsearchApi {
def executeRequest(
request: HttpRequest,
- connectionSettings: ElasticsearchConnectionSettings
- )(implicit http: HttpExt): Future[HttpResponse] = {
+ connectionSettings: ElasticsearchConnectionSettings)(implicit http: HttpExt): Future[HttpResponse] = {
if (connectionSettings.hasCredentialsDefined) {
http.singleRequest(
- request.addCredentials(BasicHttpCredentials(connectionSettings.username.get, connectionSettings.password.get))
- )
+ request.addCredentials(BasicHttpCredentials(connectionSettings.username.get, connectionSettings.password.get)))
} else {
http.singleRequest(request,
- connectionContext =
- connectionSettings.connectionContext.getOrElse(http.defaultClientHttpsContext))
+ connectionContext =
+ connectionSettings.connectionContext.getOrElse(http.defaultClientHttpsContext))
}
}
}
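The credentials branch above leans on plain Akka HTTP client calls. A
standalone sketch of the same request/credentials pattern (endpoint and
credentials are placeholders):

    import akka.actor.ActorSystem
    import akka.http.scaladsl.Http
    import akka.http.scaladsl.model.HttpRequest
    import akka.http.scaladsl.model.headers.BasicHttpCredentials

    implicit val system: ActorSystem = ActorSystem("http-sketch")

    val request = HttpRequest(uri = "http://localhost:9200/_cluster/health")
      .addCredentials(BasicHttpCredentials("elastic", "changeme"))
    val response = Http().singleRequest(request)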
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala
index d2d722a7..241d9319 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSimpleFlowStage.scala
@@ -15,7 +15,7 @@ import akka.stream._
import akka.stream.alpakka.elasticsearch
import scala.collection.immutable
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.{ ExecutionContext, Future }
/**
* INTERNAL API.
@@ -26,11 +26,10 @@ import scala.concurrent.{ExecutionContext, Future}
private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- writer: MessageWriter[T]
-)(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
+ writer: MessageWriter[T])(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
extends GraphStage[
- FlowShape[(immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]]), immutable.Seq[WriteResult[T, C]]]
- ] {
+ FlowShape[(immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]]), immutable.Seq[WriteResult[T,
+ C]]]] {
private val in =
Inlet[(immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]])]("messagesAndResultPassthrough")
@@ -40,10 +39,10 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
private val restApi: RestBulkApi[T, C] = settings.apiVersion match {
case ApiVersion.V5 =>
new RestBulkApiV5[T, C](elasticsearchParams.indexName,
- elasticsearchParams.typeName.get,
- settings.versionType,
- settings.allowExplicitIndex,
- writer)
+ elasticsearchParams.typeName.get,
+ settings.versionType,
+ settings.allowExplicitIndex,
+ writer)
case ApiVersion.V7 =>
new RestBulkApiV7[T, C](elasticsearchParams.indexName, settings.versionType, settings.allowExplicitIndex, writer)
@@ -88,8 +87,7 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
ElasticsearchApi
.executeRequest(
request,
- connectionSettings = settings.connection
- )
+ connectionSettings = settings.connection)
.map {
case HttpResponse(StatusCodes.OK, _, responseEntity, _) =>
Unmarshal(responseEntity)
@@ -99,8 +97,7 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
Unmarshal(response.entity).to[String].map { body =>
failureHandler.invoke(
(resultsPassthrough,
- new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body"))
- )
+ new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body")))
}
}
.recoverWith {
@@ -115,20 +112,18 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
}
private def handleFailure(
- args: (immutable.Seq[WriteResult[T, C]], Throwable)
- ): Unit = {
+ args: (immutable.Seq[WriteResult[T, C]], Throwable)): Unit = {
inflight = false
val (resultsPassthrough, exception) = args
log.error(s"Received error from elastic after having already processed {} documents. Error: {}",
- resultsPassthrough.size,
- exception)
+ resultsPassthrough.size,
+ exception)
failStage(exception)
}
private def handleResponse(
- args: (immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], String)
- ): Unit = {
+ args: (immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], String)): Unit = {
inflight = false
val (messages, resultsPassthrough, response) = args
@@ -142,7 +137,7 @@ private[elasticsearch] final class ElasticsearchSimpleFlowStage[T, C](
messageResults.filterNot(_.success).foreach { failure =>
if (failure.getError.isPresent) {
log.error(s"Received error from elastic when attempting to index documents. Error: {}",
- failure.getError.get)
+ failure.getError.get)
}
}
}
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSourceStage.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSourceStage.scala
index eb4075b9..d132b499 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSourceStage.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/ElasticsearchSourceStage.scala
@@ -16,13 +16,13 @@ import akka.stream.alpakka.elasticsearch.{
ReadResult,
SourceSettingsBase
}
-import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler, StageLogging}
-import akka.stream.{Attributes, Materializer, Outlet, SourceShape}
+import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging }
+import akka.stream.{ Attributes, Materializer, Outlet, SourceShape }
import spray.json.DefaultJsonProtocol._
import spray.json._
import scala.concurrent.ExecutionContext
-import scala.util.{Failure, Success, Try}
+import scala.util.{ Failure, Success, Try }
/**
* INTERNAL API
@@ -52,8 +52,7 @@ private[elasticsearch] final class ElasticsearchSourceStage[T](
elasticsearchParams: ElasticsearchParams,
searchParams: Map[String, String],
settings: SourceSettingsBase[_, _],
- reader: MessageReader[T]
-)(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
+ reader: MessageReader[T])(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
extends GraphStage[SourceShape[ReadResult[T]]] {
val out: Outlet[ReadResult[T]] = Outlet("ElasticsearchSource.out")
@@ -80,8 +79,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
settings: SourceSettingsBase[_, _],
out: Outlet[ReadResult[T]],
shape: SourceShape[ReadResult[T]],
- reader: MessageReader[T]
-)(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
+ reader: MessageReader[T])(implicit http: HttpExt, mat: Materializer, ec: ExecutionContext)
extends GraphStageLogic(shape)
with OutHandler
with StageLogging {
@@ -110,7 +108,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
// Add extra params to search
val extraParams = Seq(
if (!searchParams.contains("size")) {
- Some(("size" -> settings.bufferSize.toString))
+ Some("size" -> settings.bufferSize.toString)
} else {
None
},
@@ -118,11 +116,10 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-version.html
// https://www.elastic.co/guide/en/elasticsearch/guide/current/optimistic-concurrency-control.html
if (!searchParams.contains("version") && settings.includeDocumentVersion) {
- Some(("version" -> "true"))
+ Some("version" -> "true")
} else {
None
- }
- )
+ })
val baseMap = Map("scroll" -> settings.scroll)
@@ -139,17 +136,17 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
val completeParams = searchParams ++ extraParams.flatten - "routing"
val searchBody = "{" + completeParams
- .map {
- case (name, json) =>
- "\"" + name + "\":" + json
- }
- .mkString(",") + "}"
+ .map {
+ case (name, json) =>
+ "\"" + name + "\":" + json
+ }
+ .mkString(",") + "}"
val endpoint: String = settings.apiVersion match {
- case ApiVersion.V5 => s"/${elasticsearchParams.indexName}/${elasticsearchParams.typeName.get}/_search"
- case ApiVersion.V7 => s"/${elasticsearchParams.indexName}/_search"
+ case ApiVersion.V5 => s"/${elasticsearchParams.indexName}/${elasticsearchParams.typeName.get}/_search"
+ case ApiVersion.V7 => s"/${elasticsearchParams.indexName}/_search"
case OpensearchApiVersion.V1 => s"/${elasticsearchParams.indexName}/_search"
- case other => throw new IllegalArgumentException(s"API version $other is not supported")
+ case other => throw new IllegalArgumentException(s"API version $other is not supported")
}
val uri = prepareUri(Path(endpoint))
@@ -158,15 +155,13 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
val request = HttpRequest(HttpMethods.POST)
.withUri(uri)
.withEntity(
- HttpEntity(ContentTypes.`application/json`, searchBody)
- )
+ HttpEntity(ContentTypes.`application/json`, searchBody))
.withHeaders(settings.connection.headers)
ElasticsearchApi
.executeRequest(
request,
- settings.connection
- )
+ settings.connection)
.flatMap {
case HttpResponse(StatusCodes.OK, _, responseEntity, _) =>
Unmarshal(responseEntity)
@@ -176,8 +171,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
Unmarshal(response.entity).to[String].map { body =>
failureHandler
.invoke(
- new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body")
- )
+ new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body"))
}
}
.recover {
@@ -194,15 +188,13 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
.withUri(uri)
.withEntity(
HttpEntity(ContentTypes.`application/json`,
- Map("scroll" -> settings.scroll, "scroll_id" -> actualScrollId).toJson.compactPrint)
- )
+ Map("scroll" -> settings.scroll, "scroll_id" -> actualScrollId).toJson.compactPrint))
.withHeaders(settings.connection.headers)
ElasticsearchApi
.executeRequest(
request,
- settings.connection
- )
+ settings.connection)
.flatMap {
case HttpResponse(StatusCodes.OK, _, responseEntity, _) =>
Unmarshal(responseEntity)
@@ -213,8 +205,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
.to[String]
.map { body =>
failureHandler.invoke(
- new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body")
- )
+ new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body"))
}
}
.recover {
@@ -343,9 +334,7 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
clearScrollAsyncHandler
.invoke(
Failure(
- new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body")
- )
- )
+ new RuntimeException(s"Request failed for POST $uri, got ${response.status} with body: $body")))
}
}
.recover {
@@ -355,12 +344,12 @@ private[elasticsearch] final class ElasticsearchSourceLogic[T](
}
}
- private val clearScrollAsyncHandler = getAsyncCallback[Try[String]]({ result =>
+ private val clearScrollAsyncHandler = getAsyncCallback[Try[String]] { result =>
{
// Note: the scroll will expire, so there is no reason to consider a failed
// clear as a reason to fail the stream.
log.debug("Result of clearing the scroll: {}", result)
completeStage()
}
- })
+ }
}
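End to end, the stage above backs ElasticsearchSource. A sketch of typical
use with a spray-json reader (index name, query and document type are
placeholders):

    import akka.actor.ActorSystem
    import akka.stream.alpakka.elasticsearch.scaladsl.ElasticsearchSource
    import akka.stream.alpakka.elasticsearch.{
      ElasticsearchConnectionSettings,
      ElasticsearchParams,
      ElasticsearchSourceSettings
    }
    import akka.stream.scaladsl.Sink
    import spray.json._
    import spray.json.DefaultJsonProtocol._

    implicit val system: ActorSystem = ActorSystem("es-source-sketch")

    case class Book(title: String)
    implicit val bookFormat: RootJsonFormat[Book] = jsonFormat1(Book)

    val settings = ElasticsearchSourceSettings(
      ElasticsearchConnectionSettings("http://localhost:9200"))

    ElasticsearchSource
      .typed[Book](ElasticsearchParams.V7("books"), """{"match_all": {}}""", settings)
      .map(_.source.title)
      .runWith(Sink.foreach(println))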
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/NDJsonProtocol.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/NDJsonProtocol.scala
index 475b34fc..6c5b4a32 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/NDJsonProtocol.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/NDJsonProtocol.scala
@@ -4,7 +4,7 @@
package akka.stream.alpakka.elasticsearch.impl
-import akka.http.scaladsl.model.{ContentType, HttpCharsets, MediaType}
+import akka.http.scaladsl.model.{ ContentType, HttpCharsets, MediaType }
object NDJsonProtocol {
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApi.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApi.scala
index 1a800655..b8262a85 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApi.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApi.scala
@@ -5,8 +5,8 @@
package akka.stream.alpakka.elasticsearch.impl
import akka.annotation.InternalApi
-import akka.stream.alpakka.elasticsearch.Operation.{Create, Delete, Index, Nop, Update, Upsert}
-import akka.stream.alpakka.elasticsearch.{WriteMessage, WriteResult}
+import akka.stream.alpakka.elasticsearch.Operation.{ Create, Delete, Index, Nop, Update, Upsert }
+import akka.stream.alpakka.elasticsearch.{ WriteMessage, WriteResult }
import spray.json._
import scala.collection.immutable
@@ -20,7 +20,7 @@ private[impl] abstract class RestBulkApi[T, C] {
def toJson(messages: immutable.Seq[WriteMessage[T, C]]): String
def toWriteResults(messages: immutable.Seq[WriteMessage[T, C]],
- jsonString: String): immutable.Seq[WriteResult[T, C]] = {
+ jsonString: String): immutable.Seq[WriteResult[T, C]] = {
val responseJson = jsonString.parseJson
// If some commands in bulk request failed, pass failed messages to follows.
@@ -36,17 +36,17 @@ private[impl] abstract class RestBulkApi[T, C] {
def messageToJson(message: WriteMessage[T, C], messageSource: String): String = message.operation match {
case Index | Create => "\n" + messageSource
- case Upsert => "\n" + JsObject("doc" -> messageSource.parseJson, "doc_as_upsert" -> JsTrue).toString
- case Update => "\n" + JsObject("doc" -> messageSource.parseJson).toString
- case Delete => ""
- case Nop => ""
+ case Upsert => "\n" + JsObject("doc" -> messageSource.parseJson, "doc_as_upsert" -> JsTrue).toString
+ case Update => "\n" + JsObject("doc" -> messageSource.parseJson).toString
+ case Delete => ""
+ case Nop => ""
}
def constructSharedFields(message: WriteMessage[T, C]): Seq[(String, JsString)]
/** NOPs don't come back so slip them into the results like this: */
private def buildMessageResults(items: JsArray,
- messages: immutable.Seq[WriteMessage[T, C]]): immutable.Seq[WriteResult[T, C]] = {
+ messages: immutable.Seq[WriteMessage[T, C]]): immutable.Seq[WriteResult[T, C]] = {
val ret = new immutable.VectorBuilder[WriteResult[T, C]]
ret.sizeHint(messages)
val itemsIter = items.elements.iterator
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV5.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV5.scala
index 48af4ab4..f9c1c1fb 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV5.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV5.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.elasticsearch.impl
import akka.annotation.InternalApi
import akka.stream.alpakka.elasticsearch.Operation._
-import akka.stream.alpakka.elasticsearch.{MessageWriter, WriteMessage}
+import akka.stream.alpakka.elasticsearch.{ MessageWriter, WriteMessage }
import spray.json._
import scala.collection.immutable
@@ -19,10 +19,10 @@ import scala.collection.immutable
*/
@InternalApi
private[impl] final class RestBulkApiV5[T, C](indexName: String,
- typeName: String,
- versionType: Option[String],
- allowExplicitIndex: Boolean,
- messageWriter: MessageWriter[T])
+ typeName: String,
+ versionType: Option[String],
+ allowExplicitIndex: Boolean,
+ messageWriter: MessageWriter[T])
extends RestBulkApi[T, C] {
private lazy val typeNameTuple = "_type" -> JsString(typeName)
@@ -36,23 +36,20 @@ private[impl] final class RestBulkApiV5[T, C](indexName: String,
val fields = Seq(
optionalNumber("_version", message.version),
optionalString("version_type", versionType),
- optionalString("_id", message.id)
- ).flatten
+ optionalString("_id", message.id)).flatten
"index" -> JsObject(sharedFields ++ fields: _*)
case Create => "create" -> JsObject(sharedFields ++ optionalString("_id", message.id): _*)
case Update | Upsert =>
val fields =
("_id" -> JsString(message.id.get)) +: Seq(
optionalNumber("_version", message.version),
- optionalString("version_type", versionType)
- ).flatten
+ optionalString("version_type", versionType)).flatten
"update" -> JsObject(sharedFields ++ fields: _*)
case Delete =>
val fields =
("_id" -> JsString(message.id.get)) +: Seq(
optionalNumber("_version", message.version),
- optionalString("version_type", versionType)
- ).flatten
+ optionalString("version_type", versionType)).flatten
"delete" -> JsObject(sharedFields ++ fields: _*)
case Nop => "" -> JsObject()
}
@@ -63,7 +60,7 @@ private[impl] final class RestBulkApiV5[T, C](indexName: String,
}
.filter(_.nonEmpty) match {
case Nil => "" // if all NOPs
- case x => x.mkString("", "\n", "\n")
+ case x => x.mkString("", "\n", "\n")
}
override def constructSharedFields(message: WriteMessage[T, C]): Seq[(String, JsString)] = {
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV7.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV7.scala
index 1fdf8fd8..45f5c45d 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV7.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/impl/RestBulkApiV7.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.elasticsearch.impl
import akka.annotation.InternalApi
import akka.stream.alpakka.elasticsearch.Operation._
-import akka.stream.alpakka.elasticsearch.{MessageWriter, WriteMessage}
+import akka.stream.alpakka.elasticsearch.{ MessageWriter, WriteMessage }
import spray.json._
import scala.collection.immutable
@@ -19,9 +19,9 @@ import scala.collection.immutable
*/
@InternalApi
private[impl] final class RestBulkApiV7[T, C](indexName: String,
- versionType: Option[String],
- allowExplicitIndex: Boolean,
- messageWriter: MessageWriter[T])
+ versionType: Option[String],
+ allowExplicitIndex: Boolean,
+ messageWriter: MessageWriter[T])
extends RestBulkApi[T, C] {
def toJson(messages: immutable.Seq[WriteMessage[T, C]]): String =
@@ -33,17 +33,15 @@ private[impl] final class RestBulkApiV7[T, C](indexName: String,
val fields = Seq(
optionalNumber("version", message.version),
optionalString("version_type", versionType),
- optionalString("_id", message.id)
- ).flatten
+ optionalString("_id", message.id)).flatten
"index" -> JsObject(sharedFields ++ fields: _*)
- case Create => "create" -> JsObject(sharedFields ++ optionalString("_id", message.id): _*)
+ case Create => "create" -> JsObject(sharedFields ++ optionalString("_id", message.id): _*)
case Update | Upsert => "update" -> JsObject(sharedFields :+ ("_id" -> JsString(message.id.get)): _*)
case Delete =>
val fields =
("_id" -> JsString(message.id.get)) +: Seq(
optionalNumber("version", message.version),
- optionalString("version_type", versionType)
- ).flatten
+ optionalString("version_type", versionType)).flatten
"delete" -> JsObject(sharedFields ++ fields: _*)
case Nop => "" -> JsObject()
}
@@ -54,7 +52,7 @@ private[impl] final class RestBulkApiV7[T, C](indexName: String,
}
.filter(_.nonEmpty) match {
case Nil => "" // if all NOPs
- case x => x.mkString("", "\n", "\n")
+ case x => x.mkString("", "\n", "\n")
}
override def constructSharedFields(message: WriteMessage[T, C]): Seq[(String, JsString)] = {
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala
index 8f19c4a9..fa9759ce 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala
@@ -6,7 +6,7 @@ package akka.stream.alpakka.elasticsearch.javadsl
import akka.NotUsed
import akka.annotation.ApiMayChange
-import akka.stream.alpakka.elasticsearch.{scaladsl, _}
+import akka.stream.alpakka.elasticsearch.{ scaladsl, _ }
import com.fasterxml.jackson.databind.ObjectMapper
import scala.jdk.CollectionConverters._
@@ -28,8 +28,8 @@ object ElasticsearchFlow {
def create[T](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- objectMapper: ObjectMapper
- ): akka.stream.javadsl.Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
+ objectMapper: ObjectMapper)
+ : akka.stream.javadsl.Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
create(elasticsearchParams, settings, new JacksonWriter[T](objectMapper))
/**
@@ -44,8 +44,8 @@ object ElasticsearchFlow {
def create[T](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- messageWriter: MessageWriter[T]
- ): akka.stream.javadsl.Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
+ messageWriter: MessageWriter[T])
+ : akka.stream.javadsl.Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
scaladsl.ElasticsearchFlow
.create(elasticsearchParams, settings, messageWriter)
.asJava
@@ -63,8 +63,7 @@ object ElasticsearchFlow {
def createWithPassThrough[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- objectMapper: ObjectMapper
- ): akka.stream.javadsl.Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
+ objectMapper: ObjectMapper): akka.stream.javadsl.Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
createWithPassThrough(elasticsearchParams, settings, new JacksonWriter[T](objectMapper))
/**
@@ -80,8 +79,7 @@ object ElasticsearchFlow {
def createWithPassThrough[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- messageWriter: MessageWriter[T]
- ): akka.stream.javadsl.Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
+ messageWriter: MessageWriter[T]): akka.stream.javadsl.Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
scaladsl.ElasticsearchFlow
.createWithPassThrough(elasticsearchParams, settings, messageWriter)
.asJava
@@ -100,8 +98,8 @@ object ElasticsearchFlow {
def createBulk[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- objectMapper: ObjectMapper
- ): akka.stream.javadsl.Flow[java.util.List[WriteMessage[T, C]], java.util.List[WriteResult[T, C]], NotUsed] =
+ objectMapper: ObjectMapper)
+ : akka.stream.javadsl.Flow[java.util.List[WriteMessage[T, C]], java.util.List[WriteResult[T, C]], NotUsed] =
createBulk(elasticsearchParams, settings, new JacksonWriter[T](objectMapper))
/**
@@ -118,15 +116,14 @@ object ElasticsearchFlow {
def createBulk[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- messageWriter: MessageWriter[T]
- ): akka.stream.javadsl.Flow[java.util.List[WriteMessage[T, C]], java.util.List[WriteResult[T, C]], NotUsed] =
+ messageWriter: MessageWriter[T])
+ : akka.stream.javadsl.Flow[java.util.List[WriteMessage[T, C]], java.util.List[WriteResult[T, C]], NotUsed] =
akka.stream.scaladsl
.Flow[java.util.List[WriteMessage[T, C]]]
.map(_.asScala.toIndexedSeq)
.via(
scaladsl.ElasticsearchFlow
- .createBulk(elasticsearchParams, settings, messageWriter)
- )
+ .createBulk(elasticsearchParams, settings, messageWriter))
.map(_.asJava)
.asJava
@@ -143,8 +140,8 @@ object ElasticsearchFlow {
def createWithContext[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- objectMapper: ObjectMapper
- ): akka.stream.javadsl.FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] =
+ objectMapper: ObjectMapper)
+ : akka.stream.javadsl.FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] =
createWithContext(elasticsearchParams, settings, new JacksonWriter[T](objectMapper))
/**
@@ -160,8 +157,8 @@ object ElasticsearchFlow {
def createWithContext[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- messageWriter: MessageWriter[T]
- ): akka.stream.javadsl.FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] =
+ messageWriter: MessageWriter[T])
+ : akka.stream.javadsl.FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] =
scaladsl.ElasticsearchFlow
.createWithContext(elasticsearchParams, settings, messageWriter)
.asJava
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSink.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSink.scala
index e80459cd..a0cfc461 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSink.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSink.scala
@@ -8,7 +8,7 @@ import java.util.concurrent.CompletionStage
import akka.stream.alpakka.elasticsearch._
import akka.stream.javadsl._
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import com.fasterxml.jackson.databind.ObjectMapper
/**
@@ -22,8 +22,7 @@ object ElasticsearchSink {
def create[T](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- objectMapper: ObjectMapper
- ): akka.stream.javadsl.Sink[WriteMessage[T, NotUsed], CompletionStage[Done]] =
+ objectMapper: ObjectMapper): akka.stream.javadsl.Sink[WriteMessage[T, NotUsed], CompletionStage[Done]] =
ElasticsearchFlow
.create(elasticsearchParams, settings, objectMapper)
.toMat(Sink.ignore[WriteResult[T, NotUsed]](), Keep.right[NotUsed, CompletionStage[Done]])
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala
index dd1c4842..abb1a76e 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala
@@ -6,12 +6,12 @@ package akka.stream.alpakka.elasticsearch.javadsl
import akka.NotUsed
import akka.actor.ActorSystem
-import akka.http.scaladsl.{Http, HttpExt}
-import akka.stream.alpakka.elasticsearch.{impl, _}
+import akka.http.scaladsl.{ Http, HttpExt }
+import akka.stream.alpakka.elasticsearch.{ impl, _ }
import akka.stream.javadsl.Source
-import akka.stream.{Attributes, Materializer}
+import akka.stream.{ Attributes, Materializer }
import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.databind.node.{ArrayNode, NumericNode}
+import com.fasterxml.jackson.databind.node.{ ArrayNode, NumericNode }
import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext
@@ -26,8 +26,8 @@ object ElasticsearchSource {
* Using default objectMapper
*/
def create(elasticsearchParams: ElasticsearchParams,
- query: String,
- settings: SourceSettingsBase[_, _]): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
+ query: String,
+ settings: SourceSettingsBase[_, _]): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
create(elasticsearchParams, query, settings, new ObjectMapper())
/**
@@ -35,9 +35,9 @@ object ElasticsearchSource {
* Using custom objectMapper
*/
def create(elasticsearchParams: ElasticsearchParams,
- query: String,
- settings: SourceSettingsBase[_, _],
- objectMapper: ObjectMapper): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
+ query: String,
+ settings: SourceSettingsBase[_, _],
+ objectMapper: ObjectMapper): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
Source
.fromMaterializer { (mat: Materializer, _: Attributes) =>
{
@@ -51,9 +51,7 @@ object ElasticsearchSource {
elasticsearchParams,
Map("query" -> query),
settings,
- new JacksonReader[java.util.Map[String, Object]](objectMapper, classOf[java.util.Map[String, Object]])
- )
- )
+ new JacksonReader[java.util.Map[String, Object]](objectMapper, classOf[java.util.Map[String, Object]])))
}
}
.mapMaterializedValue(_ => NotUsed)
@@ -69,9 +67,9 @@ object ElasticsearchSource {
* searchParams.put("_source", "[\"fieldToInclude\", \"anotherFieldToInclude\"]");
*/
def create(elasticsearchParams: ElasticsearchParams,
- searchParams: java.util.Map[String, String],
- settings: SourceSettingsBase[_, _],
- objectMapper: ObjectMapper): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
+ searchParams: java.util.Map[String, String],
+ settings: SourceSettingsBase[_, _],
+ objectMapper: ObjectMapper): Source[ReadResult[java.util.Map[String, Object]], NotUsed] =
Source
.fromMaterializer { (mat: Materializer, _: Attributes) =>
{
@@ -84,9 +82,7 @@ object ElasticsearchSource {
elasticsearchParams,
searchParams.asScala.toMap,
settings,
- new JacksonReader[java.util.Map[String, Object]](objectMapper, classOf[java.util.Map[String, Object]])
- )
- )
+ new JacksonReader[java.util.Map[String, Object]](objectMapper, classOf[java.util.Map[String, Object]])))
}
}
.mapMaterializedValue(_ => NotUsed)
@@ -96,9 +92,9 @@ object ElasticsearchSource {
* Using default objectMapper
*/
def typed[T](elasticsearchParams: ElasticsearchParams,
- query: String,
- settings: SourceSettingsBase[_, _],
- clazz: Class[T]): Source[ReadResult[T], NotUsed] =
+ query: String,
+ settings: SourceSettingsBase[_, _],
+ clazz: Class[T]): Source[ReadResult[T], NotUsed] =
typed[T](elasticsearchParams, query, settings, clazz, new ObjectMapper())
/**
@@ -106,10 +102,10 @@ object ElasticsearchSource {
* Using custom objectMapper
*/
def typed[T](elasticsearchParams: ElasticsearchParams,
- query: String,
- settings: SourceSettingsBase[_, _],
- clazz: Class[T],
- objectMapper: ObjectMapper): Source[ReadResult[T], NotUsed] =
+ query: String,
+ settings: SourceSettingsBase[_, _],
+ clazz: Class[T],
+ objectMapper: ObjectMapper): Source[ReadResult[T], NotUsed] =
Source
.fromMaterializer { (mat: Materializer, _: Attributes) =>
{
@@ -122,9 +118,7 @@ object ElasticsearchSource {
elasticsearchParams,
Map("query" -> query),
settings,
- new JacksonReader[T](objectMapper, clazz)
- )
- )
+ new JacksonReader[T](objectMapper, clazz)))
}
}
.mapMaterializedValue(_ => NotUsed)
@@ -140,10 +134,10 @@ object ElasticsearchSource {
* searchParams.put("_source", "[\"fieldToInclude\", \"anotherFieldToInclude\"]");
*/
def typed[T](elasticsearchParams: ElasticsearchParams,
- searchParams: java.util.Map[String, String],
- settings: SourceSettingsBase[_, _],
- clazz: Class[T],
- objectMapper: ObjectMapper): Source[ReadResult[T], NotUsed] =
+ searchParams: java.util.Map[String, String],
+ settings: SourceSettingsBase[_, _],
+ clazz: Class[T],
+ objectMapper: ObjectMapper): Source[ReadResult[T], NotUsed] =
Source
.fromMaterializer { (mat: Materializer, _: Attributes) =>
{
@@ -156,9 +150,7 @@ object ElasticsearchSource {
elasticsearchParams,
searchParams.asScala.toMap,
settings,
- new JacksonReader[T](objectMapper, clazz)
- )
- )
+ new JacksonReader[T](objectMapper, clazz)))
}
}
.mapMaterializedValue(_ => NotUsed)
@@ -179,7 +171,7 @@ object ElasticsearchSource {
val source = element.get("_source")
val version: Option[Long] = element.get("_version") match {
case n: NumericNode => Some(n.asLong())
- case _ => None
+ case _ => None
}
new ReadResult[T](id, mapper.treeToValue(source, clazz), version)
diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/scaladsl/ElasticsearchFlow.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/scaladsl/ElasticsearchFlow.scala
index a2163398..fb7c9e42 100644
--- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/scaladsl/ElasticsearchFlow.scala
+++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/scaladsl/ElasticsearchFlow.scala
@@ -6,10 +6,10 @@ package akka.stream.alpakka.elasticsearch.scaladsl
import akka.NotUsed
import akka.actor.ActorSystem
-import akka.annotation.{ApiMayChange, InternalApi}
-import akka.http.scaladsl.{Http, HttpExt}
-import akka.stream.alpakka.elasticsearch.{impl, _}
-import akka.stream.scaladsl.{Flow, FlowWithContext, RetryFlow}
+import akka.annotation.{ ApiMayChange, InternalApi }
+import akka.http.scaladsl.{ Http, HttpExt }
+import akka.stream.alpakka.elasticsearch.{ impl, _ }
+import akka.stream.scaladsl.{ Flow, FlowWithContext, RetryFlow }
import spray.json._
import scala.collection.immutable
@@ -28,8 +28,7 @@ object ElasticsearchFlow {
* This factory method requires an implicit Spray JSON writer for `T`.
*/
def create[T](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _])(
- implicit sprayJsonWriter: JsonWriter[T]
- ): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
+ implicit sprayJsonWriter: JsonWriter[T]): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] =
create[T](elasticsearchParams, settings, new SprayJsonWriter[T]()(sprayJsonWriter))
/**
@@ -38,8 +37,8 @@ object ElasticsearchFlow {
* successful execution.
*/
def create[T](elasticsearchParams: ElasticsearchParams,
- settings: WriteSettingsBase[_, _],
- writer: MessageWriter[T]): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] = {
+ settings: WriteSettingsBase[_, _],
+ writer: MessageWriter[T]): Flow[WriteMessage[T, NotUsed], WriteResult[T, NotUsed], NotUsed] = {
Flow[WriteMessage[T, NotUsed]]
.batch(settings.bufferSize, immutable.Seq(_)) { case (seq, wm) => seq :+ wm }
.via(stageFlow(elasticsearchParams, settings, writer))
@@ -55,8 +54,7 @@ object ElasticsearchFlow {
* This factory method requires an implicit Spray JSON writer for `T`.
*/
def createWithPassThrough[T, C](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _])(
- implicit sprayJsonWriter: JsonWriter[T]
- ): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
+ implicit sprayJsonWriter: JsonWriter[T]): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] =
createWithPassThrough[T, C](elasticsearchParams, settings, new SprayJsonWriter[T]()(sprayJsonWriter))
/**
@@ -66,8 +64,8 @@ object ElasticsearchFlow {
* successful execution.
*/
def createWithPassThrough[T, C](elasticsearchParams: ElasticsearchParams,
- settings: WriteSettingsBase[_, _],
- writer: MessageWriter[T]): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] = {
+ settings: WriteSettingsBase[_, _],
+ writer: MessageWriter[T]): Flow[WriteMessage[T, C], WriteResult[T, C], NotUsed] = {
Flow[WriteMessage[T, C]]
.batch(settings.bufferSize, immutable.Seq(_)) { case (seq, wm) => seq :+ wm }
.via(stageFlow(elasticsearchParams, settings, writer))
@@ -84,8 +82,8 @@ object ElasticsearchFlow {
* This factory method requires an implicit Spray JSON writer for `T`.
*/
def createBulk[T, C](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _])(
- implicit sprayJsonWriter: JsonWriter[T]
- ): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] =
+ implicit sprayJsonWriter: JsonWriter[T])
+ : Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] =
createBulk[T, C](elasticsearchParams, settings, new SprayJsonWriter[T]()(sprayJsonWriter))
/**
@@ -98,8 +96,7 @@ object ElasticsearchFlow {
def createBulk[T, C](
elasticsearchParams: ElasticsearchParams,
settings: WriteSettingsBase[_, _],
- writer: MessageWriter[T]
- ): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = {
+ writer: MessageWriter[T]): Flow[immutable.Seq[WriteMessage[T, C]], immutable.Seq[WriteResult[T, C]], NotUsed] = {
stageFlow(elasticsearchParams, settings, writer)
}
@@ -113,8 +110,8 @@ object ElasticsearchFlow {
*/
@ApiMayChange
def createWithContext[T, C](elasticsearchParams: ElasticsearchParams, settings: WriteSettingsBase[_, _])(
- implicit sprayJsonWriter: JsonWriter[T]
- ): FlowWithContext[WriteMessage[T, NotUsed], C, WriteResult[T, C], C, NotUsed] =
+ implicit sprayJsonWriter: JsonWriter[T])
... 50816 lines suppressed ...
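All of the hunks above apply the same mechanical rewrites: spaces inside import-selector braces, no closing parenthesis dangling on its own line, vertically aligned case arrows, and continuation parameters aligned under the opening parenthesis. A minimal .scalafmt.conf along the following lines would produce comparable output; this fragment is inferred from the hunks themselves rather than taken from the repository, so the project's actual configuration may set different or additional options:

    version = 3.6.1                     # assumed; any scalafmt 3.x release should behave similarly
    runner.dialect = scala213           # assumed dialect for this codebase
    maxColumn = 120                     # inferred from the longest reformatted lines
    spaces.inImportCurlyBraces = true   # import akka.{ Done, NotUsed } instead of {Done, NotUsed}
    danglingParentheses.preset = false  # closing ")" moves up onto the last argument line
    align.preset = more                 # aligns "=>" in case clauses, as in the RestBulkApi hunks

With the sbt-scalafmt plugin, sbt scalafmtCheckAll verifies the style without rewriting files, and sbt scalafmtAll applies it across all configured projects.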