Posted to commits@camel.apache.org by ac...@apache.org on 2023/07/27 06:20:17 UTC

[camel] branch aws-sdk-2.20.112 created (now 9927292df2a)

This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a change to branch aws-sdk-2.20.112
in repository https://gitbox.apache.org/repos/asf/camel.git


      at 9927292df2a Regen

This branch includes the following new commits:

     new 3be5c5ec6f5 Upgrade AWS SDK v2 to version 2.20.112
     new 9927292df2a Regen

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[camel] 01/02: Upgrade AWS SDK v2 to version 2.20.112

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch aws-sdk-2.20.112
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 3be5c5ec6f5774df9c5108857f673acec1126265
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Thu Jul 27 08:14:02 2023 +0200

    Upgrade AWS SDK v2 to version 2.20.112
    
    Signed-off-by: Andrea Cosentino <an...@gmail.com>
---
 parent/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/parent/pom.xml b/parent/pom.xml
index 472f57fe532..c398bc5c59e 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -70,7 +70,7 @@
         <avro-ipc-jetty-version>1.11.2</avro-ipc-jetty-version>
         <avro-ipc-netty-version>1.11.2</avro-ipc-netty-version>
         <awaitility-version>4.2.0</awaitility-version>
-        <aws-java-sdk2-version>2.20.109</aws-java-sdk2-version>
+        <aws-java-sdk2-version>2.20.112</aws-java-sdk2-version>
         <aws-xray-version>2.14.0</aws-xray-version>
         <azure-sdk-bom-version>1.2.14</azure-sdk-bom-version>
         <azure-storage-blob-changefeed-version>12.0.0-beta.18</azure-storage-blob-changefeed-version>
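
The commit above only bumps the aws-java-sdk2-version property from 2.20.109 to 2.20.112 in the parent POM, so every camel-aws module picks up the new SDK through the managed dependency versions. A downstream build can confirm which SDK build actually lands on its classpath by reading the jar manifest; this is a minimal sketch, assuming the sdk-core artifact publishes Implementation-Version (the class name AwsSdkVersionCheck is illustrative only):

    import software.amazon.awssdk.core.SdkClient;

    // Minimal sketch: read the AWS SDK v2 version from the sdk-core jar manifest.
    // Assumption: the jar sets Implementation-Version; if it does not, this prints
    // null and the aws-java-sdk2-version property remains the source of truth.
    public class AwsSdkVersionCheck {
        public static void main(String[] args) {
            Package core = SdkClient.class.getPackage();
            System.out.println("AWS SDK v2 on classpath: "
                    + core.getImplementationVersion()); // expected: 2.20.112
        }
    }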


[camel] 02/02: Regen

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch aws-sdk-2.20.112
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 9927292df2a70bb52d8fa949c91e19102f0aa8f7
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Thu Jul 27 08:19:41 2023 +0200

    Regen
    
    Signed-off-by: Andrea Cosentino <an...@gmail.com>
---
 .../aws/secretsmanager/SecretsManagerEndpoint.java |  3 +-
 .../camel/component/kafka/KafkaComponent.java      |  1 -
 .../camel/component/kafka/KafkaConsumer.java       |  3 +-
 .../camel/component/kafka/KafkaProducer.java       |  3 +-
 .../quartz/SpringScheduledRoutePolicyTest.java     | 12 ++++---
 .../AzureServicebusComponentBuilderFactory.java    | 37 ++++++++--------------
 .../GoogleMailStreamComponentBuilderFactory.java   | 18 +++++++++++
 .../dsl/KafkaComponentBuilderFactory.java          | 20 ++++++++++--
 .../endpoint/dsl/KafkaEndpointBuilderFactory.java  | 37 ++++++++++++++++++++--
 9 files changed, 99 insertions(+), 35 deletions(-)

diff --git a/components/camel-aws/camel-aws-secrets-manager/src/main/java/org/apache/camel/component/aws/secretsmanager/SecretsManagerEndpoint.java b/components/camel-aws/camel-aws-secrets-manager/src/main/java/org/apache/camel/component/aws/secretsmanager/SecretsManagerEndpoint.java
index ef74b8169f4..2044a98f20e 100644
--- a/components/camel-aws/camel-aws-secrets-manager/src/main/java/org/apache/camel/component/aws/secretsmanager/SecretsManagerEndpoint.java
+++ b/components/camel-aws/camel-aws-secrets-manager/src/main/java/org/apache/camel/component/aws/secretsmanager/SecretsManagerEndpoint.java
@@ -79,7 +79,8 @@ public class SecretsManagerEndpoint extends ScheduledPollEndpoint {
 
         if (healthCheckRepository != null) {
             clientHealthCheck = new SecretsManagerClientHealthCheck(this, getId());
-            clientHealthCheck.setEnabled(getComponent().isHealthCheckEnabled() && getComponent().isHealthCheckProducerEnabled());
+            clientHealthCheck
+                    .setEnabled(getComponent().isHealthCheckEnabled() && getComponent().isHealthCheckProducerEnabled());
             healthCheckRepository.addHealthCheck(clientHealthCheck);
         }
     }
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaComponent.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaComponent.java
index b55f4b9de31..a43b1510029 100644
--- a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaComponent.java
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaComponent.java
@@ -26,7 +26,6 @@ import org.apache.camel.component.kafka.consumer.KafkaManualCommit;
 import org.apache.camel.component.kafka.consumer.KafkaManualCommitFactory;
 import org.apache.camel.spi.Metadata;
 import org.apache.camel.spi.annotations.Component;
-import org.apache.camel.support.DefaultComponent;
 import org.apache.camel.support.HealthCheckComponent;
 import org.apache.camel.support.PropertyBindingSupport;
 import org.apache.camel.util.ObjectHelper;
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
index c64f10db7c2..9188d498666 100644
--- a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
@@ -131,7 +131,8 @@ public class KafkaConsumer extends DefaultConsumer
 
         if (healthCheckRepository != null) {
             consumerHealthCheck = new KafkaConsumerHealthCheck(this, getRouteId());
-            consumerHealthCheck.setEnabled(getEndpoint().getComponent().isHealthCheckEnabled() && getEndpoint().getComponent().isHealthCheckConsumerEnabled());
+            consumerHealthCheck.setEnabled(getEndpoint().getComponent().isHealthCheckEnabled()
+                    && getEndpoint().getComponent().isHealthCheckConsumerEnabled());
             healthCheckRepository.addHealthCheck(consumerHealthCheck);
         }
 
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaProducer.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaProducer.java
index 19803e2a3ad..a2b7f7f7d0d 100755
--- a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaProducer.java
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaProducer.java
@@ -202,7 +202,8 @@ public class KafkaProducer extends DefaultAsyncProducer {
 
         if (healthCheckRepository != null) {
             producerHealthCheck = new KafkaProducerHealthCheck(this, clientId);
-            producerHealthCheck.setEnabled(getEndpoint().getComponent().isHealthCheckEnabled() && getEndpoint().getComponent().isHealthCheckProducerEnabled());
+            producerHealthCheck.setEnabled(getEndpoint().getComponent().isHealthCheckEnabled()
+                    && getEndpoint().getComponent().isHealthCheckProducerEnabled());
             healthCheckRepository.addHealthCheck(producerHealthCheck);
         }
     }
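
The SecretsManagerEndpoint, KafkaConsumer and KafkaProducer hunks above are line-length reformats of the same expression: a health check is enabled only when the component-wide flag and the matching producer- or consumer-specific flag are both true. A minimal usage sketch, assuming the setter counterparts of the isHealthCheck*Enabled() getters shown above are exposed on the component (the class name KafkaHealthCheckConfig is illustrative):

    import org.apache.camel.CamelContext;
    import org.apache.camel.component.kafka.KafkaComponent;

    public final class KafkaHealthCheckConfig {
        // Assumed setters mirroring the getters used in the hunks above.
        public static void configure(CamelContext camelContext) {
            KafkaComponent kafka = camelContext.getComponent("kafka", KafkaComponent.class);
            kafka.setHealthCheckEnabled(true);           // component-wide master switch
            kafka.setHealthCheckProducerEnabled(false);  // KafkaProducerHealthCheck stays disabled
            kafka.setHealthCheckConsumerEnabled(true);   // KafkaConsumerHealthCheck may be enabled
        }
    }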
diff --git a/components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SpringScheduledRoutePolicyTest.java b/components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SpringScheduledRoutePolicyTest.java
index 40a50180a4e..40cb27d72aa 100644
--- a/components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SpringScheduledRoutePolicyTest.java
+++ b/components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SpringScheduledRoutePolicyTest.java
@@ -56,7 +56,8 @@ public abstract class SpringScheduledRoutePolicyTest {
 
         context.getRouteController().stopRoute("testRoute", 1000, TimeUnit.MILLISECONDS);
 
-        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> assertSame(ServiceStatus.Started, context.getRouteController().getRouteStatus("testRoute")));
+        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(
+                () -> assertSame(ServiceStatus.Started, context.getRouteController().getRouteStatus("testRoute")));
 
         context.createProducerTemplate().sendBody("direct:start?timeout=1000", "Ready or not, Here, I come");
 
@@ -69,7 +70,8 @@ public abstract class SpringScheduledRoutePolicyTest {
 
         CamelContext context = startRouteWithPolicy("stopPolicy");
 
-        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> assertSame(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("testRoute")));
+        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(
+                () -> assertSame(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("testRoute")));
 
         assertThrows(CamelExecutionException.class,
                 () -> context.createProducerTemplate().sendBody("direct:start?timeout=1000", "Ready or not, Here, I come"));
@@ -83,7 +85,8 @@ public abstract class SpringScheduledRoutePolicyTest {
         CamelContext context = startRouteWithPolicy("suspendPolicy");
 
         // wait for route to suspend
-        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(ServiceHelper.isSuspended(context.getRoute("testRoute").getConsumer())));
+        Awaitility.await().atMost(5, TimeUnit.SECONDS)
+                .untilAsserted(() -> assertTrue(ServiceHelper.isSuspended(context.getRoute("testRoute").getConsumer())));
 
         assertThrows(CamelExecutionException.class,
                 () -> context.createProducerTemplate().sendBody("direct:start?timeout=1000", "Ready or not, Here, I come"));
@@ -101,7 +104,8 @@ public abstract class SpringScheduledRoutePolicyTest {
 
         ServiceHelper.suspendService(context.getRoute("testRoute").getConsumer());
 
-        Awaitility.await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(ServiceHelper.isStarted(context.getRoute("testRoute").getConsumer())));
+        Awaitility.await().atMost(5, TimeUnit.SECONDS)
+                .untilAsserted(() -> assertTrue(ServiceHelper.isStarted(context.getRoute("testRoute").getConsumer())));
 
         context.createProducerTemplate().sendBody("direct:start?timeout=1000", "Ready or not, Here, I come");
 
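The SpringScheduledRoutePolicyTest hunks are formatting-only; the Awaitility idiom they wrap is unchanged. For reference, a self-contained sketch of that idiom, with an illustrative enum in place of Camel's ServiceStatus:

    import static org.junit.jupiter.api.Assertions.assertSame;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;
    import org.awaitility.Awaitility;

    public class AwaitilityIdiomExample {
        enum Status { Stopped, Started }

        public static void main(String[] args) {
            AtomicReference<Status> status = new AtomicReference<>(Status.Stopped);
            new Thread(() -> status.set(Status.Started)).start();

            // untilAsserted(...) re-runs the lambda until the assertion passes
            // or the 5-second budget is exhausted, then rethrows the last failure.
            Awaitility.await().atMost(5, TimeUnit.SECONDS)
                    .untilAsserted(() -> assertSame(Status.Started, status.get()));
        }
    }
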
diff --git a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureServicebusComponentBuilderFactory.java b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureServicebusComponentBuilderFactory.java
index 5ee94295f5e..d8c2b233d74 100644
--- a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureServicebusComponentBuilderFactory.java
+++ b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureServicebusComponentBuilderFactory.java
@@ -69,7 +69,7 @@ public interface AzureServicebusComponentBuilderFactory {
         }
         /**
          * Sets the transport type by which all the communication with Azure
-         * Service Bus occurs. Default value is AmqpTransportType#AMQP.
+         * Service Bus occurs. Default value is AMQP.
          * 
          * The option is a:
          * &lt;code&gt;com.azure.core.amqp.AmqpTransportType&lt;/code&gt; type.
@@ -88,8 +88,7 @@ public interface AzureServicebusComponentBuilderFactory {
         /**
          * Sets the ClientOptions to be sent from the client built from this
          * builder, enabling customization of certain properties, as well as
-         * support the addition of custom header information. Refer to the
-         * ClientOptions documentation for more information.
+         * support the addition of custom header information.
          * 
          * The option is a:
          * &lt;code&gt;com.azure.core.util.ClientOptions&lt;/code&gt; type.
@@ -122,8 +121,8 @@ public interface AzureServicebusComponentBuilderFactory {
         }
         /**
          * Sets the proxy configuration to use for ServiceBusSenderAsyncClient.
-         * When a proxy is configured, AmqpTransportType#AMQP_WEB_SOCKETS must
-         * be used for the transport type.
+         * When a proxy is configured, AMQP_WEB_SOCKETS must be used for the
+         * transport type.
          * 
          * The option is a:
          * &lt;code&gt;com.azure.core.amqp.ProxyOptions&lt;/code&gt; type.
@@ -197,12 +196,8 @@ public interface AzureServicebusComponentBuilderFactory {
         }
         /**
          * Disables auto-complete and auto-abandon of received messages. By
-         * default, a successfully processed message is {link
-         * ServiceBusReceiverAsyncClient#complete(ServiceBusReceivedMessage)
-         * completed}. If an error happens when the message is processed, it is
-         * {link
-         * ServiceBusReceiverAsyncClient#abandon(ServiceBusReceivedMessage)
-         * abandoned}.
+         * default, a successfully processed message is completed. If an error
+         * happens when the message is abandoned.
          * 
          * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
          * 
@@ -219,9 +214,8 @@ public interface AzureServicebusComponentBuilderFactory {
         }
         /**
          * Sets the amount of time to continue auto-renewing the lock. Setting
-         * Duration#ZERO or null disables auto-renewal. For {link
-         * ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode,
-         * auto-renewal is disabled.
+         * ZERO disables auto-renewal. For ServiceBus receive mode
+         * (RECEIVE_AND_DELETE RECEIVE_AND_DELETE), auto-renewal is disabled.
          * 
          * The option is a: &lt;code&gt;java.time.Duration&lt;/code&gt; type.
          * 
@@ -253,13 +247,11 @@ public interface AzureServicebusComponentBuilderFactory {
             return this;
         }
         /**
-         * Sets the prefetch count of the receiver. For both {link
-         * ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} and {link
-         * ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} modes
-         * the default value is 1. Prefetch speeds up the message flow by aiming
-         * to have a message readily available for local retrieval when and
-         * before the application asks for one using
-         * ServiceBusReceiverAsyncClient#receiveMessages(). Setting a non-zero
+         * Sets the prefetch count of the receiver. For both PEEK_LOCK PEEK_LOCK
+         * and RECEIVE_AND_DELETE RECEIVE_AND_DELETE receive modes the default
+         * value is 1. Prefetch speeds up the message flow by aiming to have a
+         * message readily available for local retrieval when and before the
+         * application asks for one using receive message. Setting a non-zero
          * value will prefetch that number of messages. Setting the value to
          * zero turns prefetch off.
          * 
@@ -486,8 +478,7 @@ public interface AzureServicebusComponentBuilderFactory {
             return this;
         }
         /**
-         * A TokenCredential for Azure AD authentication, implemented in
-         * com.azure.identity.
+         * A TokenCredential for Azure AD authentication.
          * 
          * The option is a:
          * &lt;code&gt;com.azure.core.credential.TokenCredential&lt;/code&gt;
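
The AzureServicebusComponentBuilderFactory changes only strip unresolved {link ...} references from the generated javadoc; the documented behaviour (transport type defaulting to AMQP, AMQP_WEB_SOCKETS required when a proxy is configured, prefetch count defaulting to 1) is unchanged. A hedged endpoint-URI sketch of those options; the parameter names connectionString, amqpTransportType and prefetchCount are assumptions based on the usual Camel naming and should be checked against the component documentation:

    import org.apache.camel.builder.RouteBuilder;

    public class ServiceBusRouteSketch extends RouteBuilder {
        @Override
        public void configure() {
            // All query-parameter names below are assumed, not taken from this diff.
            from("azure-servicebus:myQueue"
                    + "?connectionString=RAW({{servicebus.connection-string}})"
                    + "&amqpTransportType=AMQP_WEB_SOCKETS" // needed when a proxy is configured
                    + "&prefetchCount=10")                  // 0 disables prefetch, default is 1
                .to("log:servicebus");
        }
    }
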
diff --git a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleMailStreamComponentBuilderFactory.java b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleMailStreamComponentBuilderFactory.java
index 11a6a993700..c0cbbf22356 100644
--- a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleMailStreamComponentBuilderFactory.java
+++ b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/GoogleMailStreamComponentBuilderFactory.java
@@ -176,6 +176,23 @@ public interface GoogleMailStreamComponentBuilderFactory {
             doSetProperty("query", query);
             return this;
         }
+        /**
+         * Whether to store the entire email message in an RFC 2822 formatted
+         * and base64url encoded string (in JSon format), in the Camel message
+         * body.
+         * 
+         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
+         * 
+         * Default: false
+         * Group: consumer
+         * 
+         * @param raw the value to set
+         * @return the dsl builder
+         */
+        default GoogleMailStreamComponentBuilder raw(boolean raw) {
+            doSetProperty("raw", raw);
+            return this;
+        }
         /**
          * GMail scopes.
          * 
@@ -340,6 +357,7 @@ public interface GoogleMailStreamComponentBuilderFactory {
             case "markAsRead": getOrCreateConfiguration((GoogleMailStreamComponent) component).setMarkAsRead((boolean) value); return true;
             case "maxResults": getOrCreateConfiguration((GoogleMailStreamComponent) component).setMaxResults((long) value); return true;
             case "query": getOrCreateConfiguration((GoogleMailStreamComponent) component).setQuery((java.lang.String) value); return true;
+            case "raw": getOrCreateConfiguration((GoogleMailStreamComponent) component).setRaw((boolean) value); return true;
             case "scopes": getOrCreateConfiguration((GoogleMailStreamComponent) component).setScopes((java.util.List) value); return true;
             case "autowiredEnabled": ((GoogleMailStreamComponent) component).setAutowiredEnabled((boolean) value); return true;
             case "clientFactory": ((GoogleMailStreamComponent) component).setClientFactory((org.apache.camel.component.google.mail.GoogleMailClientFactory) value); return true;
diff --git a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KafkaComponentBuilderFactory.java b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KafkaComponentBuilderFactory.java
index 9507fd9b207..96215a84678 100644
--- a/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KafkaComponentBuilderFactory.java
+++ b/dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KafkaComponentBuilderFactory.java
@@ -1260,8 +1260,6 @@ public interface KafkaComponentBuilderFactory {
          * 
          * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
          * 
-         * Default:
-         * org.apache.kafka.clients.producer.internals.DefaultPartitioner
          * Group: producer
          * 
          * @param partitioner the value to set
@@ -1271,6 +1269,23 @@ public interface KafkaComponentBuilderFactory {
             doSetProperty("partitioner", partitioner);
             return this;
         }
+        /**
+         * Whether the message keys should be ignored when computing partition.
+         * This setting has effect only when partitioner is not set.
+         * 
+         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
+         * 
+         * Default: false
+         * Group: producer
+         * 
+         * @param partitionerIgnoreKeys the value to set
+         * @return the dsl builder
+         */
+        default KafkaComponentBuilder partitionerIgnoreKeys(
+                boolean partitionerIgnoreKeys) {
+            doSetProperty("partitionerIgnoreKeys", partitionerIgnoreKeys);
+            return this;
+        }
         /**
          * The partition to which the record will be sent (or null if no
          * partition was specified). If this option has been configured then it
@@ -2265,6 +2280,7 @@ public interface KafkaComponentBuilderFactory {
             case "metricsSampleWindowMs": getOrCreateConfiguration((KafkaComponent) component).setMetricsSampleWindowMs((java.lang.Integer) value); return true;
             case "noOfMetricsSample": getOrCreateConfiguration((KafkaComponent) component).setNoOfMetricsSample((java.lang.Integer) value); return true;
             case "partitioner": getOrCreateConfiguration((KafkaComponent) component).setPartitioner((java.lang.String) value); return true;
+            case "partitionerIgnoreKeys": getOrCreateConfiguration((KafkaComponent) component).setPartitionerIgnoreKeys((boolean) value); return true;
             case "partitionKey": getOrCreateConfiguration((KafkaComponent) component).setPartitionKey((java.lang.Integer) value); return true;
             case "producerBatchSize": getOrCreateConfiguration((KafkaComponent) component).setProducerBatchSize((java.lang.Integer) value); return true;
             case "queueBufferingMaxMessages": getOrCreateConfiguration((KafkaComponent) component).setQueueBufferingMaxMessages((java.lang.Integer) value); return true;
diff --git a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java b/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java
index 2c777640af8..0877421d0ee 100644
--- a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java
+++ b/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java
@@ -2885,8 +2885,6 @@ public interface KafkaEndpointBuilderFactory {
          * 
          * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
          * 
-         * Default:
-         * org.apache.kafka.clients.producer.internals.DefaultPartitioner
          * Group: producer
          * 
          * @param partitioner the value to set
@@ -2896,6 +2894,41 @@ public interface KafkaEndpointBuilderFactory {
             doSetProperty("partitioner", partitioner);
             return this;
         }
+        /**
+         * Whether the message keys should be ignored when computing partition.
+         * This setting has effect only when partitioner is not set.
+         * 
+         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
+         * 
+         * Default: false
+         * Group: producer
+         * 
+         * @param partitionerIgnoreKeys the value to set
+         * @return the dsl builder
+         */
+        default KafkaEndpointProducerBuilder partitionerIgnoreKeys(
+                boolean partitionerIgnoreKeys) {
+            doSetProperty("partitionerIgnoreKeys", partitionerIgnoreKeys);
+            return this;
+        }
+        /**
+         * Whether the message keys should be ignored when computing partition.
+         * This setting has effect only when partitioner is not set.
+         * 
+         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
+         * type.
+         * 
+         * Default: false
+         * Group: producer
+         * 
+         * @param partitionerIgnoreKeys the value to set
+         * @return the dsl builder
+         */
+        default KafkaEndpointProducerBuilder partitionerIgnoreKeys(
+                String partitionerIgnoreKeys) {
+            doSetProperty("partitionerIgnoreKeys", partitionerIgnoreKeys);
+            return this;
+        }
         /**
          * The partition to which the record will be sent (or null if no
          * partition was specified). If this option has been configured then it