Posted to commits@camel.apache.org by nf...@apache.org on 2023/06/21 10:35:56 UTC

[camel] branch CAMEL-18890/remove-camel-vertx-kafka-p2 updated: CAMEL-18890: Remove camel-vertx-kafka

This is an automated email from the ASF dual-hosted git repository.

nfilotto pushed a commit to branch CAMEL-18890/remove-camel-vertx-kafka-p2
in repository https://gitbox.apache.org/repos/asf/camel.git


The following commit(s) were added to refs/heads/CAMEL-18890/remove-camel-vertx-kafka-p2 by this push:
     new b83648a7127 CAMEL-18890: Remove camel-vertx-kafka
b83648a7127 is described below

commit b83648a7127bb1abab5f60e3cb51e8c0df0667b0
Author: Nicolas Filotto <nf...@talend.com>
AuthorDate: Wed Jun 21 12:35:37 2023 +0200

    CAMEL-18890: Remove camel-vertx-kafka
---
 Jenkinsfile.s390x                                  |    2 +-
 bom/camel-bom/pom.xml                              |    5 -
 .../builder/endpoint/StaticEndpointBuilders.java   |   49 -
 .../dsl/VertxKafkaEndpointBuilderFactory.java      | 6318 --------------------
 parent/pom.xml                                     |    5 -
 5 files changed, 1 insertion(+), 6378 deletions(-)

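For anyone tracking this removal: routes that still reference the vertx-kafka scheme will stop resolving once this branch is merged, and the plain camel-kafka component is the usual replacement. The sketch below is illustrative only and is not part of this commit; the topic name, broker address, and log endpoint are placeholders, and the mapping of the removed bootstrapServers option to camel-kafka's brokers option is an assumption based on the two components' documented URI options.

    import org.apache.camel.builder.RouteBuilder;

    public class VertxKafkaMigrationRoute extends RouteBuilder {
        @Override
        public void configure() {
            // Before (camel-vertx-kafka, removed by this commit):
            // from("vertx-kafka:my-topic?bootstrapServers=localhost:9092")
            //         .to("log:received");

            // After (plain camel-kafka; 'brokers' is assumed to be the
            // counterpart of the removed 'bootstrapServers' option):
            from("kafka:my-topic?brokers=localhost:9092")
                    .to("log:received");
        }
    }
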
diff --git a/Jenkinsfile.s390x b/Jenkinsfile.s390x
index 9d8f37b1449..819026eed0b 100644
--- a/Jenkinsfile.s390x
+++ b/Jenkinsfile.s390x
@@ -82,7 +82,7 @@ pipeline {
             steps {
                 timeout(unit: 'HOURS', time: 7) {
                     // Skip the test case execution of modules which are either not supported on s390x or vendor images are not available for s390x.
-                    sh "./mvnw $MAVEN_PARAMS $MAVEN_TEST_PARAMS $MAVEN_TEST_LIMIT_PARAMS -Dmaven.test.failure.ignore=true -Dcheckstyle.skip=true verify -pl '!docs,!components/camel-kudu,!components/camel-djl,!components/camel-consul,!components/camel-pulsar,!components/camel-xmpp,!components/camel-google/camel-google-pubsub,!components/camel-hdfs,!components/camel-vertx/camel-vertx-kafka/camel-vertx-kafka-component,!components/camel-zookeeper,!components/camel-zookeeper-master'"
+                    sh "./mvnw $MAVEN_PARAMS $MAVEN_TEST_PARAMS $MAVEN_TEST_LIMIT_PARAMS -Dmaven.test.failure.ignore=true -Dcheckstyle.skip=true verify -pl '!docs,!components/camel-kudu,!components/camel-djl,!components/camel-consul,!components/camel-pulsar,!components/camel-xmpp,!components/camel-google/camel-google-pubsub,!components/camel-hdfs,!components/camel-zookeeper,!components/camel-zookeeper-master'"
                 }
             }
             post {
diff --git a/bom/camel-bom/pom.xml b/bom/camel-bom/pom.xml
index cfe98c11b6e..f4e10404ea9 100644
--- a/bom/camel-bom/pom.xml
+++ b/bom/camel-bom/pom.xml
@@ -46,11 +46,6 @@
         <artifactId>camel-servicenow-maven-plugin</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.camel.maven</groupId>
-        <artifactId>camel-vertx-kafka-maven-plugin</artifactId>
-        <version>${project.version}</version>
-      </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
         <artifactId>camel-activemq</artifactId>
diff --git a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/StaticEndpointBuilders.java b/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/StaticEndpointBuilders.java
index bff8dd4b15c..aae39789dbf 100644
--- a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/StaticEndpointBuilders.java
+++ b/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/StaticEndpointBuilders.java
@@ -15227,55 +15227,6 @@ public class StaticEndpointBuilders {
             String path) {
         return org.apache.camel.builder.endpoint.dsl.VertxHttpEndpointBuilderFactory.endpointBuilder(componentName, path);
     }
-    /**
-     * Vert.x Kafka (camel-vertx-kafka)
-     * Sent and receive messages to/from an Apache Kafka broker using vert.x
-     * Kafka client
-     * 
-     * Category: messaging
-     * Since: 3.7
-     * Maven coordinates: org.apache.camel:camel-vertx-kafka
-     * 
-     * Syntax: <code>vertx-kafka:topic</code>
-     * 
-     * Path parameter: topic (required)
-     * Name of the topic to use. On the consumer you can use comma to separate
-     * multiple topics. A producer can only send a message to a single topic.
-     * 
-     * @param path topic
-     * @return the dsl builder
-     */
-    @Deprecated
-    public static org.apache.camel.builder.endpoint.dsl.VertxKafkaEndpointBuilderFactory.VertxKafkaEndpointBuilder vertxKafka(
-            String path) {
-        return org.apache.camel.builder.endpoint.dsl.VertxKafkaEndpointBuilderFactory.endpointBuilder("vertx-kafka", path);
-    }
-    /**
-     * Vert.x Kafka (camel-vertx-kafka)
-     * Sent and receive messages to/from an Apache Kafka broker using vert.x
-     * Kafka client
-     * 
-     * Category: messaging
-     * Since: 3.7
-     * Maven coordinates: org.apache.camel:camel-vertx-kafka
-     * 
-     * Syntax: <code>vertx-kafka:topic</code>
-     * 
-     * Path parameter: topic (required)
-     * Name of the topic to use. On the consumer you can use comma to separate
-     * multiple topics. A producer can only send a message to a single topic.
-     * 
-     * @param componentName to use a custom component name for the endpoint
-     * instead of the default name
-     * @param path topic
-     * @return the dsl builder
-     */
-    @Deprecated
-    public static org.apache.camel.builder.endpoint.dsl.VertxKafkaEndpointBuilderFactory.VertxKafkaEndpointBuilder vertxKafka(
-            String componentName,
-            String path) {
-        return org.apache.camel.builder.endpoint.dsl.VertxKafkaEndpointBuilderFactory.endpointBuilder(componentName, path);
-    }
     /**
      * Vert.x WebSocket (camel-vertx-websocket)
      * Expose WebSocket endpoints and connect to remote WebSocket servers using
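With the two deprecated vertxKafka(...) factory methods removed from StaticEndpointBuilders, endpoint-DSL routes need to move to another builder. A minimal sketch follows, assuming the existing kafka(...) endpoint-DSL method and its brokers(...) option are the intended replacement; the topic, broker address, and log endpoint are placeholders, not part of this commit.

    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;

    public class KafkaEndpointDslRoute extends EndpointRouteBuilder {
        @Override
        public void configure() {
            // Previously: from(vertxKafka("my-topic").bootstrapServers("localhost:9092"))
            // The vertxKafka(...) builders are removed by this commit.
            // 'brokers' is assumed to mirror the camel-kafka URI option of the same name.
            from(kafka("my-topic").brokers("localhost:9092"))
                    .to(log("received"));
        }
    }
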
diff --git a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/VertxKafkaEndpointBuilderFactory.java b/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/VertxKafkaEndpointBuilderFactory.java
deleted file mode 100644
index ad42e1223f8..00000000000
--- a/dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/VertxKafkaEndpointBuilderFactory.java
+++ /dev/null
@@ -1,6318 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.builder.endpoint.dsl;
-
-import java.util.*;
-import java.util.Map;
-import java.util.concurrent.*;
-import java.util.function.*;
-import java.util.stream.*;
-import javax.annotation.Generated;
-import org.apache.camel.builder.EndpointConsumerBuilder;
-import org.apache.camel.builder.EndpointProducerBuilder;
-import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
-
-/**
- * Sent and receive messages to/from an Apache Kafka broker using vert.x Kafka
- * client
- * 
- * Generated by camel build tools - do NOT edit this file!
- */
-@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
-public interface VertxKafkaEndpointBuilderFactory {
-
-
-    /**
-     * Builder for endpoint consumers for the Vert.x Kafka component.
-     */
-    public interface VertxKafkaEndpointConsumerBuilder
-            extends
-                EndpointConsumerBuilder {
-        default AdvancedVertxKafkaEndpointConsumerBuilder advanced() {
-            return (AdvancedVertxKafkaEndpointConsumerBuilder) this;
-        }
-        /**
-         * Sets additional properties for either kafka consumer or kafka
-         * producer in case they can't be set directly on the camel
-         * configurations (e.g: new Kafka properties that are not reflected yet
-         * in Camel configurations), the properties have to be prefixed with
-         * additionalProperties.. E.g:
-         * additionalProperties.transactional.id=12345&amp;amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param key the option key
-         * @param value the option value
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder additionalProperties(
-                String key,
-                Object value) {
-            doSetMultiValueProperty("additionalProperties", "additionalProperties." + key, value);
-            return this;
-        }
-        /**
-         * Sets additional properties for either kafka consumer or kafka
-         * producer in case they can't be set directly on the camel
-         * configurations (e.g: new Kafka properties that are not reflected yet
-         * in Camel configurations), the properties have to be prefixed with
-         * additionalProperties.. E.g:
-         * additionalProperties.transactional.id=12345&amp;amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param values the values
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder additionalProperties(
-                Map values) {
-            doSetMultiValueProperties("additionalProperties", "additionalProperties.", values);
-            return this;
-        }
-        /**
-         * A list of host/port pairs to use for establishing the initial
-         * connection to the Kafka cluster. The client will make use of all
-         * servers irrespective of which servers are specified here for
-         * bootstrapping&amp;amp;mdash;this list only impacts the initial hosts
-         * used to discover the full set of servers. This list should be in the
-         * form host1:port1,host2:port2,.... Since these servers are just used
-         * for the initial connection to discover the full cluster membership
-         * (which may change dynamically), this list need not contain the full
-         * set of servers (you may want more than one, though, in case a server
-         * is down).
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param bootstrapServers the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder bootstrapServers(
-                String bootstrapServers) {
-            doSetProperty("bootstrapServers", bootstrapServers);
-            return this;
-        }
-        /**
-         * Controls how the client uses DNS lookups. If set to use_all_dns_ips,
-         * connect to each returned IP address in sequence until a successful
-         * connection is established. After a disconnection, the next IP is
-         * used. Once all IPs have been used once, the client resolves the IP(s)
-         * from the hostname again (both the JVM and the OS cache DNS name
-         * lookups, however). If set to
-         * resolve_canonical_bootstrap_servers_only, resolve each bootstrap
-         * address into a list of canonical names. After the bootstrap phase,
-         * this behaves the same as use_all_dns_ips. If set to default
-         * (deprecated), attempt to connect to the first IP address returned by
-         * the lookup, even if the lookup returns multiple IP addresses.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: use_all_dns_ips
-         * Group: common
-         * 
-         * @param clientDnsLookup the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder clientDnsLookup(
-                String clientDnsLookup) {
-            doSetProperty("clientDnsLookup", clientDnsLookup);
-            return this;
-        }
-        /**
-         * An id string to pass to the server when making requests. The purpose
-         * of this is to be able to track the source of requests beyond just
-         * ip/port by allowing a logical application name to be included in
-         * server-side request logging.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param clientId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder clientId(String clientId) {
-            doSetProperty("clientId", clientId);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder connectionsMaxIdleMs(
-                long connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder connectionsMaxIdleMs(
-                String connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option is a:
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder headerFilterStrategy(
-                org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder headerFilterStrategy(
-                String headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
-        /**
-         * A list of classes to use as interceptors. Implementing the
-         * org.apache.kafka.clients.producer.ProducerInterceptor interface
-         * allows you to intercept (and possibly mutate) the records received by
-         * the producer before they are published to the Kafka cluster. By
-         * default, there are no interceptors.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param interceptorClasses the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder interceptorClasses(
-                String interceptorClasses) {
-            doSetProperty("interceptorClasses", interceptorClasses);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metadataMaxAgeMs(
-                long metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metadataMaxAgeMs(
-                String metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * A list of classes to use as metrics reporters. Implementing the
-         * org.apache.kafka.common.metrics.MetricsReporter interface allows
-         * plugging in classes that will be notified of new metric creation. The
-         * JmxReporter is always included to register JMX statistics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param metricReporters the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricReporters(
-                String metricReporters) {
-            doSetProperty("metricReporters", metricReporters);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricsNumSamples(
-                int metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricsNumSamples(
-                String metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The highest recording level for metrics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: INFO
-         * Group: common
-         * 
-         * @param metricsRecordingLevel the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricsRecordingLevel(
-                String metricsRecordingLevel) {
-            doSetProperty("metricsRecordingLevel", metricsRecordingLevel);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricsSampleWindowMs(
-                long metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder metricsSampleWindowMs(
-                String metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * Header VertxKafkaConstants#PARTITION_ID If configured, it will take
-         * precedence over this config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder partitionId(
-                Integer partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * Header VertxKafkaConstants#PARTITION_ID If configured, it will take
-         * precedence over this config.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder partitionId(String partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder receiveBufferBytes(
-                int receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder receiveBufferBytes(
-                String receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder reconnectBackoffMaxMs(
-                long reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder reconnectBackoffMaxMs(
-                String reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder reconnectBackoffMs(
-                long reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder reconnectBackoffMs(
-                String reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder requestTimeoutMs(
-                int requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder requestTimeoutMs(
-                String requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder retryBackoffMs(
-                long retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder retryBackoffMs(
-                String retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sendBufferBytes(
-                int sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sendBufferBytes(
-                String sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder socketConnectionSetupTimeoutMaxMs(
-                long socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder socketConnectionSetupTimeoutMaxMs(
-                String socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder socketConnectionSetupTimeoutMs(
-                long socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder socketConnectionSetupTimeoutMs(
-                String socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * Allow automatic topic creation on the broker when subscribing to or
-         * assigning a topic. A topic being subscribed to will be automatically
-         * created only if the broker allows for it using
-         * auto.create.topics.enable broker configuration. This configuration
-         * must be set to false when using brokers older than 0.11.0.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param allowAutoCreateTopics the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder allowAutoCreateTopics(
-                boolean allowAutoCreateTopics) {
-            doSetProperty("allowAutoCreateTopics", allowAutoCreateTopics);
-            return this;
-        }
-        /**
-         * Allow automatic topic creation on the broker when subscribing to or
-         * assigning a topic. A topic being subscribed to will be automatically
-         * created only if the broker allows for it using
-         * auto.create.topics.enable broker configuration. This configuration
-         * must be set to false when using brokers older than 0.11.0.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param allowAutoCreateTopics the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder allowAutoCreateTopics(
-                String allowAutoCreateTopics) {
-            doSetProperty("allowAutoCreateTopics", allowAutoCreateTopics);
-            return this;
-        }
-        /**
-         * Whether to allow doing manual commits via
-         * org.apache.camel.component.vertx.kafka.offset.VertxKafkaManualCommit.
-         * If this option is enabled then an instance of
-         * org.apache.camel.component.vertx.kafka.offset.VertxKafkaManualCommit
-         * is stored on the Exchange message header, which allows end users to
-         * access this API and perform manual offset commits via the Kafka
-         * consumer. Note: To take full control of the offset committing, you
-         * may need to disable the Kafka Consumer default auto commit behavior
-         * by setting 'enableAutoCommit' to 'false'.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: false
-         * Group: consumer
-         * 
-         * @param allowManualCommit the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder allowManualCommit(
-                boolean allowManualCommit) {
-            doSetProperty("allowManualCommit", allowManualCommit);
-            return this;
-        }
-        /**
-         * Whether to allow doing manual commits via
-         * org.apache.camel.component.vertx.kafka.offset.VertxKafkaManualCommit.
-         * If this option is enabled then an instance of
-         * org.apache.camel.component.vertx.kafka.offset.VertxKafkaManualCommit
-         * is stored on the Exchange message header, which allows end users to
-         * access this API and perform manual offset commits via the Kafka
-         * consumer. Note: To take full control of the offset committing, you
-         * may need to disable the Kafka Consumer default auto commit behavior
-         * by setting 'enableAutoCommit' to 'false'.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: false
-         * Group: consumer
-         * 
-         * @param allowManualCommit the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder allowManualCommit(
-                String allowManualCommit) {
-            doSetProperty("allowManualCommit", allowManualCommit);
-            return this;
-        }
-        /**
-         * The frequency in milliseconds that the consumer offsets are
-         * auto-committed to Kafka if enable.auto.commit is set to true.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5s
-         * Group: consumer
-         * 
-         * @param autoCommitIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder autoCommitIntervalMs(
-                int autoCommitIntervalMs) {
-            doSetProperty("autoCommitIntervalMs", autoCommitIntervalMs);
-            return this;
-        }
-        /**
-         * The frequency in milliseconds that the consumer offsets are
-         * auto-committed to Kafka if enable.auto.commit is set to true.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5s
-         * Group: consumer
-         * 
-         * @param autoCommitIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder autoCommitIntervalMs(
-                String autoCommitIntervalMs) {
-            doSetProperty("autoCommitIntervalMs", autoCommitIntervalMs);
-            return this;
-        }
-        /**
-         * What to do when there is no initial offset in Kafka or if the current
-         * offset does not exist any more on the server (e.g. because that data
-         * has been deleted): earliest: automatically reset the offset to the
-         * earliest offsetlatest: automatically reset the offset to the latest
-         * offsetnone: throw exception to the consumer if no previous offset is
-         * found for the consumer's groupanything else: throw exception to the
-         * consumer.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: latest
-         * Group: consumer
-         * 
-         * @param autoOffsetReset the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder autoOffsetReset(
-                String autoOffsetReset) {
-            doSetProperty("autoOffsetReset", autoOffsetReset);
-            return this;
-        }
-        /**
-         * Automatically check the CRC32 of the records consumed. This ensures
-         * no on-the-wire or on-disk corruption to the messages occurred. This
-         * check adds some overhead, so it may be disabled in cases seeking
-         * extreme performance.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param checkCrcs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder checkCrcs(boolean checkCrcs) {
-            doSetProperty("checkCrcs", checkCrcs);
-            return this;
-        }
-        /**
-         * Automatically check the CRC32 of the records consumed. This ensures
-         * no on-the-wire or on-disk corruption to the messages occurred. This
-         * check adds some overhead, so it may be disabled in cases seeking
-         * extreme performance.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param checkCrcs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder checkCrcs(String checkCrcs) {
-            doSetProperty("checkCrcs", checkCrcs);
-            return this;
-        }
-        /**
-         * A rack identifier for this client. This can be any string value which
-         * indicates where this client is physically located. It corresponds
-         * with the broker config 'broker.rack'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param clientRack the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder clientRack(String clientRack) {
-            doSetProperty("clientRack", clientRack);
-            return this;
-        }
-        /**
-         * Specifies the timeout (in milliseconds) for client APIs. This
-         * configuration is used as the default timeout for all client
-         * operations that do not specify a timeout parameter.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: consumer
-         * 
-         * @param defaultApiTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder defaultApiTimeoutMs(
-                int defaultApiTimeoutMs) {
-            doSetProperty("defaultApiTimeoutMs", defaultApiTimeoutMs);
-            return this;
-        }
-        /**
-         * Specifies the timeout (in milliseconds) for client APIs. This
-         * configuration is used as the default timeout for all client
-         * operations that do not specify a timeout parameter.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: consumer
-         * 
-         * @param defaultApiTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder defaultApiTimeoutMs(
-                String defaultApiTimeoutMs) {
-            doSetProperty("defaultApiTimeoutMs", defaultApiTimeoutMs);
-            return this;
-        }
-        /**
-         * If true the consumer's offset will be periodically committed in the
-         * background.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param enableAutoCommit the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder enableAutoCommit(
-                boolean enableAutoCommit) {
-            doSetProperty("enableAutoCommit", enableAutoCommit);
-            return this;
-        }
-        /**
-         * If true the consumer's offset will be periodically committed in the
-         * background.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param enableAutoCommit the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder enableAutoCommit(
-                String enableAutoCommit) {
-            doSetProperty("enableAutoCommit", enableAutoCommit);
-            return this;
-        }
-        /**
-         * Whether internal topics matching a subscribed pattern should be
-         * excluded from the subscription. It is always possible to explicitly
-         * subscribe to an internal topic.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param excludeInternalTopics the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder excludeInternalTopics(
-                boolean excludeInternalTopics) {
-            doSetProperty("excludeInternalTopics", excludeInternalTopics);
-            return this;
-        }
-        /**
-         * Whether internal topics matching a subscribed pattern should be
-         * excluded from the subscription. It is always possible to explicitly
-         * subscribe to an internal topic.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: true
-         * Group: consumer
-         * 
-         * @param excludeInternalTopics the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder excludeInternalTopics(
-                String excludeInternalTopics) {
-            doSetProperty("excludeInternalTopics", excludeInternalTopics);
-            return this;
-        }
-        /**
-         * The maximum amount of data the server should return for a fetch
-         * request. Records are fetched in batches by the consumer, and if the
-         * first record batch in the first non-empty partition of the fetch is
-         * larger than this value, the record batch will still be returned to
-         * ensure that the consumer can make progress. As such, this is not a
-         * absolute maximum. The maximum record batch size accepted by the
-         * broker is defined via message.max.bytes (broker config) or
-         * max.message.bytes (topic config). Note that the consumer performs
-         * multiple fetches in parallel.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 52428800
-         * Group: consumer
-         * 
-         * @param fetchMaxBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMaxBytes(
-                int fetchMaxBytes) {
-            doSetProperty("fetchMaxBytes", fetchMaxBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of data the server should return for a fetch
-         * request. Records are fetched in batches by the consumer, and if the
-         * first record batch in the first non-empty partition of the fetch is
-         * larger than this value, the record batch will still be returned to
-         * ensure that the consumer can make progress. As such, this is not a
-         * absolute maximum. The maximum record batch size accepted by the
-         * broker is defined via message.max.bytes (broker config) or
-         * max.message.bytes (topic config). Note that the consumer performs
-         * multiple fetches in parallel.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 52428800
-         * Group: consumer
-         * 
-         * @param fetchMaxBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMaxBytes(
-                String fetchMaxBytes) {
-            doSetProperty("fetchMaxBytes", fetchMaxBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time the server will block before answering the
-         * fetch request if there isn't sufficient data to immediately satisfy
-         * the requirement given by fetch.min.bytes.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 500ms
-         * Group: consumer
-         * 
-         * @param fetchMaxWaitMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMaxWaitMs(
-                int fetchMaxWaitMs) {
-            doSetProperty("fetchMaxWaitMs", fetchMaxWaitMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time the server will block before answering the
-         * fetch request if there isn't sufficient data to immediately satisfy
-         * the requirement given by fetch.min.bytes.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 500ms
-         * Group: consumer
-         * 
-         * @param fetchMaxWaitMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMaxWaitMs(
-                String fetchMaxWaitMs) {
-            doSetProperty("fetchMaxWaitMs", fetchMaxWaitMs);
-            return this;
-        }
-        /**
-         * The minimum amount of data the server should return for a fetch
-         * request. If insufficient data is available the request will wait for
-         * that much data to accumulate before answering the request. The
-         * default setting of 1 byte means that fetch requests are answered as
-         * soon as a single byte of data is available or the fetch request times
-         * out waiting for data to arrive. Setting this to something greater
-         * than 1 will cause the server to wait for larger amounts of data to
-         * accumulate which can improve server throughput a bit at the cost of
-         * some additional latency.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1
-         * Group: consumer
-         * 
-         * @param fetchMinBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMinBytes(
-                int fetchMinBytes) {
-            doSetProperty("fetchMinBytes", fetchMinBytes);
-            return this;
-        }
-        /**
-         * The minimum amount of data the server should return for a fetch
-         * request. If insufficient data is available the request will wait for
-         * that much data to accumulate before answering the request. The
-         * default setting of 1 byte means that fetch requests are answered as
-         * soon as a single byte of data is available or the fetch request times
-         * out waiting for data to arrive. Setting this to something greater
-         * than 1 will cause the server to wait for larger amounts of data to
-         * accumulate which can improve server throughput a bit at the cost of
-         * some additional latency.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1
-         * Group: consumer
-         * 
-         * @param fetchMinBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder fetchMinBytes(
-                String fetchMinBytes) {
-            doSetProperty("fetchMinBytes", fetchMinBytes);
-            return this;
-        }
-        /**
-         * A unique string that identifies the consumer group this consumer
-         * belongs to. This property is required if the consumer uses either the
-         * group management functionality by using subscribe(topic) or the
-         * Kafka-based offset management strategy.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param groupId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder groupId(String groupId) {
-            doSetProperty("groupId", groupId);
-            return this;
-        }
-        /**
-         * A unique identifier of the consumer instance provided by the end
-         * user. Only non-empty strings are permitted. If set, the consumer is
-         * treated as a static member, which means that only one instance with
-         * this ID is allowed in the consumer group at any time. This can be
-         * used in combination with a larger session timeout to avoid group
-         * rebalances caused by transient unavailability (e.g. process
-         * restarts). If not set, the consumer will join the group as a dynamic
-         * member, which is the traditional behavior.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param groupInstanceId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder groupInstanceId(
-                String groupInstanceId) {
-            doSetProperty("groupInstanceId", groupInstanceId);
-            return this;
-        }
-        /**
-         * The expected time between heartbeats to the consumer coordinator when
-         * using Kafka's group management facilities. Heartbeats are used to
-         * ensure that the consumer's session stays active and to facilitate
-         * rebalancing when new consumers join or leave the group. The value
-         * must be set lower than session.timeout.ms, but typically should be
-         * set no higher than 1/3 of that value. It can be adjusted even lower
-         * to control the expected time for normal rebalances.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 3s
-         * Group: consumer
-         * 
-         * @param heartbeatIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder heartbeatIntervalMs(
-                int heartbeatIntervalMs) {
-            doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
-            return this;
-        }
-        /**
-         * The expected time between heartbeats to the consumer coordinator when
-         * using Kafka's group management facilities. Heartbeats are used to
-         * ensure that the consumer's session stays active and to facilitate
-         * rebalancing when new consumers join or leave the group. The value
-         * must be set lower than session.timeout.ms, but typically should be
-         * set no higher than 1/3 of that value. It can be adjusted even lower
-         * to control the expected time for normal rebalances.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 3s
-         * Group: consumer
-         * 
-         * @param heartbeatIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder heartbeatIntervalMs(
-                String heartbeatIntervalMs) {
-            doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
-            return this;
-        }
-        /**
-         * Controls how to read messages written transactionally. If set to
-         * read_committed, consumer.poll() will only return transactional
-         * messages which have been committed. If set to read_uncommitted (the
-         * default), consumer.poll() will return all messages, even
-         * transactional messages which have been aborted. Non-transactional
-         * messages will be returned unconditionally in either mode. Messages
-         * will always be returned in offset order. Hence, in read_committed
-         * mode, consumer.poll() will only return messages up to the last stable
-         * offset (LSO), which is the one less than the offset of the first open
-         * transaction. In particular any messages appearing after messages
-         * belonging to ongoing transactions will be withheld until the relevant
-         * transaction has been completed. As a result, read_committed consumers
-         * will not be able to read up to the high watermark when there are in
-         * flight transactions. Further, when in read_committed the seekToEnd
-         * method will return the LSO.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: read_uncommitted
-         * Group: consumer
-         * 
-         * @param isolationLevel the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder isolationLevel(
-                String isolationLevel) {
-            doSetProperty("isolationLevel", isolationLevel);
-            return this;
-        }
-        /**
-         * Deserializer class for key that implements the
-         * org.apache.kafka.common.serialization.Deserializer interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: org.apache.kafka.common.serialization.StringDeserializer
-         * Group: consumer
-         * 
-         * @param keyDeserializer the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder keyDeserializer(
-                String keyDeserializer) {
-            doSetProperty("keyDeserializer", keyDeserializer);
-            return this;
-        }
-        /**
-         * The maximum amount of data per-partition the server will return.
-         * Records are fetched in batches by the consumer. If the first record
-         * batch in the first non-empty partition of the fetch is larger than
-         * this limit, the batch will still be returned to ensure that the
-         * consumer can make progress. The maximum record batch size accepted by
-         * the broker is defined via message.max.bytes (broker config) or
-         * max.message.bytes (topic config). See fetch.max.bytes for limiting
-         * the consumer request size.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1048576
-         * Group: consumer
-         * 
-         * @param maxPartitionFetchBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPartitionFetchBytes(
-                int maxPartitionFetchBytes) {
-            doSetProperty("maxPartitionFetchBytes", maxPartitionFetchBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of data per-partition the server will return.
-         * Records are fetched in batches by the consumer. If the first record
-         * batch in the first non-empty partition of the fetch is larger than
-         * this limit, the batch will still be returned to ensure that the
-         * consumer can make progress. The maximum record batch size accepted by
-         * the broker is defined via message.max.bytes (broker config) or
-         * max.message.bytes (topic config). See fetch.max.bytes for limiting
-         * the consumer request size.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1048576
-         * Group: consumer
-         * 
-         * @param maxPartitionFetchBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPartitionFetchBytes(
-                String maxPartitionFetchBytes) {
-            doSetProperty("maxPartitionFetchBytes", maxPartitionFetchBytes);
-            return this;
-        }
-        /**
-         * The maximum delay between invocations of poll() when using consumer
-         * group management. This places an upper bound on the amount of time
-         * that the consumer can be idle before fetching more records. If poll()
-         * is not called before expiration of this timeout, then the consumer is
-         * considered failed and the group will rebalance in order to reassign
-         * the partitions to another member. For consumers using a non-null
-         * group.instance.id which reach this timeout, partitions will not be
-         * immediately reassigned. Instead, the consumer will stop sending
-         * heartbeats and partitions will be reassigned after expiration of
-         * session.timeout.ms. This mirrors the behavior of a static consumer
-         * which has shut down.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: consumer
-         * 
-         * @param maxPollIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPollIntervalMs(
-                int maxPollIntervalMs) {
-            doSetProperty("maxPollIntervalMs", maxPollIntervalMs);
-            return this;
-        }
-        /**
-         * The maximum delay between invocations of poll() when using consumer
-         * group management. This places an upper bound on the amount of time
-         * that the consumer can be idle before fetching more records. If poll()
-         * is not called before expiration of this timeout, then the consumer is
-         * considered failed and the group will rebalance in order to reassign
-         * the partitions to another member. For consumers using a non-null
-         * group.instance.id which reach this timeout, partitions will not be
-         * immediately reassigned. Instead, the consumer will stop sending
-         * heartbeats and partitions will be reassigned after expiration of
-         * session.timeout.ms. This mirrors the behavior of a static consumer
-         * which has shut down.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: consumer
-         * 
-         * @param maxPollIntervalMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPollIntervalMs(
-                String maxPollIntervalMs) {
-            doSetProperty("maxPollIntervalMs", maxPollIntervalMs);
-            return this;
-        }
-        /**
-         * The maximum number of records returned in a single call to poll().
-         * Note that max.poll.records does not impact the underlying fetching
-         * behavior. The consumer will cache the records from each fetch request
-         * and return them incrementally from each poll.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 500
-         * Group: consumer
-         * 
-         * @param maxPollRecords the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPollRecords(
-                int maxPollRecords) {
-            doSetProperty("maxPollRecords", maxPollRecords);
-            return this;
-        }
-        /**
-         * The maximum number of records returned in a single call to poll().
-         * Note that max.poll.records does not impact the underlying fetching
-         * behavior. The consumer will cache the records from each fetch request
-         * and return them incrementally from each poll.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 500
-         * Group: consumer
-         * 
-         * @param maxPollRecords the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder maxPollRecords(
-                String maxPollRecords) {
-            doSetProperty("maxPollRecords", maxPollRecords);
-            return this;
-        }
-        /**
-         * A list of class names or class types, ordered by preference, of
-         * supported partition assignment strategies that the client will use to
-         * distribute partition ownership amongst consumer instances when group
-         * management is used. Available options are:
-         * org.apache.kafka.clients.consumer.RangeAssignor: The default
-         * assignor, which works on a per-topic basis.
-         * org.apache.kafka.clients.consumer.RoundRobinAssignor: Assigns
-         * partitions to consumers in a round-robin fashion.
-         * org.apache.kafka.clients.consumer.StickyAssignor: Guarantees an
-         * assignment that is maximally balanced while preserving as many
-         * existing partition assignments as possible.
-         * org.apache.kafka.clients.consumer.CooperativeStickyAssignor:
-         * Follows the same StickyAssignor logic, but allows for cooperative
-         * rebalancing. Implementing the
-         * org.apache.kafka.clients.consumer.ConsumerPartitionAssignor interface
-         * allows you to plug in a custom assignment strategy.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: org.apache.kafka.clients.consumer.RangeAssignor
-         * Group: consumer
-         * 
-         * @param partitionAssignmentStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder partitionAssignmentStrategy(
-                String partitionAssignmentStrategy) {
-            doSetProperty("partitionAssignmentStrategy", partitionAssignmentStrategy);
-            return this;
-        }
-        /**
-         * Set if KafkaConsumer will read from a particular offset on startup.
-         * This config will take precedence over seekTo config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.Long&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param seekToOffset the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder seekToOffset(Long seekToOffset) {
-            doSetProperty("seekToOffset", seekToOffset);
-            return this;
-        }
-        /**
-         * Set if KafkaConsumer will read from a particular offset on startup.
-         * This config will take precedence over seekTo config.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;java.lang.Long&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param seekToOffset the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder seekToOffset(
-                String seekToOffset) {
-            doSetProperty("seekToOffset", seekToOffset);
-            return this;
-        }
-        /**
-         * Set if KafkaConsumer will read from the beginning or the end on
-         * startup: 'beginning' reads from the beginning, 'end' reads from the
-         * end.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: consumer
-         * 
-         * @param seekToPosition the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder seekToPosition(
-                String seekToPosition) {
-            doSetProperty("seekToPosition", seekToPosition);
-            return this;
-        }
-        /**
-         * The timeout used to detect client failures when using Kafka's group
-         * management facility. The client sends periodic heartbeats to indicate
-         * its liveness to the broker. If no heartbeats are received by the
-         * broker before the expiration of this session timeout, then the broker
-         * will remove this client from the group and initiate a rebalance. Note
-         * that the value must be in the allowable range as configured in the
-         * broker configuration by group.min.session.timeout.ms and
-         * group.max.session.timeout.ms.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: consumer
-         * 
-         * @param sessionTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sessionTimeoutMs(
-                int sessionTimeoutMs) {
-            doSetProperty("sessionTimeoutMs", sessionTimeoutMs);
-            return this;
-        }
-        /**
-         * The timeout used to detect client failures when using Kafka's group
-         * management facility. The client sends periodic heartbeats to indicate
-         * its liveness to the broker. If no heartbeats are received by the
-         * broker before the expiration of this session timeout, then the broker
-         * will remove this client from the group and initiate a rebalance. Note
-         * that the value must be in the allowable range as configured in the
-         * broker configuration by group.min.session.timeout.ms and
-         * group.max.session.timeout.ms.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: consumer
-         * 
-         * @param sessionTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sessionTimeoutMs(
-                String sessionTimeoutMs) {
-            doSetProperty("sessionTimeoutMs", sessionTimeoutMs);
-            return this;
-        }
-        /**
-         * Deserializer class for value that implements the
-         * org.apache.kafka.common.serialization.Deserializer interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: org.apache.kafka.common.serialization.StringDeserializer
-         * Group: consumer
-         * 
-         * @param valueDeserializer the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder valueDeserializer(
-                String valueDeserializer) {
-            doSetProperty("valueDeserializer", valueDeserializer);
-            return this;
-        }
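
Taken together, the consumer-group setters above (group id, fetch sizes, poll limits, seek behaviour, deserializers) are the options a route author would typically tune on the endpoint. The class below is only a sketch of such a route against the endpoint DSL removed by this commit; the topic, broker address and group id are placeholder values, and bootstrapServers is the builder's common broker-list option (it also appears on the producer builder further below):

    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;

    public class VertxKafkaConsumerRoute extends EndpointRouteBuilder {
        @Override
        public void configure() {
            // Consume from a topic, tuning a few of the consumer options documented above.
            // All values are illustrative placeholders.
            from(vertxKafka("demo-topic")
                    .bootstrapServers("localhost:9092")
                    .groupId("demo-group")
                    .maxPollRecords(500)
                    .sessionTimeoutMs(10000)
                    .seekToPosition("beginning")
                    .valueDeserializer("org.apache.kafka.common.serialization.StringDeserializer"))
                .to("log:received");
        }
    }
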
-        /**
-         * The fully qualified name of a SASL client callback handler class that
-         * implements the AuthenticateCallbackHandler interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslClientCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslClientCallbackHandlerClass(
-                String saslClientCallbackHandlerClass) {
-            doSetProperty("saslClientCallbackHandlerClass", saslClientCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * JAAS login context parameters for SASL connections in the format used
-         * by JAAS configuration files. JAAS configuration file format is
-         * described here. The format for the value is: loginModuleClass
-         * controlFlag (optionName=optionValue);. For brokers, the config must
-         * be prefixed with listener prefix and SASL mechanism name in
-         * lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslJaasConfig the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslJaasConfig(
-                String saslJaasConfig) {
-            doSetProperty("saslJaasConfig", saslJaasConfig);
-            return this;
-        }
-        /**
-         * Kerberos kinit command path.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: /usr/bin/kinit
-         * Group: security
-         * 
-         * @param saslKerberosKinitCmd the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosKinitCmd(
-                String saslKerberosKinitCmd) {
-            doSetProperty("saslKerberosKinitCmd", saslKerberosKinitCmd);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosMinTimeBeforeRelogin(
-                long saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosMinTimeBeforeRelogin(
-                String saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * The Kerberos principal name that Kafka runs as. This can be defined
-         * either in Kafka's JAAS config or in Kafka's config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslKerberosServiceName the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosServiceName(
-                String saslKerberosServiceName) {
-            doSetProperty("saslKerberosServiceName", saslKerberosServiceName);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosTicketRenewJitter(
-                double saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosTicketRenewJitter(
-                String saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosTicketRenewWindowFactor(
-                double saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslKerberosTicketRenewWindowFactor(
-                String saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * The fully qualified name of a SASL login callback handler class that
-         * implements the AuthenticateCallbackHandler interface. For brokers,
-         * login callback handler config must be prefixed with listener prefix
-         * and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginCallbackHandlerClass(
-                String saslLoginCallbackHandlerClass) {
-            doSetProperty("saslLoginCallbackHandlerClass", saslLoginCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * The fully qualified name of a class that implements the Login
-         * interface. For brokers, login config must be prefixed with listener
-         * prefix and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginClass(
-                String saslLoginClass) {
-            doSetProperty("saslLoginClass", saslLoginClass);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshBufferSeconds(
-                short saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshBufferSeconds(
-                String saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshMinPeriodSeconds(
-                short saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshMinPeriodSeconds(
-                String saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshWindowFactor(
-                double saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshWindowFactor(
-                String saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshWindowJitter(
-                double saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslLoginRefreshWindowJitter(
-                String saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * SASL mechanism used for client connections. This may be any mechanism
-         * for which a security provider is available. GSSAPI is the default
-         * mechanism.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: GSSAPI
-         * Group: security
-         * 
-         * @param saslMechanism the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder saslMechanism(
-                String saslMechanism) {
-            doSetProperty("saslMechanism", saslMechanism);
-            return this;
-        }
-        /**
-         * Protocol used to communicate with brokers. Valid values are:
-         * PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PLAINTEXT
-         * Group: security
-         * 
-         * @param securityProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder securityProtocol(
-                String securityProtocol) {
-            doSetProperty("securityProtocol", securityProtocol);
-            return this;
-        }
-        /**
-         * A list of configurable creator classes each returning a provider
-         * implementing security algorithms. These classes should implement the
-         * org.apache.kafka.common.security.auth.SecurityProviderCreator
-         * interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param securityProviders the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder securityProviders(
-                String securityProviders) {
-            doSetProperty("securityProviders", securityProviders);
-            return this;
-        }
-        /**
-         * A list of cipher suites. This is a named combination of
-         * authentication, encryption, MAC and key exchange algorithm used to
-         * negotiate the security settings for a network connection using TLS or
-         * SSL network protocol. By default all the available cipher suites are
-         * supported.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslCipherSuites the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslCipherSuites(
-                String sslCipherSuites) {
-            doSetProperty("sslCipherSuites", sslCipherSuites);
-            return this;
-        }
-        /**
-         * The list of protocols enabled for SSL connections. The default is
-         * 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2'
-         * otherwise. With the default value for Java 11, clients and servers
-         * will prefer TLSv1.3 if both support it and fallback to TLSv1.2
-         * otherwise (assuming both support at least TLSv1.2). This default
-         * should be fine for most cases. Also see the config documentation for
-         * ssl.protocol.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2,TLSv1.3
-         * Group: security
-         * 
-         * @param sslEnabledProtocols the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslEnabledProtocols(
-                String sslEnabledProtocols) {
-            doSetProperty("sslEnabledProtocols", sslEnabledProtocols);
-            return this;
-        }
-        /**
-         * The endpoint identification algorithm to validate server hostname
-         * using server certificate.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: https
-         * Group: security
-         * 
-         * @param sslEndpointIdentificationAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslEndpointIdentificationAlgorithm(
-                String sslEndpointIdentificationAlgorithm) {
-            doSetProperty("sslEndpointIdentificationAlgorithm", sslEndpointIdentificationAlgorithm);
-            return this;
-        }
-        /**
-         * The class of type
-         * org.apache.kafka.common.security.auth.SslEngineFactory to provide
-         * SSLEngine objects. Default value is
-         * org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslEngineFactoryClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslEngineFactoryClass(
-                String sslEngineFactoryClass) {
-            doSetProperty("sslEngineFactoryClass", sslEngineFactoryClass);
-            return this;
-        }
-        /**
-         * The algorithm used by key manager factory for SSL connections.
-         * Default value is the key manager factory algorithm configured for the
-         * Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: SunX509
-         * Group: security
-         * 
-         * @param sslKeymanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeymanagerAlgorithm(
-                String sslKeymanagerAlgorithm) {
-            doSetProperty("sslKeymanagerAlgorithm", sslKeymanagerAlgorithm);
-            return this;
-        }
-        /**
-         * The password of the private key in the key store file or the PEM key
-         * specified in 'ssl.keystore.key'. This is required for clients only if
-         * two-way authentication is configured.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeyPassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeyPassword(
-                String sslKeyPassword) {
-            doSetProperty("sslKeyPassword", sslKeyPassword);
-            return this;
-        }
-        /**
-         * Certificate chain in the format specified by 'ssl.keystore.type'.
-         * Default SSL engine factory supports only PEM format with a list of
-         * X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreCertificateChain the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeystoreCertificateChain(
-                String sslKeystoreCertificateChain) {
-            doSetProperty("sslKeystoreCertificateChain", sslKeystoreCertificateChain);
-            return this;
-        }
-        /**
-         * Private key in the format specified by 'ssl.keystore.type'. Default
-         * SSL engine factory supports only PEM format with PKCS#8 keys. If the
-         * key is encrypted, key password must be specified using
-         * 'ssl.key.password'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreKey the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeystoreKey(
-                String sslKeystoreKey) {
-            doSetProperty("sslKeystoreKey", sslKeystoreKey);
-            return this;
-        }
-        /**
-         * The location of the key store file. This is optional for the client
-         * and can be used for two-way client authentication.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeystoreLocation(
-                String sslKeystoreLocation) {
-            doSetProperty("sslKeystoreLocation", sslKeystoreLocation);
-            return this;
-        }
-        /**
-         * The store password for the key store file. This is optional for
-         * client and only needed if 'ssl.keystore.location' is configured. Key
-         * store password is not supported for PEM format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeystorePassword(
-                String sslKeystorePassword) {
-            doSetProperty("sslKeystorePassword", sslKeystorePassword);
-            return this;
-        }
-        /**
-         * The file format of the key store file. This is optional for client.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslKeystoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslKeystoreType(
-                String sslKeystoreType) {
-            doSetProperty("sslKeystoreType", sslKeystoreType);
-            return this;
-        }
-        /**
-         * The SSL protocol used to generate the SSLContext. The default is
-         * 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise.
-         * This value should be fine for most use cases. Allowed values in
-         * recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL',
-         * 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage
-         * is discouraged due to known security vulnerabilities. With the
-         * default value for this config and 'ssl.enabled.protocols', clients
-         * will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'.
-         * If this config is set to 'TLSv1.2', clients will not use 'TLSv1.3'
-         * even if it is one of the values in ssl.enabled.protocols and the
-         * server only supports 'TLSv1.3'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2
-         * Group: security
-         * 
-         * @param sslProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslProtocol(String sslProtocol) {
-            doSetProperty("sslProtocol", sslProtocol);
-            return this;
-        }
-        /**
-         * The name of the security provider used for SSL connections. Default
-         * value is the default security provider of the JVM.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslProvider the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslProvider(String sslProvider) {
-            doSetProperty("sslProvider", sslProvider);
-            return this;
-        }
-        /**
-         * The SecureRandom PRNG implementation to use for SSL cryptography
-         * operations.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslSecureRandomImplementation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslSecureRandomImplementation(
-                String sslSecureRandomImplementation) {
-            doSetProperty("sslSecureRandomImplementation", sslSecureRandomImplementation);
-            return this;
-        }
-        /**
-         * The algorithm used by trust manager factory for SSL connections.
-         * Default value is the trust manager factory algorithm configured for
-         * the Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PKIX
-         * Group: security
-         * 
-         * @param sslTrustmanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslTrustmanagerAlgorithm(
-                String sslTrustmanagerAlgorithm) {
-            doSetProperty("sslTrustmanagerAlgorithm", sslTrustmanagerAlgorithm);
-            return this;
-        }
-        /**
-         * Trusted certificates in the format specified by
-         * 'ssl.truststore.type'. Default SSL engine factory supports only PEM
-         * format with X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreCertificates the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslTruststoreCertificates(
-                String sslTruststoreCertificates) {
-            doSetProperty("sslTruststoreCertificates", sslTruststoreCertificates);
-            return this;
-        }
-        /**
-         * The location of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslTruststoreLocation(
-                String sslTruststoreLocation) {
-            doSetProperty("sslTruststoreLocation", sslTruststoreLocation);
-            return this;
-        }
-        /**
-         * The password for the trust store file. If a password is not set,
-         * the configured trust store file will still be used, but integrity
-         * checking is disabled. Trust store password is not supported for PEM
-         * format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslTruststorePassword(
-                String sslTruststorePassword) {
-            doSetProperty("sslTruststorePassword", sslTruststorePassword);
-            return this;
-        }
-        /**
-         * The file format of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslTruststoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointConsumerBuilder sslTruststoreType(
-                String sslTruststoreType) {
-            doSetProperty("sslTruststoreType", sslTruststoreType);
-            return this;
-        }
-    }
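
The security-group setters above map directly onto the standard Kafka client TLS and SASL settings. The fragment below is a minimal sketch of a consumer endpoint secured with SASL_SSL, written inside a configure() method of the endpoint DSL; the credentials, truststore path and password are placeholder values:

    // Secure consumer endpoint: SASL/PLAIN over TLS, using the security options documented above.
    from(vertxKafka("secure-topic")
            .groupId("demo-group")
            .securityProtocol("SASL_SSL")
            .saslMechanism("PLAIN")
            .saslJaasConfig("org.apache.kafka.common.security.plain.PlainLoginModule required "
                    + "username=\"demo\" password=\"demo-secret\";")
            .sslTruststoreLocation("/path/to/truststore.jks")
            .sslTruststorePassword("changeit"))
        .to("log:secured");
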
-
-    /**
-     * Advanced builder for endpoint consumers for the Vert.x Kafka component.
-     */
-    public interface AdvancedVertxKafkaEndpointConsumerBuilder
-            extends
-                EndpointConsumerBuilder {
-        default VertxKafkaEndpointConsumerBuilder basic() {
-            return (VertxKafkaEndpointConsumerBuilder) this;
-        }
-        /**
-         * Allows for bridging the consumer to the Camel routing Error Handler,
-         * which means any exceptions that occur while the consumer is trying to
-         * pick up incoming messages, or the like, will now be processed as a
-         * message and handled by the routing Error Handler. By default the
-         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
-         * with exceptions, which will be logged at WARN or ERROR level and
-         * ignored.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: false
-         * Group: consumer (advanced)
-         * 
-         * @param bridgeErrorHandler the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder bridgeErrorHandler(
-                boolean bridgeErrorHandler) {
-            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
-            return this;
-        }
-        /**
-         * Allows for bridging the consumer to the Camel routing Error Handler,
-         * which means any exceptions that occur while the consumer is trying to
-         * pick up incoming messages, or the like, will now be processed as a
-         * message and handled by the routing Error Handler. By default the
-         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
-         * with exceptions, which will be logged at WARN or ERROR level and
-         * ignored.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: false
-         * Group: consumer (advanced)
-         * 
-         * @param bridgeErrorHandler the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder bridgeErrorHandler(
-                String bridgeErrorHandler) {
-            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
-            return this;
-        }
-        /**
-         * To let the consumer use a custom ExceptionHandler. Notice that if the
-         * option bridgeErrorHandler is enabled then this option is not in use.
-         * By default the consumer will deal with exceptions, which will be
-         * logged at WARN or ERROR level and ignored.
-         * 
-         * The option is a:
-         * &lt;code&gt;org.apache.camel.spi.ExceptionHandler&lt;/code&gt; type.
-         * 
-         * Group: consumer (advanced)
-         * 
-         * @param exceptionHandler the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder exceptionHandler(
-                org.apache.camel.spi.ExceptionHandler exceptionHandler) {
-            doSetProperty("exceptionHandler", exceptionHandler);
-            return this;
-        }
-        /**
-         * To let the consumer use a custom ExceptionHandler. Notice that if the
-         * option bridgeErrorHandler is enabled then this option is not in use.
-         * By default the consumer will deal with exceptions, which will be
-         * logged at WARN or ERROR level and ignored.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;org.apache.camel.spi.ExceptionHandler&lt;/code&gt; type.
-         * 
-         * Group: consumer (advanced)
-         * 
-         * @param exceptionHandler the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder exceptionHandler(
-                String exceptionHandler) {
-            doSetProperty("exceptionHandler", exceptionHandler);
-            return this;
-        }
-        /**
-         * Sets the exchange pattern when the consumer creates an exchange.
-         * 
-         * The option is a:
-         * &lt;code&gt;org.apache.camel.ExchangePattern&lt;/code&gt; type.
-         * 
-         * Group: consumer (advanced)
-         * 
-         * @param exchangePattern the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder exchangePattern(
-                org.apache.camel.ExchangePattern exchangePattern) {
-            doSetProperty("exchangePattern", exchangePattern);
-            return this;
-        }
-        /**
-         * Sets the exchange pattern when the consumer creates an exchange.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;org.apache.camel.ExchangePattern&lt;/code&gt; type.
-         * 
-         * Group: consumer (advanced)
-         * 
-         * @param exchangePattern the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointConsumerBuilder exchangePattern(
-                String exchangePattern) {
-            doSetProperty("exchangePattern", exchangePattern);
-            return this;
-        }
-    }
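
The advanced consumer builder adds the generic Camel consumer options on top of the Kafka ones. As a sketch, routing consumer-side failures through the Camel error handler instead of only logging them could look like the fragment below; advanced() is assumed here as the usual switch from the basic to the advanced builder (mirroring the producer builder's advanced() further below):

    // Exceptions raised while picking up messages are bridged to the route's error handler.
    from(vertxKafka("demo-topic")
            .groupId("demo-group")
            .advanced()
                .bridgeErrorHandler(true)
                .exchangePattern(org.apache.camel.ExchangePattern.InOnly))
        .to("log:received");
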
-
-    /**
-     * Builder for endpoint producers for the Vert.x Kafka component.
-     */
-    public interface VertxKafkaEndpointProducerBuilder
-            extends
-                EndpointProducerBuilder {
-        default AdvancedVertxKafkaEndpointProducerBuilder advanced() {
-            return (AdvancedVertxKafkaEndpointProducerBuilder) this;
-        }
-        /**
-         * Sets additional properties for either the Kafka consumer or the Kafka
-         * producer in case they can't be set directly on the Camel
-         * configurations (e.g. new Kafka properties that are not reflected yet
-         * in Camel configurations). The properties have to be prefixed with
-         * additionalProperties.. E.g:
-         * additionalProperties.transactional.id=12345&amp;amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param key the option key
-         * @param value the option value
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder additionalProperties(
-                String key,
-                Object value) {
-            doSetMultiValueProperty("additionalProperties", "additionalProperties." + key, value);
-            return this;
-        }
-        /**
-         * Sets additional properties for either the Kafka consumer or the
-         * Kafka producer in case they can't be set directly on the Camel
-         * configurations (e.g. new Kafka properties that are not reflected
-         * yet in Camel configurations). The properties have to be prefixed
-         * with additionalProperties., e.g.:
-         * additionalProperties.transactional.id=12345&amp;amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param values the values
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder additionalProperties(
-                Map values) {
-            doSetMultiValueProperties("additionalProperties", "additionalProperties.", values);
-            return this;
-        }
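
Because additionalProperties is multivalued, each call contributes one additionalProperties.<key> entry on the endpoint, mirroring the URI example in the Javadoc above. A minimal producer-side sketch, again assuming the vertxKafka(...) entry point removed by this commit; the topic, broker and registry URL are placeholders.

    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;

    public class AdditionalPropsRoute extends EndpointRouteBuilder {
        @Override
        public void configure() {
            from("direct:send")
                .to(vertxKafka("my-topic")
                        .bootstrapServers("localhost:9092")
                        // each call adds one prefixed key
                        .additionalProperties("schema.registry.url", "http://localhost:8811/avro")
                        .additionalProperties("transactional.id", "12345"));
        }
    }
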
-        /**
-         * A list of host/port pairs to use for establishing the initial
-         * connection to the Kafka cluster. The client will make use of all
-         * servers irrespective of which servers are specified here for
-         * bootstrapping&amp;amp;mdash;this list only impacts the initial hosts
-         * used to discover the full set of servers. This list should be in the
-         * form host1:port1,host2:port2,.... Since these servers are just used
-         * for the initial connection to discover the full cluster membership
-         * (which may change dynamically), this list need not contain the full
-         * set of servers (you may want more than one, though, in case a server
-         * is down).
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param bootstrapServers the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder bootstrapServers(
-                String bootstrapServers) {
-            doSetProperty("bootstrapServers", bootstrapServers);
-            return this;
-        }
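
The same option can be written straight into the endpoint URI; only a reachable subset of the brokers is needed since the list is used purely for bootstrapping. An illustrative fragment for the same configure() style as the sketches above (host names are placeholders):

    // Plain-URI equivalent of the builder call shown earlier.
    from("direct:send")
        .to("vertx-kafka:my-topic?bootstrapServers=broker1:9092,broker2:9092");
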
-        /**
-         * Controls how the client uses DNS lookups. If set to use_all_dns_ips,
-         * connect to each returned IP address in sequence until a successful
-         * connection is established. After a disconnection, the next IP is
-         * used. Once all IPs have been used once, the client resolves the IP(s)
-         * from the hostname again (both the JVM and the OS cache DNS name
-         * lookups, however). If set to
-         * resolve_canonical_bootstrap_servers_only, resolve each bootstrap
-         * address into a list of canonical names. After the bootstrap phase,
-         * this behaves the same as use_all_dns_ips. If set to default
-         * (deprecated), attempt to connect to the first IP address returned by
-         * the lookup, even if the lookup returns multiple IP addresses.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: use_all_dns_ips
-         * Group: common
-         * 
-         * @param clientDnsLookup the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder clientDnsLookup(
-                String clientDnsLookup) {
-            doSetProperty("clientDnsLookup", clientDnsLookup);
-            return this;
-        }
-        /**
-         * An id string to pass to the server when making requests. The purpose
-         * of this is to be able to track the source of requests beyond just
-         * ip/port by allowing a logical application name to be included in
-         * server-side request logging.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param clientId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder clientId(String clientId) {
-            doSetProperty("clientId", clientId);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder connectionsMaxIdleMs(
-                long connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder connectionsMaxIdleMs(
-                String connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
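
Every option comes as a typed setter plus a String variant that Camel converts at runtime, which is mainly useful for property placeholders. A small fragment for the same configure() as the sketches above; the placeholder key is made up.

    from("direct:send")
        .to(vertxKafka("my-topic")
                .bootstrapServers("localhost:9092")
                .connectionsMaxIdleMs(540000L)                          // typed value: 9 minutes in millis
                .requestTimeoutMs("{{kafka.request.timeout:30000}}"));  // String value resolved via placeholder
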
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option is a:
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder headerFilterStrategy(
-                org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder headerFilterStrategy(
-                String headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
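
A custom HeaderFilterStrategy decides which Camel headers are propagated to Kafka record headers and which Kafka headers come back into the exchange. A minimal sketch; the class name and the filtering rule are illustrative.

    import org.apache.camel.Exchange;
    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;
    import org.apache.camel.spi.HeaderFilterStrategy;

    public class FilteredHeadersRoute extends EndpointRouteBuilder {
        @Override
        public void configure() {
            // Returning true means "filter this header out".
            HeaderFilterStrategy dropInternal = new HeaderFilterStrategy() {
                @Override
                public boolean applyFilterToCamelHeaders(String name, Object value, Exchange exchange) {
                    return name.startsWith("Internal");   // never send these to Kafka
                }

                @Override
                public boolean applyFilterToExternalHeaders(String name, Object value, Exchange exchange) {
                    return false;                         // accept everything coming back
                }
            };

            from("direct:send")
                .to(vertxKafka("my-topic")
                        .bootstrapServers("localhost:9092")
                        .headerFilterStrategy(dropInternal));
        }
    }
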
-        /**
-         * A list of classes to use as interceptors. Implementing the
-         * org.apache.kafka.clients.producer.ProducerInterceptor interface
-         * allows you to intercept (and possibly mutate) the records received by
-         * the producer before they are published to the Kafka cluster. By
-         * default, there are no interceptors.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param interceptorClasses the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder interceptorClasses(
-                String interceptorClasses) {
-            doSetProperty("interceptorClasses", interceptorClasses);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metadataMaxAgeMs(
-                long metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metadataMaxAgeMs(
-                String metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * A list of classes to use as metrics reporters. Implementing the
-         * org.apache.kafka.common.metrics.MetricsReporter interface allows
-         * plugging in classes that will be notified of new metric creation. The
-         * JmxReporter is always included to register JMX statistics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param metricReporters the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricReporters(
-                String metricReporters) {
-            doSetProperty("metricReporters", metricReporters);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricsNumSamples(
-                int metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricsNumSamples(
-                String metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The highest recording level for metrics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: INFO
-         * Group: common
-         * 
-         * @param metricsRecordingLevel the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricsRecordingLevel(
-                String metricsRecordingLevel) {
-            doSetProperty("metricsRecordingLevel", metricsRecordingLevel);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricsSampleWindowMs(
-                long metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metricsSampleWindowMs(
-                String metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * If the header VertxKafkaConstants#PARTITION_ID is configured, it
-         * will take precedence over this config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder partitionId(
-                Integer partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * If the header VertxKafkaConstants#PARTITION_ID is configured, it
-         * will take precedence over this config.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder partitionId(String partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
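
A fixed partition can be pinned on the endpoint while the VertxKafkaConstants#PARTITION_ID header overrides it per message. The fragment below assumes the constant lives in the removed component (package org.apache.camel.component.vertx.kafka); partition numbers are arbitrary.

    // Inside the same configure() as the sketches above.
    from("direct:send")
        // per-message override, takes precedence over the endpoint option below
        .setHeader(VertxKafkaConstants.PARTITION_ID, constant(1))
        .to(vertxKafka("my-topic")
                .bootstrapServers("localhost:9092")
                .partitionId(0));   // used whenever the header is absent
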
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder receiveBufferBytes(
-                int receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder receiveBufferBytes(
-                String receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder reconnectBackoffMaxMs(
-                long reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder reconnectBackoffMaxMs(
-                String reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder reconnectBackoffMs(
-                long reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder reconnectBackoffMs(
-                String reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder requestTimeoutMs(
-                int requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder requestTimeoutMs(
-                String requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder retryBackoffMs(
-                long retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder retryBackoffMs(
-                String retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sendBufferBytes(
-                int sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sendBufferBytes(
-                String sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder socketConnectionSetupTimeoutMaxMs(
-                long socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder socketConnectionSetupTimeoutMaxMs(
-                String socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder socketConnectionSetupTimeoutMs(
-                long socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder socketConnectionSetupTimeoutMs(
-                String socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * The number of acknowledgments the producer requires the leader to
-         * have received before considering a request complete. This controls
-         * the durability of records that are sent. The following settings are
-         * allowed: acks=0 If set to zero then the producer will not wait for
-         * any acknowledgment from the server at all. The record will be
-         * immediately added to the socket buffer and considered sent. No
-         * guarantee can be made that the server has received the record in this
-         * case, and the retries configuration will not take effect (as the
-         * client won't generally know of any failures). The offset given back
-         * for each record will always be set to -1. acks=1 This will mean the
-         * leader will write the record to its local log but will respond
-         * without awaiting full acknowledgement from all followers. In this
-         * case should the leader fail immediately after acknowledging the
-         * record but before the followers have replicated it then the record
-         * will be lost. acks=all This means the leader will wait for the full
-         * set of in-sync replicas to acknowledge the record. This guarantees
-         * that the record will not be lost as long as at least one in-sync
-         * replica remains alive. This is the strongest available guarantee.
-         * This is equivalent to the acks=-1 setting.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: 1
-         * Group: producer
-         * 
-         * @param acks the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder acks(String acks) {
-            doSetProperty("acks", acks);
-            return this;
-        }
-        /**
-         * The producer will attempt to batch records together into fewer
-         * requests whenever multiple records are being sent to the same
-         * partition. This helps performance on both the client and the server.
-         * This configuration controls the default batch size in bytes. No
-         * attempt will be made to batch records larger than this size. Requests
-         * sent to brokers will contain multiple batches, one for each partition
-         * with data available to be sent. A small batch size will make batching
-         * less common and may reduce throughput (a batch size of zero will
-         * disable batching entirely). A very large batch size may use memory a
-         * bit more wastefully as we will always allocate a buffer of the
-         * specified batch size in anticipation of additional records.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 16384
-         * Group: producer
-         * 
-         * @param batchSize the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder batchSize(int batchSize) {
-            doSetProperty("batchSize", batchSize);
-            return this;
-        }
-        /**
-         * The producer will attempt to batch records together into fewer
-         * requests whenever multiple records are being sent to the same
-         * partition. This helps performance on both the client and the server.
-         * This configuration controls the default batch size in bytes. No
-         * attempt will be made to batch records larger than this size. Requests
-         * sent to brokers will contain multiple batches, one for each partition
-         * with data available to be sent. A small batch size will make batching
-         * less common and may reduce throughput (a batch size of zero will
-         * disable batching entirely). A very large batch size may use memory a
-         * bit more wastefully as we will always allocate a buffer of the
-         * specified batch size in anticipation of additional records.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 16384
-         * Group: producer
-         * 
-         * @param batchSize the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder batchSize(String batchSize) {
-            doSetProperty("batchSize", batchSize);
-            return this;
-        }
-        /**
-         * The total bytes of memory the producer can use to buffer records
-         * waiting to be sent to the server. If records are sent faster than
-         * they can be delivered to the server the producer will block for
-         * max.block.ms after which it will throw an exception. This setting
-         * should correspond roughly to the total memory the producer will use,
-         * but is not a hard bound since not all memory the producer uses is
-         * used for buffering. Some additional memory will be used for
-         * compression (if compression is enabled) as well as for maintaining
-         * in-flight requests.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 33554432
-         * Group: producer
-         * 
-         * @param bufferMemory the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder bufferMemory(long bufferMemory) {
-            doSetProperty("bufferMemory", bufferMemory);
-            return this;
-        }
-        /**
-         * The total bytes of memory the producer can use to buffer records
-         * waiting to be sent to the server. If records are sent faster than
-         * they can be delivered to the server the producer will block for
-         * max.block.ms after which it will throw an exception. This setting
-         * should correspond roughly to the total memory the producer will use,
-         * but is not a hard bound since not all memory the producer uses is
-         * used for buffering. Some additional memory will be used for
-         * compression (if compression is enabled) as well as for maintaining
-         * in-flight requests.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 33554432
-         * Group: producer
-         * 
-         * @param bufferMemory the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder bufferMemory(
-                String bufferMemory) {
-            doSetProperty("bufferMemory", bufferMemory);
-            return this;
-        }
-        /**
-         * The compression type for all data generated by the producer. The
-         * default is none (i.e. no compression). Valid values are none, gzip,
-         * snappy, lz4, or zstd. Compression is of full batches of data, so the
-         * efficacy of batching will also impact the compression ratio (more
-         * batching means better compression).
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: none
-         * Group: producer
-         * 
-         * @param compressionType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder compressionType(
-                String compressionType) {
-            doSetProperty("compressionType", compressionType);
-            return this;
-        }
-        /**
-         * An upper bound on the time to report success or failure after a call
-         * to send() returns. This limits the total time that a record will be
-         * delayed prior to sending, the time to await acknowledgement from the
-         * broker (if expected), and the time allowed for retriable send
-         * failures. The producer may report failure to send a record earlier
-         * than this config if either an unrecoverable error is encountered, the
-         * retries have been exhausted, or the record is added to a batch which
-         * reached an earlier delivery expiration deadline. The value of this
-         * config should be greater than or equal to the sum of
-         * request.timeout.ms and linger.ms.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2m
-         * Group: producer
-         * 
-         * @param deliveryTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder deliveryTimeoutMs(
-                int deliveryTimeoutMs) {
-            doSetProperty("deliveryTimeoutMs", deliveryTimeoutMs);
-            return this;
-        }
-        /**
-         * An upper bound on the time to report success or failure after a call
-         * to send() returns. This limits the total time that a record will be
-         * delayed prior to sending, the time to await acknowledgement from the
-         * broker (if expected), and the time allowed for retriable send
-         * failures. The producer may report failure to send a record earlier
-         * than this config if either an unrecoverable error is encountered, the
-         * retries have been exhausted, or the record is added to a batch which
-         * reached an earlier delivery expiration deadline. The value of this
-         * config should be greater than or equal to the sum of
-         * request.timeout.ms and linger.ms.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2m
-         * Group: producer
-         * 
-         * @param deliveryTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder deliveryTimeoutMs(
-                String deliveryTimeoutMs) {
-            doSetProperty("deliveryTimeoutMs", deliveryTimeoutMs);
-            return this;
-        }
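
The constraint from the Javadoc above, delivery.timeout.ms greater than or equal to request.timeout.ms plus linger.ms, is easiest to keep visible when the three options are set together. An illustrative fragment in the style of the earlier sketches:

    from("direct:send")
        .to(vertxKafka("my-topic")
                .bootstrapServers("localhost:9092")
                .requestTimeoutMs(30000)      // 30 s per request attempt
                .lingerMs(5)                  // 5 ms batching delay
                .deliveryTimeoutMs(120000));  // 2 m overall budget, comfortably above 30 s + 5 ms
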
-        /**
-         * When set to 'true', the producer will ensure that exactly one copy of
-         * each message is written in the stream. If 'false', producer retries
-         * due to broker failures, etc., may write duplicates of the retried
-         * message in the stream. Note that enabling idempotence requires
-         * max.in.flight.requests.per.connection to be less than or equal to 5,
-         * retries to be greater than 0 and acks must be 'all'. If these values
-         * are not explicitly set by the user, suitable values will be chosen.
-         * If incompatible values are set, a ConfigException will be thrown.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: false
-         * Group: producer
-         * 
-         * @param enableIdempotence the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder enableIdempotence(
-                boolean enableIdempotence) {
-            doSetProperty("enableIdempotence", enableIdempotence);
-            return this;
-        }
-        /**
-         * When set to 'true', the producer will ensure that exactly one copy of
-         * each message is written in the stream. If 'false', producer retries
-         * due to broker failures, etc., may write duplicates of the retried
-         * message in the stream. Note that enabling idempotence requires
-         * max.in.flight.requests.per.connection to be less than or equal to 5,
-         * retries to be greater than 0 and acks must be 'all'. If these values
-         * are not explicitly set by the user, suitable values will be chosen.
-         * If incompatible values are set, a ConfigException will be thrown.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: false
-         * Group: producer
-         * 
-         * @param enableIdempotence the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder enableIdempotence(
-                String enableIdempotence) {
-            doSetProperty("enableIdempotence", enableIdempotence);
-            return this;
-        }
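
Idempotent delivery only works with acks=all, retries greater than zero and at most five in-flight requests per connection; setting them explicitly makes the intent obvious instead of relying on the implied defaults. A sketch, values only for illustration:

    from("direct:send")
        .to(vertxKafka("my-topic")
                .bootstrapServers("localhost:9092")
                .enableIdempotence(true)
                .acks("all")
                .retries(2147483647)                      // effectively "retry until delivery.timeout.ms expires"
                .maxInFlightRequestsPerConnection(5));    // must stay <= 5 for idempotence
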
-        /**
-         * Serializer class for key that implements the
-         * org.apache.kafka.common.serialization.Serializer interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: org.apache.kafka.common.serialization.StringSerializer
-         * Group: producer
-         * 
-         * @param keySerializer the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder keySerializer(
-                String keySerializer) {
-            doSetProperty("keySerializer", keySerializer);
-            return this;
-        }
-        /**
-         * The producer groups together any records that arrive in between
-         * request transmissions into a single batched request. Normally this
-         * occurs only under load when records arrive faster than they can be
-         * sent out. However in some circumstances the client may want to reduce
-         * the number of requests even under moderate load. This setting
-         * accomplishes this by adding a small amount of artificial
-         * delay&amp;amp;mdash;that is, rather than immediately sending out a
-         * record the producer will wait for up to the given delay to allow
-         * other records to be sent so that the sends can be batched together.
-         * This can be thought of as analogous to Nagle's algorithm in TCP. This
-         * setting gives the upper bound on the delay for batching: once we get
-         * batch.size worth of records for a partition it will be sent
-         * immediately regardless of this setting, however if we have fewer than
-         * this many bytes accumulated for this partition we will 'linger' for
-         * the specified time waiting for more records to show up. This setting
-         * defaults to 0 (i.e. no delay). Setting linger.ms=5, for example,
-         * would have the effect of reducing the number of requests sent but
-         * would add up to 5ms of latency to records sent in the absence of
-         * load.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 0ms
-         * Group: producer
-         * 
-         * @param lingerMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder lingerMs(long lingerMs) {
-            doSetProperty("lingerMs", lingerMs);
-            return this;
-        }
-        /**
-         * The producer groups together any records that arrive in between
-         * request transmissions into a single batched request. Normally this
-         * occurs only under load when records arrive faster than they can be
-         * sent out. However in some circumstances the client may want to reduce
-         * the number of requests even under moderate load. This setting
-         * accomplishes this by adding a small amount of artificial
-         * delay&amp;amp;mdash;that is, rather than immediately sending out a
-         * record the producer will wait for up to the given delay to allow
-         * other records to be sent so that the sends can be batched together.
-         * This can be thought of as analogous to Nagle's algorithm in TCP. This
-         * setting gives the upper bound on the delay for batching: once we get
-         * batch.size worth of records for a partition it will be sent
-         * immediately regardless of this setting, however if we have fewer than
-         * this many bytes accumulated for this partition we will 'linger' for
-         * the specified time waiting for more records to show up. This setting
-         * defaults to 0 (i.e. no delay). Setting linger.ms=5, for example,
-         * would have the effect of reducing the number of requests sent but
-         * would add up to 5ms of latency to records sent in the absence of
-         * load.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 0ms
-         * Group: producer
-         * 
-         * @param lingerMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder lingerMs(String lingerMs) {
-            doSetProperty("lingerMs", lingerMs);
-            return this;
-        }
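
batch.size and linger.ms work together: a batch is flushed as soon as it is full or the linger delay expires, whichever comes first. A small throughput-oriented fragment (values are only examples):

    from("direct:send")
        .to(vertxKafka("my-topic")
                .bootstrapServers("localhost:9092")
                .batchSize(32768)   // bytes per partition batch
                .lingerMs(5));      // wait up to 5 ms for more records before sending
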
-        /**
-         * The configuration controls how long the KafkaProducer's send(),
-         * partitionsFor(), initTransactions(), sendOffsetsToTransaction(),
-         * commitTransaction() and abortTransaction() methods will block. For
-         * send() this timeout bounds the total time waiting for both metadata
-         * fetch and buffer allocation (blocking in the user-supplied
-         * serializers or partitioner is not counted against this timeout). For
-         * partitionsFor() this timeout bounds the time spent waiting for
-         * metadata if it is unavailable. The transaction-related methods always
-         * block, but may timeout if the transaction coordinator could not be
-         * discovered or did not respond within the timeout.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: producer
-         * 
-         * @param maxBlockMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxBlockMs(long maxBlockMs) {
-            doSetProperty("maxBlockMs", maxBlockMs);
-            return this;
-        }
-        /**
-         * The configuration controls how long the KafkaProducer's send(),
-         * partitionsFor(), initTransactions(), sendOffsetsToTransaction(),
-         * commitTransaction() and abortTransaction() methods will block. For
-         * send() this timeout bounds the total time waiting for both metadata
-         * fetch and buffer allocation (blocking in the user-supplied
-         * serializers or partitioner is not counted against this timeout). For
-         * partitionsFor() this timeout bounds the time spent waiting for
-         * metadata if it is unavailable. The transaction-related methods always
-         * block, but may timeout if the transaction coordinator could not be
-         * discovered or did not respond within the timeout.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: producer
-         * 
-         * @param maxBlockMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxBlockMs(String maxBlockMs) {
-            doSetProperty("maxBlockMs", maxBlockMs);
-            return this;
-        }
-        /**
-         * The maximum number of unacknowledged requests the client will send on
-         * a single connection before blocking. Note that if this setting is set
-         * to be greater than 1 and there are failed sends, there is a risk of
-         * message re-ordering due to retries (i.e., if retries are enabled).
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5
-         * Group: producer
-         * 
-         * @param maxInFlightRequestsPerConnection the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxInFlightRequestsPerConnection(
-                int maxInFlightRequestsPerConnection) {
-            doSetProperty("maxInFlightRequestsPerConnection", maxInFlightRequestsPerConnection);
-            return this;
-        }
-        /**
-         * The maximum number of unacknowledged requests the client will send on
-         * a single connection before blocking. Note that if this setting is set
-         * to be greater than 1 and there are failed sends, there is a risk of
-         * message re-ordering due to retries (i.e., if retries are enabled).
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 5
-         * Group: producer
-         * 
-         * @param maxInFlightRequestsPerConnection the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxInFlightRequestsPerConnection(
-                String maxInFlightRequestsPerConnection) {
-            doSetProperty("maxInFlightRequestsPerConnection", maxInFlightRequestsPerConnection);
-            return this;
-        }
-        /**
-         * The maximum size of a request in bytes. This setting will limit the
-         * number of record batches the producer will send in a single request
-         * to avoid sending huge requests. This is also effectively a cap on the
-         * maximum uncompressed record batch size. Note that the server has its
-         * own cap on the record batch size (after compression if compression is
-         * enabled) which may be different from this.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1048576
-         * Group: producer
-         * 
-         * @param maxRequestSize the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxRequestSize(
-                int maxRequestSize) {
-            doSetProperty("maxRequestSize", maxRequestSize);
-            return this;
-        }
-        /**
-         * The maximum size of a request in bytes. This setting will limit the
-         * number of record batches the producer will send in a single request
-         * to avoid sending huge requests. This is also effectively a cap on the
-         * maximum uncompressed record batch size. Note that the server has its
-         * own cap on the record batch size (after compression if compression is
-         * enabled) which may be different from this.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1048576
-         * Group: producer
-         * 
-         * @param maxRequestSize the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder maxRequestSize(
-                String maxRequestSize) {
-            doSetProperty("maxRequestSize", maxRequestSize);
-            return this;
-        }
-        /**
-         * Controls how long the producer will cache metadata for a topic that's
-         * idle. If the elapsed time since a topic was last produced to exceeds
-         * the metadata idle duration, then the topic's metadata is forgotten
-         * and the next access to it will force a metadata fetch request.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: producer
-         * 
-         * @param metadataMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metadataMaxIdleMs(
-                long metadataMaxIdleMs) {
-            doSetProperty("metadataMaxIdleMs", metadataMaxIdleMs);
-            return this;
-        }
-        /**
-         * Controls how long the producer will cache metadata for a topic that's
-         * idle. If the elapsed time since a topic was last produced to exceeds
-         * the metadata idle duration, then the topic's metadata is forgotten
-         * and the next access to it will force a metadata fetch request.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: producer
-         * 
-         * @param metadataMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder metadataMaxIdleMs(
-                String metadataMaxIdleMs) {
-            doSetProperty("metadataMaxIdleMs", metadataMaxIdleMs);
-            return this;
-        }
-        /**
-         * Partitioner class that implements the
-         * org.apache.kafka.clients.producer.Partitioner interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default:
-         * org.apache.kafka.clients.producer.internals.DefaultPartitioner
-         * Group: producer
-         * 
-         * @param partitionerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder partitionerClass(
-                String partitionerClass) {
-            doSetProperty("partitionerClass", partitionerClass);
-            return this;
-        }
-        /**
-         * Setting a value greater than zero will cause the client to resend any
-         * record whose send fails with a potentially transient error. Note that
-         * this retry is no different than if the client resent the record upon
-         * receiving the error. Allowing retries without setting
-         * max.in.flight.requests.per.connection to 1 will potentially change
-         * the ordering of records because if two batches are sent to a single
-         * partition, and the first fails and is retried but the second
-         * succeeds, then the records in the second batch may appear first. Note
-         * additionally that produce requests will be failed before the number
-         * of retries has been exhausted if the timeout configured by
-         * delivery.timeout.ms expires first before successful acknowledgement.
-         * Users should generally prefer to leave this config unset and instead
-         * use delivery.timeout.ms to control retry behavior.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2147483647
-         * Group: producer
-         * 
-         * @param retries the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder retries(int retries) {
-            doSetProperty("retries", retries);
-            return this;
-        }
-        /**
-         * Setting a value greater than zero will cause the client to resend any
-         * record whose send fails with a potentially transient error. Note that
-         * this retry is no different than if the client resent the record upon
-         * receiving the error. Allowing retries without setting
-         * max.in.flight.requests.per.connection to 1 will potentially change
-         * the ordering of records because if two batches are sent to a single
-         * partition, and the first fails and is retried but the second
-         * succeeds, then the records in the second batch may appear first. Note
-         * additionally that produce requests will be failed before the number
-         * of retries has been exhausted if the timeout configured by
-         * delivery.timeout.ms expires first before successful acknowledgement.
-         * Users should generally prefer to leave this config unset and instead
-         * use delivery.timeout.ms to control retry behavior.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2147483647
-         * Group: producer
-         * 
-         * @param retries the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder retries(String retries) {
-            doSetProperty("retries", retries);
-            return this;
-        }
-        /**
-         * The TransactionalId to use for transactional delivery. This enables
-         * reliability semantics which span multiple producer sessions since it
-         * allows the client to guarantee that transactions using the same
-         * TransactionalId have been completed prior to starting any new
-         * transactions. If no TransactionalId is provided, then the producer is
-         * limited to idempotent delivery. If a TransactionalId is configured,
-         * enable.idempotence is implied. By default the TransactionId is not
-         * configured, which means transactions cannot be used. Note that, by
-         * default, transactions require a cluster of at least three brokers
-         * which is the recommended setting for production; for development you
-         * can change this by adjusting the broker setting
-         * transaction.state.log.replication.factor.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: producer
-         * 
-         * @param transactionalId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder transactionalId(
-                String transactionalId) {
-            doSetProperty("transactionalId", transactionalId);
-            return this;
-        }
-        /**
-         * The maximum amount of time in ms that the transaction coordinator
-         * will wait for a transaction status update from the producer before
-         * proactively aborting the ongoing transaction. If this value is larger
-         * than the transaction.max.timeout.ms setting in the broker, the
-         * request will fail with a InvalidTxnTimeoutException error.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: producer
-         * 
-         * @param transactionTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder transactionTimeoutMs(
-                int transactionTimeoutMs) {
-            doSetProperty("transactionTimeoutMs", transactionTimeoutMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time in ms that the transaction coordinator
-         * will wait for a transaction status update from the producer before
-         * proactively aborting the ongoing transaction. If this value is larger
-         * than the transaction.max.timeout.ms setting in the broker, the
-         * request will fail with a InvalidTxnTimeoutException error.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 1m
-         * Group: producer
-         * 
-         * @param transactionTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder transactionTimeoutMs(
-                String transactionTimeoutMs) {
-            doSetProperty("transactionTimeoutMs", transactionTimeoutMs);
-            return this;
-        }
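The delivery options above (retries, transactionalId, transactionTimeoutMs) usually travel together on the producer endpoint. A minimal sketch, assuming the vertxKafka(...) static endpoint builder that this branch removes, with a placeholder topic and transactional id:

    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;

    public class VertxKafkaProducerRoute extends EndpointRouteBuilder {
        @Override
        public void configure() {
            from("timer:tick?period=1000")
                .setBody(constant("hello"))
                .to(vertxKafka("orders")                // placeholder topic
                    .transactionalId("orders-tx-1")     // implies idempotent delivery
                    .transactionTimeoutMs(60000));      // 1 minute, the documented default
            // retries is left at its default (2147483647); the option documentation above
            // recommends controlling retry behaviour via delivery.timeout.ms instead.
        }
    }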
-        /**
-         * Serializer class for value that implements the
-         * org.apache.kafka.common.serialization.Serializer interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: org.apache.kafka.common.serialization.StringSerializer
-         * Group: producer
-         * 
-         * @param valueSerializer the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder valueSerializer(
-                String valueSerializer) {
-            doSetProperty("valueSerializer", valueSerializer);
-            return this;
-        }
-        /**
-         * The fully qualified name of a SASL client callback handler class that
-         * implements the AuthenticateCallbackHandler interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslClientCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslClientCallbackHandlerClass(
-                String saslClientCallbackHandlerClass) {
-            doSetProperty("saslClientCallbackHandlerClass", saslClientCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * JAAS login context parameters for SASL connections in the format used
-         * by JAAS configuration files. JAAS configuration file format is
-         * described here. The format for the value is: loginModuleClass
-         * controlFlag (optionName=optionValue);. For brokers, the config must
-         * be prefixed with listener prefix and SASL mechanism name in
-         * lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslJaasConfig the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslJaasConfig(
-                String saslJaasConfig) {
-            doSetProperty("saslJaasConfig", saslJaasConfig);
-            return this;
-        }
-        /**
-         * Kerberos kinit command path.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: /usr/bin/kinit
-         * Group: security
-         * 
-         * @param saslKerberosKinitCmd the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosKinitCmd(
-                String saslKerberosKinitCmd) {
-            doSetProperty("saslKerberosKinitCmd", saslKerberosKinitCmd);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosMinTimeBeforeRelogin(
-                long saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosMinTimeBeforeRelogin(
-                String saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * The Kerberos principal name that Kafka runs as. This can be defined
-         * either in Kafka's JAAS config or in Kafka's config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslKerberosServiceName the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosServiceName(
-                String saslKerberosServiceName) {
-            doSetProperty("saslKerberosServiceName", saslKerberosServiceName);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosTicketRenewJitter(
-                double saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosTicketRenewJitter(
-                String saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosTicketRenewWindowFactor(
-                double saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslKerberosTicketRenewWindowFactor(
-                String saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * The fully qualified name of a SASL login callback handler class that
-         * implements the AuthenticateCallbackHandler interface. For brokers,
-         * login callback handler config must be prefixed with listener prefix
-         * and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginCallbackHandlerClass(
-                String saslLoginCallbackHandlerClass) {
-            doSetProperty("saslLoginCallbackHandlerClass", saslLoginCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * The fully qualified name of a class that implements the Login
-         * interface. For brokers, login config must be prefixed with listener
-         * prefix and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginClass(
-                String saslLoginClass) {
-            doSetProperty("saslLoginClass", saslLoginClass);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshBufferSeconds(
-                short saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshBufferSeconds(
-                String saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshMinPeriodSeconds(
-                short saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshMinPeriodSeconds(
-                String saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshWindowFactor(
-                double saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshWindowFactor(
-                String saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshWindowJitter(
-                double saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslLoginRefreshWindowJitter(
-                String saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * SASL mechanism used for client connections. This may be any mechanism
-         * for which a security provider is available. GSSAPI is the default
-         * mechanism.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: GSSAPI
-         * Group: security
-         * 
-         * @param saslMechanism the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder saslMechanism(
-                String saslMechanism) {
-            doSetProperty("saslMechanism", saslMechanism);
-            return this;
-        }
-        /**
-         * Protocol used to communicate with brokers. Valid values are:
-         * PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PLAINTEXT
-         * Group: security
-         * 
-         * @param securityProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder securityProtocol(
-                String securityProtocol) {
-            doSetProperty("securityProtocol", securityProtocol);
-            return this;
-        }
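Seen together, the SASL options above form one small cluster of settings on the same endpoint. A minimal sketch in the same EndpointRouteBuilder context as the earlier example (credentials are placeholders):

    to(vertxKafka("orders")
        .securityProtocol("SASL_SSL")      // one of PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
        .saslMechanism("SCRAM-SHA-256")    // any mechanism with a security provider; GSSAPI is the default
        .saslJaasConfig("org.apache.kafka.common.security.scram.ScramLoginModule required "
            + "username=\"app\" password=\"secret\";"));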
-        /**
-         * A list of configurable creator classes each returning a provider
-         * implementing security algorithms. These classes should implement the
-         * org.apache.kafka.common.security.auth.SecurityProviderCreator
-         * interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param securityProviders the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder securityProviders(
-                String securityProviders) {
-            doSetProperty("securityProviders", securityProviders);
-            return this;
-        }
-        /**
-         * A list of cipher suites. This is a named combination of
-         * authentication, encryption, MAC and key exchange algorithm used to
-         * negotiate the security settings for a network connection using TLS or
-         * SSL network protocol. By default all the available cipher suites are
-         * supported.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslCipherSuites the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslCipherSuites(
-                String sslCipherSuites) {
-            doSetProperty("sslCipherSuites", sslCipherSuites);
-            return this;
-        }
-        /**
-         * The list of protocols enabled for SSL connections. The default is
-         * 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2'
-         * otherwise. With the default value for Java 11, clients and servers
-         * will prefer TLSv1.3 if both support it and fall back to TLSv1.2
-         * otherwise (assuming both support at least TLSv1.2). This default
-         * should be fine for most cases. Also see the config documentation for
-         * ssl.protocol.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2,TLSv1.3
-         * Group: security
-         * 
-         * @param sslEnabledProtocols the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslEnabledProtocols(
-                String sslEnabledProtocols) {
-            doSetProperty("sslEnabledProtocols", sslEnabledProtocols);
-            return this;
-        }
-        /**
-         * The endpoint identification algorithm to validate server hostname
-         * using server certificate.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: https
-         * Group: security
-         * 
-         * @param sslEndpointIdentificationAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslEndpointIdentificationAlgorithm(
-                String sslEndpointIdentificationAlgorithm) {
-            doSetProperty("sslEndpointIdentificationAlgorithm", sslEndpointIdentificationAlgorithm);
-            return this;
-        }
-        /**
-         * The class of type
-         * org.apache.kafka.common.security.auth.SslEngineFactory to provide
-         * SSLEngine objects. Default value is
-         * org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslEngineFactoryClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslEngineFactoryClass(
-                String sslEngineFactoryClass) {
-            doSetProperty("sslEngineFactoryClass", sslEngineFactoryClass);
-            return this;
-        }
-        /**
-         * The algorithm used by key manager factory for SSL connections.
-         * Default value is the key manager factory algorithm configured for the
-         * Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: SunX509
-         * Group: security
-         * 
-         * @param sslKeymanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeymanagerAlgorithm(
-                String sslKeymanagerAlgorithm) {
-            doSetProperty("sslKeymanagerAlgorithm", sslKeymanagerAlgorithm);
-            return this;
-        }
-        /**
-         * The password of the private key in the key store file or the PEM key
-         * specified in 'ssl.keystore.key'. This is required for clients only if
-         * two-way authentication is configured.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeyPassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeyPassword(
-                String sslKeyPassword) {
-            doSetProperty("sslKeyPassword", sslKeyPassword);
-            return this;
-        }
-        /**
-         * Certificate chain in the format specified by 'ssl.keystore.type'.
-         * Default SSL engine factory supports only PEM format with a list of
-         * X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreCertificateChain the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeystoreCertificateChain(
-                String sslKeystoreCertificateChain) {
-            doSetProperty("sslKeystoreCertificateChain", sslKeystoreCertificateChain);
-            return this;
-        }
-        /**
-         * Private key in the format specified by 'ssl.keystore.type'. Default
-         * SSL engine factory supports only PEM format with PKCS#8 keys. If the
-         * key is encrypted, key password must be specified using
-         * 'ssl.key.password'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreKey the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeystoreKey(
-                String sslKeystoreKey) {
-            doSetProperty("sslKeystoreKey", sslKeystoreKey);
-            return this;
-        }
-        /**
-         * The location of the key store file. This is optional for clients and
-         * can be used for two-way client authentication.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeystoreLocation(
-                String sslKeystoreLocation) {
-            doSetProperty("sslKeystoreLocation", sslKeystoreLocation);
-            return this;
-        }
-        /**
-         * The store password for the key store file. This is optional for
-         * client and only needed if 'ssl.keystore.location' is configured. Key
-         * store password is not supported for PEM format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeystorePassword(
-                String sslKeystorePassword) {
-            doSetProperty("sslKeystorePassword", sslKeystorePassword);
-            return this;
-        }
-        /**
-         * The file format of the key store file. This is optional for client.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslKeystoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslKeystoreType(
-                String sslKeystoreType) {
-            doSetProperty("sslKeystoreType", sslKeystoreType);
-            return this;
-        }
-        /**
-         * The SSL protocol used to generate the SSLContext. The default is
-         * 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise.
-         * This value should be fine for most use cases. Allowed values in
-         * recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL',
-         * 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage
-         * is discouraged due to known security vulnerabilities. With the
-         * default value for this config and 'ssl.enabled.protocols', clients
-         * will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'.
-         * If this config is set to 'TLSv1.2', clients will not use 'TLSv1.3'
-         * even if it is one of the values in ssl.enabled.protocols and the
-         * server only supports 'TLSv1.3'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2
-         * Group: security
-         * 
-         * @param sslProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslProtocol(String sslProtocol) {
-            doSetProperty("sslProtocol", sslProtocol);
-            return this;
-        }
-        /**
-         * The name of the security provider used for SSL connections. Default
-         * value is the default security provider of the JVM.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslProvider the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslProvider(String sslProvider) {
-            doSetProperty("sslProvider", sslProvider);
-            return this;
-        }
-        /**
-         * The SecureRandom PRNG implementation to use for SSL cryptography
-         * operations.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslSecureRandomImplementation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslSecureRandomImplementation(
-                String sslSecureRandomImplementation) {
-            doSetProperty("sslSecureRandomImplementation", sslSecureRandomImplementation);
-            return this;
-        }
-        /**
-         * The algorithm used by trust manager factory for SSL connections.
-         * Default value is the trust manager factory algorithm configured for
-         * the Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PKIX
-         * Group: security
-         * 
-         * @param sslTrustmanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslTrustmanagerAlgorithm(
-                String sslTrustmanagerAlgorithm) {
-            doSetProperty("sslTrustmanagerAlgorithm", sslTrustmanagerAlgorithm);
-            return this;
-        }
-        /**
-         * Trusted certificates in the format specified by
-         * 'ssl.truststore.type'. Default SSL engine factory supports only PEM
-         * format with X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreCertificates the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslTruststoreCertificates(
-                String sslTruststoreCertificates) {
-            doSetProperty("sslTruststoreCertificates", sslTruststoreCertificates);
-            return this;
-        }
-        /**
-         * The location of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslTruststoreLocation(
-                String sslTruststoreLocation) {
-            doSetProperty("sslTruststoreLocation", sslTruststoreLocation);
-            return this;
-        }
-        /**
-         * The password for the trust store file. If a password is not set,
-         * trust store file configured will still be used, but integrity
-         * checking is disabled. Trust store password is not supported for PEM
-         * format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslTruststorePassword(
-                String sslTruststorePassword) {
-            doSetProperty("sslTruststorePassword", sslTruststorePassword);
-            return this;
-        }
-        /**
-         * The file format of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslTruststoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointProducerBuilder sslTruststoreType(
-                String sslTruststoreType) {
-            doSetProperty("sslTruststoreType", sslTruststoreType);
-            return this;
-        }
-    }
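For one-way TLS, the ssl* options above boil down to pointing the client at a truststore that holds the broker's CA. A minimal sketch with placeholder path and password, in the same EndpointRouteBuilder context as the earlier examples:

    to(vertxKafka("orders")
        .securityProtocol("SSL")
        .sslTruststoreLocation("/etc/kafka/client.truststore.jks")   // placeholder path
        .sslTruststorePassword("changeit")                           // placeholder password
        .sslTruststoreType("JKS"));                                  // the documented default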
-
-    /**
-     * Advanced builder for endpoint producers for the Vert.x Kafka component.
-     */
-    public interface AdvancedVertxKafkaEndpointProducerBuilder
-            extends
-                EndpointProducerBuilder {
-        default VertxKafkaEndpointProducerBuilder basic() {
-            return (VertxKafkaEndpointProducerBuilder) this;
-        }
-        /**
-         * Whether the producer should be started lazy (on the first message).
-         * By starting lazy you can use this to allow CamelContext and routes to
-         * startup in situations where a producer may otherwise fail during
-         * starting and cause the route to fail being started. By deferring this
-         * startup to be lazy then the startup failure can be handled during
-         * routing messages via Camel's routing error handlers. Beware that when
-         * the first message is processed then creating and starting the
-         * producer may take a little time and prolong the total processing
-         * time of that first message.
-         * 
-         * The option is a: &lt;code&gt;boolean&lt;/code&gt; type.
-         * 
-         * Default: false
-         * Group: producer (advanced)
-         * 
-         * @param lazyStartProducer the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointProducerBuilder lazyStartProducer(
-                boolean lazyStartProducer) {
-            doSetProperty("lazyStartProducer", lazyStartProducer);
-            return this;
-        }
-        /**
-         * Whether the producer should be started lazy (on the first message).
-         * By starting lazy you can use this to allow CamelContext and routes to
-         * startup in situations where a producer may otherwise fail during
-         * starting and cause the route to fail being started. By deferring this
-         * startup to be lazy then the startup failure can be handled during
-         * routing messages via Camel's routing error handlers. Beware that when
-         * the first message is processed then creating and starting the
-         * producer may take a little time and prolong the total processing
-         * time of that first message.
-         * 
-         * The option will be converted to a &lt;code&gt;boolean&lt;/code&gt;
-         * type.
-         * 
-         * Default: false
-         * Group: producer (advanced)
-         * 
-         * @param lazyStartProducer the value to set
-         * @return the dsl builder
-         */
-        default AdvancedVertxKafkaEndpointProducerBuilder lazyStartProducer(
-                String lazyStartProducer) {
-            doSetProperty("lazyStartProducer", lazyStartProducer);
-            return this;
-        }
-    }
-
-    /**
-     * Builder for endpoint for the Vert.x Kafka component.
-     */
-    public interface VertxKafkaEndpointBuilder
-            extends
-                VertxKafkaEndpointConsumerBuilder,
-                VertxKafkaEndpointProducerBuilder {
-        default AdvancedVertxKafkaEndpointBuilder advanced() {
-            return (AdvancedVertxKafkaEndpointBuilder) this;
-        }
-        /**
-         * Sets additional properties for either the Kafka consumer or the Kafka
-         * producer in case they can't be set directly on the Camel
-         * configurations (e.g. new Kafka properties that are not reflected yet
-         * in Camel configurations); the properties have to be prefixed with
-         * additionalProperties. (note the trailing dot). For example:
-         * additionalProperties.transactional.id=12345&amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param key the option key
-         * @param value the option value
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder additionalProperties(
-                String key,
-                Object value) {
-            doSetMultiValueProperty("additionalProperties", "additionalProperties." + key, value);
-            return this;
-        }
-        /**
-         * Sets additional properties for either the Kafka consumer or the Kafka
-         * producer in case they can't be set directly on the Camel
-         * configurations (e.g. new Kafka properties that are not reflected yet
-         * in Camel configurations); the properties have to be prefixed with
-         * additionalProperties. (note the trailing dot). For example:
-         * additionalProperties.transactional.id=12345&amp;additionalProperties.schema.registry.url=http://localhost:8811/avro.
-         * 
-         * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String,
-         * java.lang.Object&amp;gt;&lt;/code&gt; type.
-         * The option is multivalued, and you can use the
-         * additionalProperties(String, Object) method to add a value (call the
-         * method multiple times to set more values).
-         * 
-         * Group: common
-         * 
-         * @param values the values
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder additionalProperties(Map values) {
-            doSetMultiValueProperties("additionalProperties", "additionalProperties.", values);
-            return this;
-        }
-        /**
-         * A list of host/port pairs to use for establishing the initial
-         * connection to the Kafka cluster. The client will make use of all
-         * servers irrespective of which servers are specified here for
-         * bootstrapping; this list only impacts the initial hosts
-         * used to discover the full set of servers. This list should be in the
-         * form host1:port1,host2:port2,.... Since these servers are just used
-         * for the initial connection to discover the full cluster membership
-         * (which may change dynamically), this list need not contain the full
-         * set of servers (you may want more than one, though, in case a server
-         * is down).
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param bootstrapServers the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder bootstrapServers(
-                String bootstrapServers) {
-            doSetProperty("bootstrapServers", bootstrapServers);
-            return this;
-        }
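The common options apply to consumers and producers alike, and additionalProperties(key, value) is the escape hatch for raw Kafka properties without a dedicated option. A sketch reusing the values quoted in the documentation above (broker addresses and client id are placeholders):

    to(vertxKafka("orders")
        .bootstrapServers("broker1:9092,broker2:9092")   // initial discovery list only, not the full cluster
        .clientId("my-app")                              // shows up in server-side request logs
        .additionalProperties("schema.registry.url", "http://localhost:8811/avro")
        .additionalProperties("transactional.id", "12345"));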
-        /**
-         * Controls how the client uses DNS lookups. If set to use_all_dns_ips,
-         * connect to each returned IP address in sequence until a successful
-         * connection is established. After a disconnection, the next IP is
-         * used. Once all IPs have been used once, the client resolves the IP(s)
-         * from the hostname again (both the JVM and the OS cache DNS name
-         * lookups, however). If set to
-         * resolve_canonical_bootstrap_servers_only, resolve each bootstrap
-         * address into a list of canonical names. After the bootstrap phase,
-         * this behaves the same as use_all_dns_ips. If set to default
-         * (deprecated), attempt to connect to the first IP address returned by
-         * the lookup, even if the lookup returns multiple IP addresses.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: use_all_dns_ips
-         * Group: common
-         * 
-         * @param clientDnsLookup the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder clientDnsLookup(String clientDnsLookup) {
-            doSetProperty("clientDnsLookup", clientDnsLookup);
-            return this;
-        }
-        /**
-         * An id string to pass to the server when making requests. The purpose
-         * of this is to be able to track the source of requests beyond just
-         * ip/port by allowing a logical application name to be included in
-         * server-side request logging.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param clientId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder clientId(String clientId) {
-            doSetProperty("clientId", clientId);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder connectionsMaxIdleMs(
-                long connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
-        /**
-         * Close idle connections after the number of milliseconds specified by
-         * this config.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 9m
-         * Group: common
-         * 
-         * @param connectionsMaxIdleMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder connectionsMaxIdleMs(
-                String connectionsMaxIdleMs) {
-            doSetProperty("connectionsMaxIdleMs", connectionsMaxIdleMs);
-            return this;
-        }
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option is a:
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder headerFilterStrategy(
-                org.apache.camel.spi.HeaderFilterStrategy headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
-        /**
-         * To use a custom HeaderFilterStrategy to filter header to and from
-         * Camel message.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;org.apache.camel.spi.HeaderFilterStrategy&lt;/code&gt;
-         * type.
-         * 
-         * Group: common
-         * 
-         * @param headerFilterStrategy the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder headerFilterStrategy(
-                String headerFilterStrategy) {
-            doSetProperty("headerFilterStrategy", headerFilterStrategy);
-            return this;
-        }
-        /**
-         * A list of classes to use as interceptors. Implementing the
-         * org.apache.kafka.clients.producer.ProducerInterceptor interface
-         * allows you to intercept (and possibly mutate) the records received by
-         * the producer before they are published to the Kafka cluster. By
-         * default, there are no interceptors.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param interceptorClasses the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder interceptorClasses(
-                String interceptorClasses) {
-            doSetProperty("interceptorClasses", interceptorClasses);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metadataMaxAgeMs(long metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * The period of time in milliseconds after which we force a refresh of
-         * metadata even if we haven't seen any partition leadership changes to
-         * proactively discover any new brokers or partitions.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 5m
-         * Group: common
-         * 
-         * @param metadataMaxAgeMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metadataMaxAgeMs(
-                String metadataMaxAgeMs) {
-            doSetProperty("metadataMaxAgeMs", metadataMaxAgeMs);
-            return this;
-        }
-        /**
-         * A list of classes to use as metrics reporters. Implementing the
-         * org.apache.kafka.common.metrics.MetricsReporter interface allows
-         * plugging in classes that will be notified of new metric creation. The
-         * JmxReporter is always included to register JMX statistics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param metricReporters the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricReporters(String metricReporters) {
-            doSetProperty("metricReporters", metricReporters);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricsNumSamples(
-                int metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The number of samples maintained to compute metrics.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 2
-         * Group: common
-         * 
-         * @param metricsNumSamples the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricsNumSamples(
-                String metricsNumSamples) {
-            doSetProperty("metricsNumSamples", metricsNumSamples);
-            return this;
-        }
-        /**
-         * The highest recording level for metrics.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: INFO
-         * Group: common
-         * 
-         * @param metricsRecordingLevel the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricsRecordingLevel(
-                String metricsRecordingLevel) {
-            doSetProperty("metricsRecordingLevel", metricsRecordingLevel);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricsSampleWindowMs(
-                long metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The window of time a metrics sample is computed over.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param metricsSampleWindowMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder metricsSampleWindowMs(
-                String metricsSampleWindowMs) {
-            doSetProperty("metricsSampleWindowMs", metricsSampleWindowMs);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * The header VertxKafkaConstants#PARTITION_ID, if configured, takes
-         * precedence over this config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder partitionId(Integer partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
-        /**
-         * The partition to which the record will be sent (or null if no
-         * partition was specified) or read from a particular partition if set.
-         * The header VertxKafkaConstants#PARTITION_ID, if configured, takes
-         * precedence over this config.
-         * 
-         * The option will be converted to a
-         * &lt;code&gt;java.lang.Integer&lt;/code&gt; type.
-         * 
-         * Group: common
-         * 
-         * @param partitionId the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder partitionId(String partitionId) {
-            doSetProperty("partitionId", partitionId);
-            return this;
-        }
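partitionId pins the endpoint to a single partition, while the VertxKafkaConstants#PARTITION_ID header mentioned above can override it per message. A minimal sketch (the constant comes from the removed component; its import is assumed):

    from("direct:send")
        .setHeader(VertxKafkaConstants.PARTITION_ID, constant(1))   // per-message override, takes precedence
        .to(vertxKafka("orders")
            .partitionId(0));                                       // endpoint-level default partition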
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder receiveBufferBytes(
-                int receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP receive buffer (SO_RCVBUF) to use when reading
-         * data. If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 32768
-         * Group: common
-         * 
-         * @param receiveBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder receiveBufferBytes(
-                String receiveBufferBytes) {
-            doSetProperty("receiveBufferBytes", receiveBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder reconnectBackoffMaxMs(
-                long reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time in milliseconds to wait when reconnecting
-         * to a broker that has repeatedly failed to connect. If provided, the
-         * backoff per host will increase exponentially for each consecutive
-         * connection failure, up to this maximum. After calculating the backoff
-         * increase, 20% random jitter is added to avoid connection storms.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 1s
-         * Group: common
-         * 
-         * @param reconnectBackoffMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder reconnectBackoffMaxMs(
-                String reconnectBackoffMaxMs) {
-            doSetProperty("reconnectBackoffMaxMs", reconnectBackoffMaxMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder reconnectBackoffMs(
-                long reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The base amount of time to wait before attempting to reconnect to a
-         * given host. This avoids repeatedly connecting to a host in a tight
-         * loop. This backoff applies to all connection attempts by the client
-         * to a broker.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 50ms
-         * Group: common
-         * 
-         * @param reconnectBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder reconnectBackoffMs(
-                String reconnectBackoffMs) {
-            doSetProperty("reconnectBackoffMs", reconnectBackoffMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder requestTimeoutMs(int requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The configuration controls the maximum amount of time the client will
-         * wait for the response of a request. If the response is not received
-         * before the timeout elapses the client will resend the request if
-         * necessary or fail the request if retries are exhausted. This should
-         * be larger than replica.lag.time.max.ms (a broker configuration) to
-         * reduce the possibility of message duplication due to unnecessary
-         * producer retries.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param requestTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder requestTimeoutMs(
-                String requestTimeoutMs) {
-            doSetProperty("requestTimeoutMs", requestTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder retryBackoffMs(long retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The amount of time to wait before attempting to retry a failed
-         * request to a given topic partition. This avoids repeatedly sending
-         * requests in a tight loop under some failure scenarios.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 100ms
-         * Group: common
-         * 
-         * @param retryBackoffMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder retryBackoffMs(String retryBackoffMs) {
-            doSetProperty("retryBackoffMs", retryBackoffMs);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option is a: &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sendBufferBytes(int sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-         * If the value is -1, the OS default will be used.
-         * 
-         * The option will be converted to a &lt;code&gt;int&lt;/code&gt; type.
-         * 
-         * Default: 131072
-         * Group: common
-         * 
-         * @param sendBufferBytes the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sendBufferBytes(String sendBufferBytes) {
-            doSetProperty("sendBufferBytes", sendBufferBytes);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder socketConnectionSetupTimeoutMaxMs(
-                long socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The maximum amount of time the client will wait for the socket
-         * connection to be established. The connection setup timeout will
-         * increase exponentially for each consecutive connection failure up to
-         * this maximum. To avoid connection storms, a randomization factor of
-         * 0.2 will be applied to the timeout resulting in a random range
-         * between 20% below and 20% above the computed value.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 30s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMaxMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder socketConnectionSetupTimeoutMaxMs(
-                String socketConnectionSetupTimeoutMaxMs) {
-            doSetProperty("socketConnectionSetupTimeoutMaxMs", socketConnectionSetupTimeoutMaxMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder socketConnectionSetupTimeoutMs(
-                long socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * The amount of time the client will wait for the socket connection to
-         * be established. If the connection is not built before the timeout
-         * elapses, clients will close the socket channel.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 10s
-         * Group: common
-         * 
-         * @param socketConnectionSetupTimeoutMs the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder socketConnectionSetupTimeoutMs(
-                String socketConnectionSetupTimeoutMs) {
-            doSetProperty("socketConnectionSetupTimeoutMs", socketConnectionSetupTimeoutMs);
-            return this;
-        }
-        /**
-         * The fully qualified name of a SASL client callback handler class that
-         * implements the AuthenticateCallbackHandler interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslClientCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslClientCallbackHandlerClass(
-                String saslClientCallbackHandlerClass) {
-            doSetProperty("saslClientCallbackHandlerClass", saslClientCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * JAAS login context parameters for SASL connections in the format used
-         * by JAAS configuration files. JAAS configuration file format is
-         * described here. The format for the value is: loginModuleClass
-         * controlFlag (optionName=optionValue);. For brokers, the config must
-         * be prefixed with listener prefix and SASL mechanism name in
-         * lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslJaasConfig the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslJaasConfig(String saslJaasConfig) {
-            doSetProperty("saslJaasConfig", saslJaasConfig);
-            return this;
-        }
-        /**
-         * Kerberos kinit command path.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: /usr/bin/kinit
-         * Group: security
-         * 
-         * @param saslKerberosKinitCmd the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosKinitCmd(
-                String saslKerberosKinitCmd) {
-            doSetProperty("saslKerberosKinitCmd", saslKerberosKinitCmd);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option is a: &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosMinTimeBeforeRelogin(
-                long saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * Login thread sleep time between refresh attempts.
-         * 
-         * The option will be converted to a &lt;code&gt;long&lt;/code&gt; type.
-         * 
-         * Default: 60000
-         * Group: security
-         * 
-         * @param saslKerberosMinTimeBeforeRelogin the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosMinTimeBeforeRelogin(
-                String saslKerberosMinTimeBeforeRelogin) {
-            doSetProperty("saslKerberosMinTimeBeforeRelogin", saslKerberosMinTimeBeforeRelogin);
-            return this;
-        }
-        /**
-         * The Kerberos principal name that Kafka runs as. This can be defined
-         * either in Kafka's JAAS config or in Kafka's config.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslKerberosServiceName the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosServiceName(
-                String saslKerberosServiceName) {
-            doSetProperty("saslKerberosServiceName", saslKerberosServiceName);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosTicketRenewJitter(
-                double saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Percentage of random jitter added to the renewal time.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosTicketRenewJitter(
-                String saslKerberosTicketRenewJitter) {
-            doSetProperty("saslKerberosTicketRenewJitter", saslKerberosTicketRenewJitter);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosTicketRenewWindowFactor(
-                double saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * Login thread will sleep until the specified window factor of time
-         * from last refresh to ticket's expiry has been reached, at which time
-         * it will try to renew the ticket.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslKerberosTicketRenewWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslKerberosTicketRenewWindowFactor(
-                String saslKerberosTicketRenewWindowFactor) {
-            doSetProperty("saslKerberosTicketRenewWindowFactor", saslKerberosTicketRenewWindowFactor);
-            return this;
-        }
-        /**
-         * The fully qualified name of a SASL login callback handler class that
-         * implements the AuthenticateCallbackHandler interface. For brokers,
-         * login callback handler config must be prefixed with listener prefix
-         * and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginCallbackHandlerClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginCallbackHandlerClass(
-                String saslLoginCallbackHandlerClass) {
-            doSetProperty("saslLoginCallbackHandlerClass", saslLoginCallbackHandlerClass);
-            return this;
-        }
-        /**
-         * The fully qualified name of a class that implements the Login
-         * interface. For brokers, login config must be prefixed with listener
-         * prefix and SASL mechanism name in lower-case. For example,
-         * listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param saslLoginClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginClass(String saslLoginClass) {
-            doSetProperty("saslLoginClass", saslLoginClass);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshBufferSeconds(
-                short saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The amount of buffer time before credential expiration to maintain
-         * when refreshing a credential, in seconds. If a refresh would
-         * otherwise occur closer to expiration than the number of buffer
-         * seconds then the refresh will be moved up to maintain as much of the
-         * buffer time as possible. Legal values are between 0 and 3600 (1
-         * hour); a default value of 300 (5 minutes) is used if no value is
-         * specified. This value and sasl.login.refresh.min.period.seconds are
-         * both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 300
-         * Group: security
-         * 
-         * @param saslLoginRefreshBufferSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshBufferSeconds(
-                String saslLoginRefreshBufferSeconds) {
-            doSetProperty("saslLoginRefreshBufferSeconds", saslLoginRefreshBufferSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;short&lt;/code&gt; type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshMinPeriodSeconds(
-                short saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * The desired minimum time for the login refresh thread to wait before
-         * refreshing a credential, in seconds. Legal values are between 0 and
-         * 900 (15 minutes); a default value of 60 (1 minute) is used if no
-         * value is specified. This value and sasl.login.refresh.buffer.seconds
-         * are both ignored if their sum exceeds the remaining lifetime of a
-         * credential. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;short&lt;/code&gt;
-         * type.
-         * 
-         * Default: 60
-         * Group: security
-         * 
-         * @param saslLoginRefreshMinPeriodSeconds the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshMinPeriodSeconds(
-                String saslLoginRefreshMinPeriodSeconds) {
-            doSetProperty("saslLoginRefreshMinPeriodSeconds", saslLoginRefreshMinPeriodSeconds);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshWindowFactor(
-                double saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * Login refresh thread will sleep until the specified window factor
-         * relative to the credential's lifetime has been reached, at which time
-         * it will try to refresh the credential. Legal values are between 0.5
-         * (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used
-         * if no value is specified. Currently applies only to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.8
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowFactor the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshWindowFactor(
-                String saslLoginRefreshWindowFactor) {
-            doSetProperty("saslLoginRefreshWindowFactor", saslLoginRefreshWindowFactor);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option is a: &lt;code&gt;double&lt;/code&gt; type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshWindowJitter(
-                double saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * The maximum amount of random jitter relative to the credential's
-         * lifetime that is added to the login refresh thread's sleep time.
-         * Legal values are between 0 and 0.25 (25%) inclusive; a default value
-         * of 0.05 (5%) is used if no value is specified. Currently applies only
-         * to OAUTHBEARER.
-         * 
-         * The option will be converted to a &lt;code&gt;double&lt;/code&gt;
-         * type.
-         * 
-         * Default: 0.05
-         * Group: security
-         * 
-         * @param saslLoginRefreshWindowJitter the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslLoginRefreshWindowJitter(
-                String saslLoginRefreshWindowJitter) {
-            doSetProperty("saslLoginRefreshWindowJitter", saslLoginRefreshWindowJitter);
-            return this;
-        }
-        /**
-         * SASL mechanism used for client connections. This may be any mechanism
-         * for which a security provider is available. GSSAPI is the default
-         * mechanism.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: GSSAPI
-         * Group: security
-         * 
-         * @param saslMechanism the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder saslMechanism(String saslMechanism) {
-            doSetProperty("saslMechanism", saslMechanism);
-            return this;
-        }
-        /**
-         * Protocol used to communicate with brokers. Valid values are:
-         * PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PLAINTEXT
-         * Group: security
-         * 
-         * @param securityProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder securityProtocol(
-                String securityProtocol) {
-            doSetProperty("securityProtocol", securityProtocol);
-            return this;
-        }
-        /**
-         * A list of configurable creator classes each returning a provider
-         * implementing security algorithms. These classes should implement the
-         * org.apache.kafka.common.security.auth.SecurityProviderCreator
-         * interface.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param securityProviders the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder securityProviders(
-                String securityProviders) {
-            doSetProperty("securityProviders", securityProviders);
-            return this;
-        }
-        /**
-         * A list of cipher suites. This is a named combination of
-         * authentication, encryption, MAC and key exchange algorithm used to
-         * negotiate the security settings for a network connection using TLS or
-         * SSL network protocol. By default all the available cipher suites are
-         * supported.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslCipherSuites the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslCipherSuites(String sslCipherSuites) {
-            doSetProperty("sslCipherSuites", sslCipherSuites);
-            return this;
-        }
-        /**
-         * The list of protocols enabled for SSL connections. The default is
-         * 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2'
-         * otherwise. With the default value for Java 11, clients and servers
-         * will prefer TLSv1.3 if both support it and fallback to TLSv1.2
-         * otherwise (assuming both support at least TLSv1.2). This default
-         * should be fine for most cases. Also see the config documentation for
-         * ssl.protocol.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2,TLSv1.3
-         * Group: security
-         * 
-         * @param sslEnabledProtocols the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslEnabledProtocols(
-                String sslEnabledProtocols) {
-            doSetProperty("sslEnabledProtocols", sslEnabledProtocols);
-            return this;
-        }
-        /**
-         * The endpoint identification algorithm to validate server hostname
-         * using server certificate.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: https
-         * Group: security
-         * 
-         * @param sslEndpointIdentificationAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslEndpointIdentificationAlgorithm(
-                String sslEndpointIdentificationAlgorithm) {
-            doSetProperty("sslEndpointIdentificationAlgorithm", sslEndpointIdentificationAlgorithm);
-            return this;
-        }
-        /**
-         * The class of type
-         * org.apache.kafka.common.security.auth.SslEngineFactory to provide
-         * SSLEngine objects. Default value is
-         * org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslEngineFactoryClass the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslEngineFactoryClass(
-                String sslEngineFactoryClass) {
-            doSetProperty("sslEngineFactoryClass", sslEngineFactoryClass);
-            return this;
-        }
-        /**
-         * The algorithm used by key manager factory for SSL connections.
-         * Default value is the key manager factory algorithm configured for the
-         * Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: SunX509
-         * Group: security
-         * 
-         * @param sslKeymanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeymanagerAlgorithm(
-                String sslKeymanagerAlgorithm) {
-            doSetProperty("sslKeymanagerAlgorithm", sslKeymanagerAlgorithm);
-            return this;
-        }
-        /**
-         * The password of the private key in the key store file or the PEM
-         * key specified in 'ssl.keystore.key'. This is required for clients
-         * only if two-way authentication is configured.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeyPassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeyPassword(String sslKeyPassword) {
-            doSetProperty("sslKeyPassword", sslKeyPassword);
-            return this;
-        }
-        /**
-         * Certificate chain in the format specified by 'ssl.keystore.type'.
-         * Default SSL engine factory supports only PEM format with a list of
-         * X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreCertificateChain the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeystoreCertificateChain(
-                String sslKeystoreCertificateChain) {
-            doSetProperty("sslKeystoreCertificateChain", sslKeystoreCertificateChain);
-            return this;
-        }
-        /**
-         * Private key in the format specified by 'ssl.keystore.type'. Default
-         * SSL engine factory supports only PEM format with PKCS#8 keys. If the
-         * key is encrypted, key password must be specified using
-         * 'ssl.key.password'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreKey the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeystoreKey(String sslKeystoreKey) {
-            doSetProperty("sslKeystoreKey", sslKeystoreKey);
-            return this;
-        }
-        /**
-         * The location of the key store file. This is optional for the client
-         * and can be used for two-way client authentication.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeystoreLocation(
-                String sslKeystoreLocation) {
-            doSetProperty("sslKeystoreLocation", sslKeystoreLocation);
-            return this;
-        }
-        /**
-         * The store password for the key store file. This is optional for the
-         * client and only needed if 'ssl.keystore.location' is configured. Key
-         * store password is not supported for PEM format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslKeystorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeystorePassword(
-                String sslKeystorePassword) {
-            doSetProperty("sslKeystorePassword", sslKeystorePassword);
-            return this;
-        }
-        /**
-         * The file format of the key store file. This is optional for the client.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslKeystoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslKeystoreType(String sslKeystoreType) {
-            doSetProperty("sslKeystoreType", sslKeystoreType);
-            return this;
-        }
-        /**
-         * The SSL protocol used to generate the SSLContext. The default is
-         * 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise.
-         * This value should be fine for most use cases. Allowed values in
-         * recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL',
-         * 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage
-         * is discouraged due to known security vulnerabilities. With the
-         * default value for this config and 'ssl.enabled.protocols', clients
-         * will downgrade to 'TLSv1.2' if the server does not support 'TLSv1.3'.
-         * If this config is set to 'TLSv1.2', clients will not use 'TLSv1.3'
-         * even if it is one of the values in ssl.enabled.protocols and the
-         * server only supports 'TLSv1.3'.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: TLSv1.2
-         * Group: security
-         * 
-         * @param sslProtocol the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslProtocol(String sslProtocol) {
-            doSetProperty("sslProtocol", sslProtocol);
-            return this;
-        }
-        /**
-         * The name of the security provider used for SSL connections. Default
-         * value is the default security provider of the JVM.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslProvider the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslProvider(String sslProvider) {
-            doSetProperty("sslProvider", sslProvider);
-            return this;
-        }
-        /**
-         * The SecureRandom PRNG implementation to use for SSL cryptography
-         * operations.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslSecureRandomImplementation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslSecureRandomImplementation(
-                String sslSecureRandomImplementation) {
-            doSetProperty("sslSecureRandomImplementation", sslSecureRandomImplementation);
-            return this;
-        }
-        /**
-         * The algorithm used by trust manager factory for SSL connections.
-         * Default value is the trust manager factory algorithm configured for
-         * the Java Virtual Machine.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: PKIX
-         * Group: security
-         * 
-         * @param sslTrustmanagerAlgorithm the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslTrustmanagerAlgorithm(
-                String sslTrustmanagerAlgorithm) {
-            doSetProperty("sslTrustmanagerAlgorithm", sslTrustmanagerAlgorithm);
-            return this;
-        }
-        /**
-         * Trusted certificates in the format specified by
-         * 'ssl.truststore.type'. Default SSL engine factory supports only PEM
-         * format with X.509 certificates.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreCertificates the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslTruststoreCertificates(
-                String sslTruststoreCertificates) {
-            doSetProperty("sslTruststoreCertificates", sslTruststoreCertificates);
-            return this;
-        }
-        /**
-         * The location of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststoreLocation the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslTruststoreLocation(
-                String sslTruststoreLocation) {
-            doSetProperty("sslTruststoreLocation", sslTruststoreLocation);
-            return this;
-        }
-        /**
-         * The password for the trust store file. If a password is not set, the
-         * configured trust store file will still be used, but integrity
-         * checking is disabled. Trust store password is not supported for PEM
-         * format.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Group: security
-         * 
-         * @param sslTruststorePassword the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslTruststorePassword(
-                String sslTruststorePassword) {
-            doSetProperty("sslTruststorePassword", sslTruststorePassword);
-            return this;
-        }
-        /**
-         * The file format of the trust store file.
-         * 
-         * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type.
-         * 
-         * Default: JKS
-         * Group: security
-         * 
-         * @param sslTruststoreType the value to set
-         * @return the dsl builder
-         */
-        default VertxKafkaEndpointBuilder sslTruststoreType(
-                String sslTruststoreType) {
-            doSetProperty("sslTruststoreType", sslTruststoreType);
-            return this;
-        }
-    }
-
-    /**
-     * Advanced builder for endpoint for the Vert.x Kafka component.
-     */
-    public interface AdvancedVertxKafkaEndpointBuilder
-            extends
-                AdvancedVertxKafkaEndpointConsumerBuilder,
-                AdvancedVertxKafkaEndpointProducerBuilder {
-        default VertxKafkaEndpointBuilder basic() {
-            return (VertxKafkaEndpointBuilder) this;
-        }
-    }
-
-    public interface VertxKafkaBuilders {
-        /**
-         * Vert.x Kafka (camel-vertx-kafka)
-         * Send and receive messages to/from an Apache Kafka broker using the
-         * Vert.x Kafka client
-         * 
-         * Category: messaging
-         * Since: 3.7
-         * Maven coordinates: org.apache.camel:camel-vertx-kafka
-         * 
-         * @return the dsl builder for the headers' name.
-         */
-        default VertxKafkaHeaderNameBuilder vertxKafka() {
-            return VertxKafkaHeaderNameBuilder.INSTANCE;
-        }
-        /**
-         * Vert.x Kafka (camel-vertx-kafka)
-         * Send and receive messages to/from an Apache Kafka broker using the
-         * Vert.x Kafka client
-         * 
-         * Category: messaging
-         * Since: 3.7
-         * Maven coordinates: org.apache.camel:camel-vertx-kafka
-         * 
-         * Syntax: <code>vertx-kafka:topic</code>
-         * 
-         * Path parameter: topic (required)
-         * Name of the topic to use. On the consumer, you can use a comma to
-         * separate multiple topics. A producer can only send a message to a
-         * single topic.
-         * 
-         * @param path topic
-         * @return the dsl builder
-         */
-        @Deprecated
-        default VertxKafkaEndpointBuilder vertxKafka(String path) {
-            return VertxKafkaEndpointBuilderFactory.endpointBuilder("vertx-kafka", path);
-        }
-        /**
-         * Vert.x Kafka (camel-vertx-kafka)
-         * Send and receive messages to/from an Apache Kafka broker using the
-         * Vert.x Kafka client
-         * 
-         * Category: messaging
-         * Since: 3.7
-         * Maven coordinates: org.apache.camel:camel-vertx-kafka
-         * 
-         * Syntax: <code>vertx-kafka:topic</code>
-         * 
-         * Path parameter: topic (required)
-         * Name of the topic to use. On the consumer, you can use a comma to
-         * separate multiple topics. A producer can only send a message to a
-         * single topic.
-         * 
-         * @param componentName to use a custom component name for the endpoint
-         * instead of the default name
-         * @param path topic
-         * @return the dsl builder
-         */
-        @Deprecated
-        default VertxKafkaEndpointBuilder vertxKafka(
-                String componentName,
-                String path) {
-            return VertxKafkaEndpointBuilderFactory.endpointBuilder(componentName, path);
-        }
-    }
-
-    /**
-     * The builder of headers' name for the Vert.x Kafka component.
-     */
-    public static class VertxKafkaHeaderNameBuilder {
-        /**
-         * The internal instance of the builder used to access to all the
-         * methods representing the name of headers.
-         */
-        private static final VertxKafkaHeaderNameBuilder INSTANCE = new VertxKafkaHeaderNameBuilder();
-
-        /**
-         * Producer: Explicitly specify the partition identifier, for example
-         * partition 0. This will trigger the component to produce all the
-         * messages to the specified partition. Consumer: The partition
-         * identifier where the message was consumed from.
-         * 
-         * The option is a: {@code Integer} type.
-         * 
-         * Group: common
-         * 
-         * @return the name of the header {@code VertxKafkaPartitionId}.
-         */
-        public String vertxKafkaPartitionId() {
-            return "VertxKafkaPartitionId";
-        }
-
-        /**
-         * Producer: Explicitly specify the message key; if the partition ID is
-         * not specified, this will trigger the messages to go into the same
-         * partition. Consumer: The message key.
-         * 
-         * The option is a: {@code String} type.
-         * 
-         * Group: common
-         * 
-         * @return the name of the header {@code VertxKafkaMessageKey}.
-         */
-        public String vertxKafkaMessageKey() {
-            return "VertxKafkaMessageKey";
-        }
-
-        /**
-         * Producer: Explicitly specify the topic to which the messages are
-         * produced; this will be preserved in case of header aggregation.
-         * Consumer: The topic from which the message originated.
-         * 
-         * The option is a: {@code String} type.
-         * 
-         * Group: common
-         * 
-         * @return the name of the header {@code VertxKafkaTopic}.
-         */
-        public String vertxKafkaTopic() {
-            return "VertxKafkaTopic";
-        }
-
-        /**
-         * Produced record metadata.
-         * 
-         * The option is a: {@code List<RecordMetadata>} type.
-         * 
-         * Group: producer
-         * 
-         * @return the name of the header {@code VertxKafkaRecordMetadata}.
-         */
-        public String vertxKafkaRecordMetadata() {
-            return "VertxKafkaRecordMetadata";
-        }
-
-        /**
-         * The offset of the message in Kafka topic.
-         * 
-         * The option is a: {@code Long} type.
-         * 
-         * Group: consumer
-         * 
-         * @return the name of the header {@code VertxKafkaOffset}.
-         */
-        public String vertxKafkaOffset() {
-            return "VertxKafkaOffset";
-        }
-
-        /**
-         * The record Kafka headers.
-         * 
-         * The option is a: {@code List<KafkaHeader>} type.
-         * 
-         * Group: consumer
-         * 
-         * @return the name of the header {@code VertxKafkaHeaders}.
-         */
-        public String vertxKafkaHeaders() {
-            return "VertxKafkaHeaders";
-        }
-
-        /**
-         * The timestamp of this record.
-         * 
-         * The option is a: {@code Long} type.
-         * 
-         * Group: consumer
-         * 
-         * @return the name of the header {@code VertxKafkaTimestamp}.
-         */
-        public String vertxKafkaTimestamp() {
-            return "VertxKafkaTimestamp";
-        }
-
-        /**
-         * The ProducerRecord also has an associated timestamp. If the user did
-         * provide a timestamp, the producer will stamp the record with the
-         * provided timestamp and the header is not preserved.
-         * 
-         * The option is a: {@code Long} type.
-         * 
-         * Group: producer
-         * 
-         * @return the name of the header {@code VertxKafkaOverrideTimestamp}.
-         */
-        public String vertxKafkaOverrideTimestamp() {
-            return "VertxKafkaOverrideTimestamp";
-        }
-
-        /**
-         * Explicitly specify the topic to which the messages are produced;
-         * this will not be preserved in case of header aggregation and it will
-         * take precedence over CamelVertxKafkaTopic.
-         * 
-         * The option is a: {@code String} type.
-         * 
-         * Group: producer
-         * 
-         * @return the name of the header {@code VertxKafkaOverrideTopic}.
-         */
-        public String vertxKafkaOverrideTopic() {
-            return "VertxKafkaOverrideTopic";
-        }
-    }
-    @Deprecated
-    static VertxKafkaEndpointBuilder endpointBuilder(
-            String componentName,
-            String path) {
-        class VertxKafkaEndpointBuilderImpl extends AbstractEndpointBuilder implements VertxKafkaEndpointBuilder, AdvancedVertxKafkaEndpointBuilder {
-            public VertxKafkaEndpointBuilderImpl(String path) {
-                super(componentName, path);
-            }
-        }
-        return new VertxKafkaEndpointBuilderImpl(path);
-    }
-}
\ No newline at end of file
diff --git a/parent/pom.xml b/parent/pom.xml
index 4ddb39db2cc..d499fc21043 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -2732,11 +2732,6 @@
                 <artifactId>camel-servicenow-maven-plugin</artifactId>
                 <version>${project.version}</version>
             </dependency>
-            <dependency>
-                <groupId>org.apache.camel.maven</groupId>
-                <artifactId>camel-vertx-kafka-maven-plugin</artifactId>
-                <version>${project.version}</version>
-            </dependency>
         <!-- camel components: END -->
 
             <!-- camel dsl: START -->