Posted to commits@camel.apache.org by ac...@apache.org on 2020/10/12 16:23:53 UTC

[camel-kafka-connector] 02/03: Regen camel-kafka-kafka-connector

This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch reintr-kafka
in repository https://gitbox.apache.org/repos/asf/camel-kafka-connector.git

commit 08507bb23e2cef6891e2a4e276a337c6cc744412
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Mon Oct 12 17:54:35 2020 +0200

    Regen camel-kafka-kafka-connector
---
 .../resources/descriptors/connectors.properties    |   2 +
 connectors/camel-kafka-kafka-connector/pom.xml     | 135 ++++
 .../descriptors/connector-sink.properties          |   1 +
 .../descriptors/connector-source.properties        |   1 +
 .../src/generated/resources/camel-kafka-sink.json  | 816 +++++++++++++++++++++
 .../generated/resources/camel-kafka-source.json    | 738 +++++++++++++++++++
 .../src/main/assembly/package.xml                  |  57 ++
 .../docs/camel-kafka-kafka-sink-connector.adoc     | 187 +++++
 .../docs/camel-kafka-kafka-source-connector.adoc   | 174 +++++
 .../examples/CamelKafkaSinkConnector.properties    |  33 +
 .../examples/CamelKafkaSourceConnector.properties  |  33 +
 .../kafka/CamelKafkaSinkConnector.java             |  35 +
 .../kafka/CamelKafkaSinkConnectorConfig.java       | 578 +++++++++++++++
 .../kafkaconnector/kafka/CamelKafkaSinkTask.java   |  39 +
 .../kafka/CamelKafkaSourceConnector.java           |  35 +
 .../kafka/CamelKafkaSourceConnectorConfig.java     | 528 +++++++++++++
 .../kafkaconnector/kafka/CamelKafkaSourceTask.java |  39 +
 .../src/main/resources/META-INF/LICENSE.txt        | 203 +++++
 .../src/main/resources/META-INF/NOTICE.txt         |  11 +
 connectors/pom.xml                                 |   1 +
 docs/modules/ROOT/pages/connectors.adoc            |   3 +-
 .../camel-kafka-kafka-sink-connector.adoc          | 187 +++++
 .../camel-kafka-kafka-source-connector.adoc        | 174 +++++
 23 files changed, 4009 insertions(+), 1 deletion(-)
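
For context, a minimal standalone configuration for the new sink connector might
look like the following sketch. The property names come from the generated
camel-kafka-sink.json below; the topic and broker values are placeholders, and the
bundled examples/CamelKafkaSinkConnector.properties is the canonical template.

    name=CamelKafkaSinkConnector
    connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
    tasks.max=1
    # Kafka Connect topic the sink consumes from (placeholder name)
    topics=mytopic
    key.converter=org.apache.kafka.connect.storage.StringConverter
    value.converter=org.apache.kafka.connect.storage.StringConverter
    # Camel endpoint options: destination topic and brokers of the remote Kafka cluster (placeholder values)
    camel.sink.path.topic=remote-topic
    camel.sink.endpoint.brokers=remotehost:9092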

diff --git a/camel-kafka-connector-catalog/src/generated/resources/descriptors/connectors.properties b/camel-kafka-connector-catalog/src/generated/resources/descriptors/connectors.properties
index 818e080..a77388e 100644
--- a/camel-kafka-connector-catalog/src/generated/resources/descriptors/connectors.properties
+++ b/camel-kafka-connector-catalog/src/generated/resources/descriptors/connectors.properties
@@ -521,3 +521,5 @@ camel-aws2-iam-sink
 camel-aws2-kms-sink
 camel-aws2-mq-sink
 camel-aws2-msk-sink
+camel-kafka-source
+camel-kafka-sink
diff --git a/connectors/camel-kafka-kafka-connector/pom.xml b/connectors/camel-kafka-kafka-connector/pom.xml
new file mode 100644
index 0000000..da33b6c
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/pom.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.camel.kafkaconnector</groupId>
+    <artifactId>connectors</artifactId>
+    <version>0.6.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>camel-kafka-kafka-connector</artifactId>
+  <name>Camel-Kafka-Connector :: kafka</name>
+  <description>Camel Kafka Connector for kafka</description>
+  <dependencies>
+    <!-- Kafka -->
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>connect-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>connect-transforms</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <!-- Camel -->
+    <dependency>
+      <groupId>org.apache.camel</groupId>
+      <artifactId>camel-kafka</artifactId>
+    </dependency>
+    <!--START OF GENERATED CODE-->
+    <dependency>
+      <groupId>org.apache.camel.kafkaconnector</groupId>
+      <artifactId>camel-kafka-connector</artifactId>
+    </dependency>
+    <!--END OF GENERATED CODE-->
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>${version.maven.surefire.plugin}</version>
+        <configuration>
+          <failIfNoTests>false</failIfNoTests>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <version>${version.maven.jar}</version>
+        <configuration>
+          <archive>
+            <manifest>
+              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+            </manifest>
+          </archive>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>2.5.1</version>
+        <inherited>true</inherited>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>2.5.3</version>
+        <configuration>
+          <descriptors>
+            <descriptor>src/main/assembly/package.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <!--START OF GENERATED CODE-->
+  <repositories>
+    <!-- camel-jira -->
+    <repository>
+      <id>atlassian-public</id>
+      <url>https://packages.atlassian.com/maven-external</url>
+      <name>Atlassian Public Repo</name>
+      <snapshots>
+        <enabled>false</enabled>
+      </snapshots>
+      <releases>
+        <enabled>true</enabled>
+      </releases>
+    </repository>
+    <!-- camel-ipfs and camel-weka -->
+    <repository>
+      <id>jboss.thirdparty</id>
+      <name>JBoss Thirdparty Repository</name>
+      <url>https://repository.jboss.org/nexus/service/local/repositories/thirdparty-releases/content/</url>
+      <snapshots>
+        <enabled>false</enabled>
+      </snapshots>
+      <releases>
+        <enabled>true</enabled>
+      </releases>
+    </repository>
+  </repositories>
+  <!--END OF GENERATED CODE-->
+</project>
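
The maven-assembly-plugin execution above packages the connector (per
src/main/assembly/package.xml) so that it can be dropped onto a Kafka Connect
worker's plugin path. A minimal sketch of the corresponding worker setting,
assuming the archive is extracted under /opt/connectors:

    # Kafka Connect worker configuration (assumed install location)
    plugin.path=/opt/connectors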
diff --git a/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-sink.properties b/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-sink.properties
new file mode 100644
index 0000000..6dfe369
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-sink.properties
@@ -0,0 +1 @@
+camel-kafka-sink
\ No newline at end of file
diff --git a/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-source.properties b/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-source.properties
new file mode 100644
index 0000000..e814b95
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/generated/descriptors/connector-source.properties
@@ -0,0 +1 @@
+camel-kafka-source
\ No newline at end of file
diff --git a/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-sink.json b/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-sink.json
new file mode 100644
index 0000000..9212d31
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-sink.json
@@ -0,0 +1,816 @@
+{
+	"connector": {
+		"class": "org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector",
+		"artifactId": "camel-kafka-kafka-connector",
+		"groupId": "org.apache.camel.kafkaconnector",
+		"id": "camel-kafka-sink",
+		"type": "sink",
+		"version": "0.6.0-SNAPSHOT"
+	},
+	"properties": {
+		"camel.sink.path.topic": {
+			"name": "camel.sink.path.topic",
+			"description": "Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic.",
+			"defaultValue": "null",
+			"priority": "HIGH"
+		},
+		"camel.sink.endpoint.additionalProperties": {
+			"name": "camel.sink.endpoint.additionalProperties",
+			"description": "Sets additional properties for either the kafka consumer or the kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties. For example: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http:\/\/localhost:8811\/avro",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.brokers": {
+			"name": "camel.sink.endpoint.brokers",
+			"description": "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.clientId": {
+			"name": "camel.sink.endpoint.clientId",
+			"description": "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.headerFilterStrategy": {
+			"name": "camel.sink.endpoint.headerFilterStrategy",
+			"description": "To use a custom HeaderFilterStrategy to filter headers to and from the Camel message.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.reconnectBackoffMaxMs": {
+			"name": "camel.sink.endpoint.reconnectBackoffMaxMs",
+			"description": "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.",
+			"defaultValue": "\"1000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.shutdownTimeout": {
+			"name": "camel.sink.endpoint.shutdownTimeout",
+			"description": "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.",
+			"defaultValue": "30000",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.bufferMemorySize": {
+			"name": "camel.sink.endpoint.bufferMemorySize",
+			"description": "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some additional memory will [...]
+			"defaultValue": "\"33554432\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.compressionCodec": {
+			"name": "camel.sink.endpoint.compressionCodec",
+			"description": "This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip, snappy and lz4. One of: [none] [gzip] [snappy] [lz4]",
+			"defaultValue": "\"none\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.connectionMaxIdleMs": {
+			"name": "camel.sink.endpoint.connectionMaxIdleMs",
+			"description": "Close idle connections after the number of milliseconds specified by this config.",
+			"defaultValue": "\"540000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.enableIdempotence": {
+			"name": "camel.sink.endpoint.enableIdempotence",
+			"description": "If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1, retries cannot be zero, and additionally acks must be set to 'all'.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kafkaHeaderSerializer": {
+			"name": "camel.sink.endpoint.kafkaHeaderSerializer",
+			"description": "To use a custom KafkaHeaderSerializer to serialize kafka header values",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.key": {
+			"name": "camel.sink.endpoint.key",
+			"description": "The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.keySerializerClass": {
+			"name": "camel.sink.endpoint.keySerializerClass",
+			"description": "The serializer class for keys (defaults to the same as for messages if nothing is given).",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringSerializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.lazyStartProducer": {
+			"name": "camel.sink.endpoint.lazyStartProducer",
+			"description": "Whether the producer should be started lazily (on the first message). By starting lazily you can use this to allow CamelContext and routes to start up in situations where a producer may otherwise fail during starting and cause the route to fail to be started. By deferring this startup to be lazy, the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the pr [...]
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.lingerMs": {
+			"name": "camel.sink.endpoint.lingerMs",
+			"description": "The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay; that is, rather than immediately sending out a record, the producer will wait for [...]
+			"defaultValue": "\"0\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.maxBlockMs": {
+			"name": "camel.sink.endpoint.maxBlockMs",
+			"description": "The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, e.g. the buffer is full or metadata is unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.maxInFlightRequest": {
+			"name": "camel.sink.endpoint.maxInFlightRequest",
+			"description": "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).",
+			"defaultValue": "\"5\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.maxRequestSize": {
+			"name": "camel.sink.endpoint.maxRequestSize",
+			"description": "The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.",
+			"defaultValue": "\"1048576\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.metadataMaxAgeMs": {
+			"name": "camel.sink.endpoint.metadataMaxAgeMs",
+			"description": "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.",
+			"defaultValue": "\"300000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.metricReporters": {
+			"name": "camel.sink.endpoint.metricReporters",
+			"description": "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.metricsSampleWindowMs": {
+			"name": "camel.sink.endpoint.metricsSampleWindowMs",
+			"description": "The window of time, in milliseconds, over which a metrics sample is computed.",
+			"defaultValue": "\"30000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.noOfMetricsSample": {
+			"name": "camel.sink.endpoint.noOfMetricsSample",
+			"description": "The number of samples maintained to compute metrics.",
+			"defaultValue": "\"2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.partitioner": {
+			"name": "camel.sink.endpoint.partitioner",
+			"description": "The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key.",
+			"defaultValue": "\"org.apache.kafka.clients.producer.internals.DefaultPartitioner\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.partitionKey": {
+			"name": "camel.sink.endpoint.partitionKey",
+			"description": "The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.producerBatchSize": {
+			"name": "camel.sink.endpoint.producerBatchSize",
+			"description": "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size will make batching less co [...]
+			"defaultValue": "\"16384\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.queueBufferingMaxMessages": {
+			"name": "camel.sink.endpoint.queueBufferingMaxMessages",
+			"description": "The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped.",
+			"defaultValue": "\"10000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.receiveBufferBytes": {
+			"name": "camel.sink.endpoint.receiveBufferBytes",
+			"description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.",
+			"defaultValue": "\"65536\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.reconnectBackoffMs": {
+			"name": "camel.sink.endpoint.reconnectBackoffMs",
+			"description": "The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.",
+			"defaultValue": "\"50\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.recordMetadata": {
+			"name": "camel.sink.endpoint.recordMetadata",
+			"description": "Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata metadata. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA",
+			"defaultValue": "true",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.requestRequiredAcks": {
+			"name": "camel.sink.endpoint.requestRequiredAcks",
+			"description": "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and t [...]
+			"defaultValue": "\"1\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.requestTimeoutMs": {
+			"name": "camel.sink.endpoint.requestTimeoutMs",
+			"description": "The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client.",
+			"defaultValue": "\"30000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.retries": {
+			"name": "camel.sink.endpoint.retries",
+			"description": "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first.",
+			"defaultValue": "\"0\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.retryBackoffMs": {
+			"name": "camel.sink.endpoint.retryBackoffMs",
+			"description": "Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.",
+			"defaultValue": "\"100\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sendBufferBytes": {
+			"name": "camel.sink.endpoint.sendBufferBytes",
+			"description": "Socket write buffer size",
+			"defaultValue": "\"131072\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.serializerClass": {
+			"name": "camel.sink.endpoint.serializerClass",
+			"description": "The serializer class for messages.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringSerializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.workerPool": {
+			"name": "camel.sink.endpoint.workerPool",
+			"description": "To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.workerPoolCoreSize": {
+			"name": "camel.sink.endpoint.workerPoolCoreSize",
+			"description": "Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.",
+			"defaultValue": "\"10\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.workerPoolMaxSize": {
+			"name": "camel.sink.endpoint.workerPoolMaxSize",
+			"description": "Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.",
+			"defaultValue": "\"20\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.basicPropertyBinding": {
+			"name": "camel.sink.endpoint.basicPropertyBinding",
+			"description": "Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.synchronous": {
+			"name": "camel.sink.endpoint.synchronous",
+			"description": "Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported).",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.schemaRegistryURL": {
+			"name": "camel.sink.endpoint.schemaRegistryURL",
+			"description": "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.interceptorClasses": {
+			"name": "camel.sink.endpoint.interceptorClasses",
+			"description": "Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a producer interceptor on a consumer it will throw a ClassCastException at runtime",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kerberosBeforeReloginMinTime": {
+			"name": "camel.sink.endpoint.kerberosBeforeReloginMinTime",
+			"description": "Login thread sleep time between refresh attempts.",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kerberosInitCmd": {
+			"name": "camel.sink.endpoint.kerberosInitCmd",
+			"description": "Kerberos kinit command path. Default is \/usr\/bin\/kinit",
+			"defaultValue": "\"\/usr\/bin\/kinit\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kerberosPrincipalToLocalRules": {
+			"name": "camel.sink.endpoint.kerberosPrincipalToLocalRules",
+			"description": "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}\/{hostname}@{REALM} are mapped to {username}. For more details on the format please see the security authorization and acls documentation. Multiple values can b [...]
+			"defaultValue": "\"DEFAULT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kerberosRenewJitter": {
+			"name": "camel.sink.endpoint.kerberosRenewJitter",
+			"description": "Percentage of random jitter added to the renewal time.",
+			"defaultValue": "\"0.05\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.kerberosRenewWindowFactor": {
+			"name": "camel.sink.endpoint.kerberosRenewWindowFactor",
+			"description": "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.",
+			"defaultValue": "\"0.8\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.saslJaasConfig": {
+			"name": "camel.sink.endpoint.saslJaasConfig",
+			"description": "Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.saslKerberosServiceName": {
+			"name": "camel.sink.endpoint.saslKerberosServiceName",
+			"description": "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.saslMechanism": {
+			"name": "camel.sink.endpoint.saslMechanism",
+			"description": "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http:\/\/www.iana.org\/assignments\/sasl-mechanisms\/sasl-mechanisms.xhtml",
+			"defaultValue": "\"GSSAPI\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.securityProtocol": {
+			"name": "camel.sink.endpoint.securityProtocol",
+			"description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported",
+			"defaultValue": "\"PLAINTEXT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslCipherSuites": {
+			"name": "camel.sink.endpoint.sslCipherSuites",
+			"description": "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslContextParameters": {
+			"name": "camel.sink.endpoint.sslContextParameters",
+			"description": "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslEnabledProtocols": {
+			"name": "camel.sink.endpoint.sslEnabledProtocols",
+			"description": "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslEndpointAlgorithm": {
+			"name": "camel.sink.endpoint.sslEndpointAlgorithm",
+			"description": "The endpoint identification algorithm to validate server hostname using server certificate.",
+			"defaultValue": "\"https\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslKeymanagerAlgorithm": {
+			"name": "camel.sink.endpoint.sslKeymanagerAlgorithm",
+			"description": "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"SunX509\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslKeyPassword": {
+			"name": "camel.sink.endpoint.sslKeyPassword",
+			"description": "The password of the private key in the key store file. This is optional for client.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslKeystoreLocation": {
+			"name": "camel.sink.endpoint.sslKeystoreLocation",
+			"description": "The location of the key store file. This is optional for client and can be used for two-way authentication for client.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslKeystorePassword": {
+			"name": "camel.sink.endpoint.sslKeystorePassword",
+			"description": "The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslKeystoreType": {
+			"name": "camel.sink.endpoint.sslKeystoreType",
+			"description": "The file format of the key store file. This is optional for client. Default value is JKS",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslProtocol": {
+			"name": "camel.sink.endpoint.sslProtocol",
+			"description": "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslProvider": {
+			"name": "camel.sink.endpoint.sslProvider",
+			"description": "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslTrustmanagerAlgorithm": {
+			"name": "camel.sink.endpoint.sslTrustmanagerAlgorithm",
+			"description": "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"PKIX\"",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslTruststoreLocation": {
+			"name": "camel.sink.endpoint.sslTruststoreLocation",
+			"description": "The location of the trust store file.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslTruststorePassword": {
+			"name": "camel.sink.endpoint.sslTruststorePassword",
+			"description": "The password for the trust store file.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.sink.endpoint.sslTruststoreType": {
+			"name": "camel.sink.endpoint.sslTruststoreType",
+			"description": "The file format of the trust store file. Default value is JKS.",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.additionalProperties": {
+			"name": "camel.component.kafka.additionalProperties",
+			"description": "Sets additional properties for either the kafka consumer or the kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties. For example: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http:\/\/localhost:8811\/avro",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.brokers": {
+			"name": "camel.component.kafka.brokers",
+			"description": "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.clientId": {
+			"name": "camel.component.kafka.clientId",
+			"description": "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.configuration": {
+			"name": "camel.component.kafka.configuration",
+			"description": "Allows to pre-configure the Kafka component with common options that the endpoints will reuse.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.headerFilterStrategy": {
+			"name": "camel.component.kafka.headerFilterStrategy",
+			"description": "To use a custom HeaderFilterStrategy to filter headers to and from the Camel message.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.reconnectBackoffMaxMs": {
+			"name": "camel.component.kafka.reconnectBackoffMaxMs",
+			"description": "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.",
+			"defaultValue": "\"1000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.shutdownTimeout": {
+			"name": "camel.component.kafka.shutdownTimeout",
+			"description": "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.",
+			"defaultValue": "30000",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.bufferMemorySize": {
+			"name": "camel.component.kafka.bufferMemorySize",
+			"description": "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some additional memory will [...]
+			"defaultValue": "\"33554432\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.compressionCodec": {
+			"name": "camel.component.kafka.compressionCodec",
+			"description": "This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip, snappy and lz4. One of: [none] [gzip] [snappy] [lz4]",
+			"defaultValue": "\"none\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.connectionMaxIdleMs": {
+			"name": "camel.component.kafka.connectionMaxIdleMs",
+			"description": "Close idle connections after the number of milliseconds specified by this config.",
+			"defaultValue": "\"540000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.enableIdempotence": {
+			"name": "camel.component.kafka.enableIdempotence",
+			"description": "If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1, retries cannot be zero, and additionally acks must be set to 'all'.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kafkaHeaderSerializer": {
+			"name": "camel.component.kafka.kafkaHeaderSerializer",
+			"description": "To use a custom KafkaHeaderSerializer to serialize kafka header values",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.key": {
+			"name": "camel.component.kafka.key",
+			"description": "The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.keySerializerClass": {
+			"name": "camel.component.kafka.keySerializerClass",
+			"description": "The serializer class for keys (defaults to the same as for messages if nothing is given).",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringSerializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.lazyStartProducer": {
+			"name": "camel.component.kafka.lazyStartProducer",
+			"description": "Whether the producer should be started lazily (on the first message). By starting lazily you can use this to allow CamelContext and routes to start up in situations where a producer may otherwise fail during starting and cause the route to fail to be started. By deferring this startup to be lazy, the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the pr [...]
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.lingerMs": {
+			"name": "camel.component.kafka.lingerMs",
+			"description": "The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay; that is, rather than immediately sending out a record, the producer will wait for [...]
+			"defaultValue": "\"0\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxBlockMs": {
+			"name": "camel.component.kafka.maxBlockMs",
+			"description": "The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, e.g. the buffer is full or metadata is unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxInFlightRequest": {
+			"name": "camel.component.kafka.maxInFlightRequest",
+			"description": "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).",
+			"defaultValue": "\"5\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxRequestSize": {
+			"name": "camel.component.kafka.maxRequestSize",
+			"description": "The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.",
+			"defaultValue": "\"1048576\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.metadataMaxAgeMs": {
+			"name": "camel.component.kafka.metadataMaxAgeMs",
+			"description": "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.",
+			"defaultValue": "\"300000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.metricReporters": {
+			"name": "camel.component.kafka.metricReporters",
+			"description": "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.metricsSampleWindowMs": {
+			"name": "camel.component.kafka.metricsSampleWindowMs",
+			"description": "The window of time, in milliseconds, over which a metrics sample is computed.",
+			"defaultValue": "\"30000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.noOfMetricsSample": {
+			"name": "camel.component.kafka.noOfMetricsSample",
+			"description": "The number of samples maintained to compute metrics.",
+			"defaultValue": "\"2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.partitioner": {
+			"name": "camel.component.kafka.partitioner",
+			"description": "The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key.",
+			"defaultValue": "\"org.apache.kafka.clients.producer.internals.DefaultPartitioner\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.partitionKey": {
+			"name": "camel.component.kafka.partitionKey",
+			"description": "The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.producerBatchSize": {
+			"name": "camel.component.kafka.producerBatchSize",
+			"description": "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size will make batching less co [...]
+			"defaultValue": "\"16384\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.queueBufferingMaxMessages": {
+			"name": "camel.component.kafka.queueBufferingMaxMessages",
+			"description": "The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped.",
+			"defaultValue": "\"10000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.receiveBufferBytes": {
+			"name": "camel.component.kafka.receiveBufferBytes",
+			"description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.",
+			"defaultValue": "\"65536\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.reconnectBackoffMs": {
+			"name": "camel.component.kafka.reconnectBackoffMs",
+			"description": "The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.",
+			"defaultValue": "\"50\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.recordMetadata": {
+			"name": "camel.component.kafka.recordMetadata",
+			"description": "Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata metadata. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA",
+			"defaultValue": "true",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.requestRequiredAcks": {
+			"name": "camel.component.kafka.requestRequiredAcks",
+			"description": "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this case, and t [...]
+			"defaultValue": "\"1\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.requestTimeoutMs": {
+			"name": "camel.component.kafka.requestTimeoutMs",
+			"description": "The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client.",
+			"defaultValue": "\"30000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.retries": {
+			"name": "camel.component.kafka.retries",
+			"description": "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first.",
+			"defaultValue": "\"0\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.retryBackoffMs": {
+			"name": "camel.component.kafka.retryBackoffMs",
+			"description": "Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.",
+			"defaultValue": "\"100\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sendBufferBytes": {
+			"name": "camel.component.kafka.sendBufferBytes",
+			"description": "Socket write buffer size",
+			"defaultValue": "\"131072\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.serializerClass": {
+			"name": "camel.component.kafka.serializerClass",
+			"description": "The serializer class for messages.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringSerializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.workerPool": {
+			"name": "camel.component.kafka.workerPool",
+			"description": "To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.workerPoolCoreSize": {
+			"name": "camel.component.kafka.workerPoolCoreSize",
+			"description": "Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.",
+			"defaultValue": "\"10\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.workerPoolMaxSize": {
+			"name": "camel.component.kafka.workerPoolMaxSize",
+			"description": "Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.",
+			"defaultValue": "\"20\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.basicPropertyBinding": {
+			"name": "camel.component.kafka.basicPropertyBinding",
+			"description": "Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.schemaRegistryURL": {
+			"name": "camel.component.kafka.schemaRegistryURL",
+			"description": "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.interceptorClasses": {
+			"name": "camel.component.kafka.interceptorClasses",
+			"description": "Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a producer interceptor on a consumer it will throw a ClassCastException at runtime",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosBeforeReloginMinTime": {
+			"name": "camel.component.kafka.kerberosBeforeReloginMinTime",
+			"description": "Login thread sleep time between refresh attempts.",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosInitCmd": {
+			"name": "camel.component.kafka.kerberosInitCmd",
+			"description": "Kerberos kinit command path. Default is \/usr\/bin\/kinit",
+			"defaultValue": "\"\/usr\/bin\/kinit\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosPrincipalToLocalRules": {
+			"name": "camel.component.kafka.kerberosPrincipalToLocalRules",
+			"description": "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}\/{hostname}@{REALM} are mapped to {username}. For more details on the format please see the security authorization and acls documentation. Multiple values can b [...]
+			"defaultValue": "\"DEFAULT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosRenewJitter": {
+			"name": "camel.component.kafka.kerberosRenewJitter",
+			"description": "Percentage of random jitter added to the renewal time.",
+			"defaultValue": "\"0.05\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosRenewWindowFactor": {
+			"name": "camel.component.kafka.kerberosRenewWindowFactor",
+			"description": "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.",
+			"defaultValue": "\"0.8\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslJaasConfig": {
+			"name": "camel.component.kafka.saslJaasConfig",
+			"description": "Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslKerberosServiceName": {
+			"name": "camel.component.kafka.saslKerberosServiceName",
+			"description": "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslMechanism": {
+			"name": "camel.component.kafka.saslMechanism",
+			"description": "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http:\/\/www.iana.org\/assignments\/sasl-mechanisms\/sasl-mechanisms.xhtml",
+			"defaultValue": "\"GSSAPI\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.securityProtocol": {
+			"name": "camel.component.kafka.securityProtocol",
+			"description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported",
+			"defaultValue": "\"PLAINTEXT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslCipherSuites": {
+			"name": "camel.component.kafka.sslCipherSuites",
+			"description": "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslContextParameters": {
+			"name": "camel.component.kafka.sslContextParameters",
+			"description": "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslEnabledProtocols": {
+			"name": "camel.component.kafka.sslEnabledProtocols",
+			"description": "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslEndpointAlgorithm": {
+			"name": "camel.component.kafka.sslEndpointAlgorithm",
+			"description": "The endpoint identification algorithm to validate server hostname using server certificate.",
+			"defaultValue": "\"https\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeymanagerAlgorithm": {
+			"name": "camel.component.kafka.sslKeymanagerAlgorithm",
+			"description": "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"SunX509\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeyPassword": {
+			"name": "camel.component.kafka.sslKeyPassword",
+			"description": "The password of the private key in the key store file. This is optional for client.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeystoreLocation": {
+			"name": "camel.component.kafka.sslKeystoreLocation",
+			"description": "The location of the key store file. This is optional for client and can be used for two-way authentication for client.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeystorePassword": {
+			"name": "camel.component.kafka.sslKeystorePassword",
+			"description": "The store password for the key store file.This is optional for client and only needed if ssl.keystore.location is configured.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeystoreType": {
+			"name": "camel.component.kafka.sslKeystoreType",
+			"description": "The file format of the key store file. This is optional for client. Default value is JKS",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslProtocol": {
+			"name": "camel.component.kafka.sslProtocol",
+			"description": "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslProvider": {
+			"name": "camel.component.kafka.sslProvider",
+			"description": "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTrustmanagerAlgorithm": {
+			"name": "camel.component.kafka.sslTrustmanagerAlgorithm",
+			"description": "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"PKIX\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTruststoreLocation": {
+			"name": "camel.component.kafka.sslTruststoreLocation",
+			"description": "The location of the trust store file.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTruststorePassword": {
+			"name": "camel.component.kafka.sslTruststorePassword",
+			"description": "The password for the trust store file.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTruststoreType": {
+			"name": "camel.component.kafka.sslTruststoreType",
+			"description": "The file format of the trust store file. Default value is JKS.",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.useGlobalSslContextParameters": {
+			"name": "camel.component.kafka.useGlobalSslContextParameters",
+			"description": "Enable usage of global SSL context parameters.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		}
+	}
+}
diff --git a/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-source.json b/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-source.json
new file mode 100644
index 0000000..6b0a62c
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/generated/resources/camel-kafka-source.json
@@ -0,0 +1,738 @@
+{
+	"connector": {
+		"class": "org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector",
+		"artifactId": "camel-kafka-kafka-connector",
+		"groupId": "org.apache.camel.kafkaconnector",
+		"id": "camel-kafka-source",
+		"type": "source",
+		"version": "0.6.0-SNAPSHOT"
+	},
+	"properties": {
+		"camel.source.path.topic": {
+			"name": "camel.source.path.topic",
+			"description": "Name of the topic to use. On the consumer you can use comma to separate multiple topics. A producer can only send a message to a single topic.",
+			"defaultValue": "null",
+			"priority": "HIGH"
+		},
+		"camel.source.endpoint.additionalProperties": {
+			"name": "camel.source.endpoint.additionalProperties",
+			"description": "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http:\/\/localhost:8811\/avro",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.brokers": {
+			"name": "camel.source.endpoint.brokers",
+			"description": "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.clientId": {
+			"name": "camel.source.endpoint.clientId",
+			"description": "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.headerFilterStrategy": {
+			"name": "camel.source.endpoint.headerFilterStrategy",
+			"description": "To use a custom HeaderFilterStrategy to filter header to and from Camel message.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.reconnectBackoffMaxMs": {
+			"name": "camel.source.endpoint.reconnectBackoffMaxMs",
+			"description": "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.",
+			"defaultValue": "\"1000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.shutdownTimeout": {
+			"name": "camel.source.endpoint.shutdownTimeout",
+			"description": "Timeout in milli seconds to wait gracefully for the consumer or producer to shutdown and terminate its worker threads.",
+			"defaultValue": "30000",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.allowManualCommit": {
+			"name": "camel.source.endpoint.allowManualCommit",
+			"description": "Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.autoCommitEnable": {
+			"name": "camel.source.endpoint.autoCommitEnable",
+			"description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.",
+			"defaultValue": "\"true\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.autoCommitIntervalMs": {
+			"name": "camel.source.endpoint.autoCommitIntervalMs",
+			"description": "The frequency in ms that the consumer offsets are committed to zookeeper.",
+			"defaultValue": "\"5000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.autoCommitOnStop": {
+			"name": "camel.source.endpoint.autoCommitOnStop",
+			"description": "Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires the option autoCommitEnable is turned on. The possible values are: sync, async, or none. And sync is the default value. One of: [sync] [async] [none]",
+			"defaultValue": "\"sync\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.autoOffsetReset": {
+			"name": "camel.source.endpoint.autoOffsetReset",
+			"description": "What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest : automatically reset the offset to the earliest offset latest : automatically reset the offset to the latest offset fail: throw exception to the consumer One of: [latest] [earliest] [none]",
+			"defaultValue": "\"latest\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.breakOnFirstError": {
+			"name": "camel.source.endpoint.breakOnFirstError",
+			"description": "This options controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same message if its bound to fail every time, eg a poison message. Therefore i [...]
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.bridgeErrorHandler": {
+			"name": "camel.source.endpoint.bridgeErrorHandler",
+			"description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.checkCrcs": {
+			"name": "camel.source.endpoint.checkCrcs",
+			"description": "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.",
+			"defaultValue": "\"true\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.consumerRequestTimeoutMs": {
+			"name": "camel.source.endpoint.consumerRequestTimeoutMs",
+			"description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.",
+			"defaultValue": "\"40000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.consumersCount": {
+			"name": "camel.source.endpoint.consumersCount",
+			"description": "The number of consumers that connect to kafka server",
+			"defaultValue": "1",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.consumerStreams": {
+			"name": "camel.source.endpoint.consumerStreams",
+			"description": "Number of concurrent consumers on the consumer",
+			"defaultValue": "10",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.fetchMaxBytes": {
+			"name": "camel.source.endpoint.fetchMaxBytes",
+			"description": "The maximum amount of data the server should return for a fetch request This is not an absolute maximum, if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs multiple fetches in parallel.",
+			"defaultValue": "\"52428800\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.fetchMinBytes": {
+			"name": "camel.source.endpoint.fetchMinBytes",
+			"description": "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.",
+			"defaultValue": "\"1\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.fetchWaitMaxMs": {
+			"name": "camel.source.endpoint.fetchWaitMaxMs",
+			"description": "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes",
+			"defaultValue": "\"500\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.groupId": {
+			"name": "camel.source.endpoint.groupId",
+			"description": "A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id multiple processes indicate that they are all part of the same consumer group. This option is required for consumers.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.heartbeatIntervalMs": {
+			"name": "camel.source.endpoint.heartbeatIntervalMs",
+			"description": "The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1\/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.",
+			"defaultValue": "\"3000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kafkaHeaderDeserializer": {
+			"name": "camel.source.endpoint.kafkaHeaderDeserializer",
+			"description": "To use a custom KafkaHeaderDeserializer to deserialize kafka headers values",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.keyDeserializer": {
+			"name": "camel.source.endpoint.keyDeserializer",
+			"description": "Deserializer class for key that implements the Deserializer interface.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringDeserializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.maxPartitionFetchBytes": {
+			"name": "camel.source.endpoint.maxPartitionFetchBytes",
+			"description": "The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition.",
+			"defaultValue": "\"1048576\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.maxPollIntervalMs": {
+			"name": "camel.source.endpoint.maxPollIntervalMs",
+			"description": "The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.maxPollRecords": {
+			"name": "camel.source.endpoint.maxPollRecords",
+			"description": "The maximum number of records returned in a single call to poll()",
+			"defaultValue": "\"500\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.offsetRepository": {
+			"name": "camel.source.endpoint.offsetRepository",
+			"description": "The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.partitionAssignor": {
+			"name": "camel.source.endpoint.partitionAssignor",
+			"description": "The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used",
+			"defaultValue": "\"org.apache.kafka.clients.consumer.RangeAssignor\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.pollTimeoutMs": {
+			"name": "camel.source.endpoint.pollTimeoutMs",
+			"description": "The timeout used when polling the KafkaConsumer.",
+			"defaultValue": "\"5000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.seekTo": {
+			"name": "camel.source.endpoint.seekTo",
+			"description": "Set if KafkaConsumer will read from beginning or end on startup: beginning : read from beginning end : read from end This is replacing the earlier property seekToBeginning One of: [beginning] [end]",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sessionTimeoutMs": {
+			"name": "camel.source.endpoint.sessionTimeoutMs",
+			"description": "The timeout used to detect failures when using Kafka's group management facilities.",
+			"defaultValue": "\"10000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.specificAvroReader": {
+			"name": "camel.source.endpoint.specificAvroReader",
+			"description": "This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.topicIsPattern": {
+			"name": "camel.source.endpoint.topicIsPattern",
+			"description": "Whether the topic is a pattern (regular expression). This can be used to subscribe to dynamic number of topics matching the pattern.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.valueDeserializer": {
+			"name": "camel.source.endpoint.valueDeserializer",
+			"description": "Deserializer class for value that implements the Deserializer interface.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringDeserializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.exceptionHandler": {
+			"name": "camel.source.endpoint.exceptionHandler",
+			"description": "To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.exchangePattern": {
+			"name": "camel.source.endpoint.exchangePattern",
+			"description": "Sets the exchange pattern when the consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut]",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.basicPropertyBinding": {
+			"name": "camel.source.endpoint.basicPropertyBinding",
+			"description": "Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.synchronous": {
+			"name": "camel.source.endpoint.synchronous",
+			"description": "Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported).",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.schemaRegistryURL": {
+			"name": "camel.source.endpoint.schemaRegistryURL",
+			"description": "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.interceptorClasses": {
+			"name": "camel.source.endpoint.interceptorClasses",
+			"description": "Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor Note that if you use Producer interceptor on a consumer it will throw a class cast exception in runtime",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kerberosBeforeReloginMinTime": {
+			"name": "camel.source.endpoint.kerberosBeforeReloginMinTime",
+			"description": "Login thread sleep time between refresh attempts.",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kerberosInitCmd": {
+			"name": "camel.source.endpoint.kerberosInitCmd",
+			"description": "Kerberos kinit command path. Default is \/usr\/bin\/kinit",
+			"defaultValue": "\"\/usr\/bin\/kinit\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kerberosPrincipalToLocalRules": {
+			"name": "camel.source.endpoint.kerberosPrincipalToLocalRules",
+			"description": "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}\/{hostname}{REALM} are mapped to {username}. For more details on the format please see the security authorization and acls documentation.. Multiple values can b [...]
+			"defaultValue": "\"DEFAULT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kerberosRenewJitter": {
+			"name": "camel.source.endpoint.kerberosRenewJitter",
+			"description": "Percentage of random jitter added to the renewal time.",
+			"defaultValue": "\"0.05\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.kerberosRenewWindowFactor": {
+			"name": "camel.source.endpoint.kerberosRenewWindowFactor",
+			"description": "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.",
+			"defaultValue": "\"0.8\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.saslJaasConfig": {
+			"name": "camel.source.endpoint.saslJaasConfig",
+			"description": "Expose the kafka sasl.jaas.config parameter Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.saslKerberosServiceName": {
+			"name": "camel.source.endpoint.saslKerberosServiceName",
+			"description": "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.saslMechanism": {
+			"name": "camel.source.endpoint.saslMechanism",
+			"description": "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http:\/\/www.iana.org\/assignments\/sasl-mechanisms\/sasl-mechanisms.xhtml",
+			"defaultValue": "\"GSSAPI\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.securityProtocol": {
+			"name": "camel.source.endpoint.securityProtocol",
+			"description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported",
+			"defaultValue": "\"PLAINTEXT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslCipherSuites": {
+			"name": "camel.source.endpoint.sslCipherSuites",
+			"description": "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol.By default all the available cipher suites are supported.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslContextParameters": {
+			"name": "camel.source.endpoint.sslContextParameters",
+			"description": "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslEnabledProtocols": {
+			"name": "camel.source.endpoint.sslEnabledProtocols",
+			"description": "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslEndpointAlgorithm": {
+			"name": "camel.source.endpoint.sslEndpointAlgorithm",
+			"description": "The endpoint identification algorithm to validate server hostname using server certificate.",
+			"defaultValue": "\"https\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslKeymanagerAlgorithm": {
+			"name": "camel.source.endpoint.sslKeymanagerAlgorithm",
+			"description": "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"SunX509\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslKeystoreType": {
+			"name": "camel.source.endpoint.sslKeystoreType",
+			"description": "The file format of the key store file. This is optional for client. Default value is JKS",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslProtocol": {
+			"name": "camel.source.endpoint.sslProtocol",
+			"description": "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslProvider": {
+			"name": "camel.source.endpoint.sslProvider",
+			"description": "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslTrustmanagerAlgorithm": {
+			"name": "camel.source.endpoint.sslTrustmanagerAlgorithm",
+			"description": "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"PKIX\"",
+			"priority": "MEDIUM"
+		},
+		"camel.source.endpoint.sslTruststoreType": {
+			"name": "camel.source.endpoint.sslTruststoreType",
+			"description": "The file format of the trust store file. Default value is JKS.",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.additionalProperties": {
+			"name": "camel.component.kafka.additionalProperties",
+			"description": "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http:\/\/localhost:8811\/avro",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.brokers": {
+			"name": "camel.component.kafka.brokers",
+			"description": "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.clientId": {
+			"name": "camel.component.kafka.clientId",
+			"description": "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.configuration": {
+			"name": "camel.component.kafka.configuration",
+			"description": "Allows to pre-configure the Kafka component with common options that the endpoints will reuse.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.headerFilterStrategy": {
+			"name": "camel.component.kafka.headerFilterStrategy",
+			"description": "To use a custom HeaderFilterStrategy to filter header to and from Camel message.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.reconnectBackoffMaxMs": {
+			"name": "camel.component.kafka.reconnectBackoffMaxMs",
+			"description": "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.",
+			"defaultValue": "\"1000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.shutdownTimeout": {
+			"name": "camel.component.kafka.shutdownTimeout",
+			"description": "Timeout in milli seconds to wait gracefully for the consumer or producer to shutdown and terminate its worker threads.",
+			"defaultValue": "30000",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.allowManualCommit": {
+			"name": "camel.component.kafka.allowManualCommit",
+			"description": "Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.autoCommitEnable": {
+			"name": "camel.component.kafka.autoCommitEnable",
+			"description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.",
+			"defaultValue": "\"true\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.autoCommitIntervalMs": {
+			"name": "camel.component.kafka.autoCommitIntervalMs",
+			"description": "The frequency in ms that the consumer offsets are committed to zookeeper.",
+			"defaultValue": "\"5000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.autoCommitOnStop": {
+			"name": "camel.component.kafka.autoCommitOnStop",
+			"description": "Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires the option autoCommitEnable is turned on. The possible values are: sync, async, or none. And sync is the default value. One of: [sync] [async] [none]",
+			"defaultValue": "\"sync\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.autoOffsetReset": {
+			"name": "camel.component.kafka.autoOffsetReset",
+			"description": "What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest : automatically reset the offset to the earliest offset latest : automatically reset the offset to the latest offset fail: throw exception to the consumer One of: [latest] [earliest] [none]",
+			"defaultValue": "\"latest\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.breakOnFirstError": {
+			"name": "camel.component.kafka.breakOnFirstError",
+			"description": "This options controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same message if its bound to fail every time, eg a poison message. Therefore i [...]
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.bridgeErrorHandler": {
+			"name": "camel.component.kafka.bridgeErrorHandler",
+			"description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.checkCrcs": {
+			"name": "camel.component.kafka.checkCrcs",
+			"description": "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.",
+			"defaultValue": "\"true\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.consumerRequestTimeoutMs": {
+			"name": "camel.component.kafka.consumerRequestTimeoutMs",
+			"description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.",
+			"defaultValue": "\"40000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.consumersCount": {
+			"name": "camel.component.kafka.consumersCount",
+			"description": "The number of consumers that connect to kafka server",
+			"defaultValue": "1",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.consumerStreams": {
+			"name": "camel.component.kafka.consumerStreams",
+			"description": "Number of concurrent consumers on the consumer",
+			"defaultValue": "10",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.fetchMaxBytes": {
+			"name": "camel.component.kafka.fetchMaxBytes",
+			"description": "The maximum amount of data the server should return for a fetch request This is not an absolute maximum, if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs multiple fetches in parallel.",
+			"defaultValue": "\"52428800\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.fetchMinBytes": {
+			"name": "camel.component.kafka.fetchMinBytes",
+			"description": "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.",
+			"defaultValue": "\"1\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.fetchWaitMaxMs": {
+			"name": "camel.component.kafka.fetchWaitMaxMs",
+			"description": "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes",
+			"defaultValue": "\"500\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.groupId": {
+			"name": "camel.component.kafka.groupId",
+			"description": "A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id multiple processes indicate that they are all part of the same consumer group. This option is required for consumers.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.heartbeatIntervalMs": {
+			"name": "camel.component.kafka.heartbeatIntervalMs",
+			"description": "The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1\/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.",
+			"defaultValue": "\"3000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kafkaHeaderDeserializer": {
+			"name": "camel.component.kafka.kafkaHeaderDeserializer",
+			"description": "To use a custom KafkaHeaderDeserializer to deserialize kafka headers values",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.keyDeserializer": {
+			"name": "camel.component.kafka.keyDeserializer",
+			"description": "Deserializer class for key that implements the Deserializer interface.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringDeserializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxPartitionFetchBytes": {
+			"name": "camel.component.kafka.maxPartitionFetchBytes",
+			"description": "The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition.",
+			"defaultValue": "\"1048576\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxPollIntervalMs": {
+			"name": "camel.component.kafka.maxPollIntervalMs",
+			"description": "The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.maxPollRecords": {
+			"name": "camel.component.kafka.maxPollRecords",
+			"description": "The maximum number of records returned in a single call to poll()",
+			"defaultValue": "\"500\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.offsetRepository": {
+			"name": "camel.component.kafka.offsetRepository",
+			"description": "The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.partitionAssignor": {
+			"name": "camel.component.kafka.partitionAssignor",
+			"description": "The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used",
+			"defaultValue": "\"org.apache.kafka.clients.consumer.RangeAssignor\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.pollTimeoutMs": {
+			"name": "camel.component.kafka.pollTimeoutMs",
+			"description": "The timeout used when polling the KafkaConsumer.",
+			"defaultValue": "\"5000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.seekTo": {
+			"name": "camel.component.kafka.seekTo",
+			"description": "Set if KafkaConsumer will read from beginning or end on startup: beginning : read from beginning end : read from end This is replacing the earlier property seekToBeginning One of: [beginning] [end]",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sessionTimeoutMs": {
+			"name": "camel.component.kafka.sessionTimeoutMs",
+			"description": "The timeout used to detect failures when using Kafka's group management facilities.",
+			"defaultValue": "\"10000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.specificAvroReader": {
+			"name": "camel.component.kafka.specificAvroReader",
+			"description": "This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.topicIsPattern": {
+			"name": "camel.component.kafka.topicIsPattern",
+			"description": "Whether the topic is a pattern (regular expression). This can be used to subscribe to dynamic number of topics matching the pattern.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.valueDeserializer": {
+			"name": "camel.component.kafka.valueDeserializer",
+			"description": "Deserializer class for value that implements the Deserializer interface.",
+			"defaultValue": "\"org.apache.kafka.common.serialization.StringDeserializer\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kafkaManualCommitFactory": {
+			"name": "camel.component.kafka.kafkaManualCommitFactory",
+			"description": "Factory to use for creating KafkaManualCommit instances. This allows to plugin a custom factory to create custom KafkaManualCommit instances in case special logic is needed when doing manual commits that deviates from the default implementation that comes out of the box.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.basicPropertyBinding": {
+			"name": "camel.component.kafka.basicPropertyBinding",
+			"description": "Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.schemaRegistryURL": {
+			"name": "camel.component.kafka.schemaRegistryURL",
+			"description": "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.interceptorClasses": {
+			"name": "camel.component.kafka.interceptorClasses",
+			"description": "Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor Note that if you use Producer interceptor on a consumer it will throw a class cast exception in runtime",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosBeforeReloginMinTime": {
+			"name": "camel.component.kafka.kerberosBeforeReloginMinTime",
+			"description": "Login thread sleep time between refresh attempts.",
+			"defaultValue": "\"60000\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosInitCmd": {
+			"name": "camel.component.kafka.kerberosInitCmd",
+			"description": "Kerberos kinit command path. Default is \/usr\/bin\/kinit",
+			"defaultValue": "\"\/usr\/bin\/kinit\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosPrincipalToLocalRules": {
+			"name": "camel.component.kafka.kerberosPrincipalToLocalRules",
+			"description": "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}\/{hostname}{REALM} are mapped to {username}. For more details on the format please see the security authorization and acls documentation.. Multiple values can b [...]
+			"defaultValue": "\"DEFAULT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosRenewJitter": {
+			"name": "camel.component.kafka.kerberosRenewJitter",
+			"description": "Percentage of random jitter added to the renewal time.",
+			"defaultValue": "\"0.05\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.kerberosRenewWindowFactor": {
+			"name": "camel.component.kafka.kerberosRenewWindowFactor",
+			"description": "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.",
+			"defaultValue": "\"0.8\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslJaasConfig": {
+			"name": "camel.component.kafka.saslJaasConfig",
+			"description": "Expose the kafka sasl.jaas.config parameter Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslKerberosServiceName": {
+			"name": "camel.component.kafka.saslKerberosServiceName",
+			"description": "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.saslMechanism": {
+			"name": "camel.component.kafka.saslMechanism",
+			"description": "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http:\/\/www.iana.org\/assignments\/sasl-mechanisms\/sasl-mechanisms.xhtml",
+			"defaultValue": "\"GSSAPI\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.securityProtocol": {
+			"name": "camel.component.kafka.securityProtocol",
+			"description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported",
+			"defaultValue": "\"PLAINTEXT\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslCipherSuites": {
+			"name": "camel.component.kafka.sslCipherSuites",
+			"description": "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol.By default all the available cipher suites are supported.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslContextParameters": {
+			"name": "camel.component.kafka.sslContextParameters",
+			"description": "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslEnabledProtocols": {
+			"name": "camel.component.kafka.sslEnabledProtocols",
+			"description": "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslEndpointAlgorithm": {
+			"name": "camel.component.kafka.sslEndpointAlgorithm",
+			"description": "The endpoint identification algorithm to validate server hostname using server certificate.",
+			"defaultValue": "\"https\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeymanagerAlgorithm": {
+			"name": "camel.component.kafka.sslKeymanagerAlgorithm",
+			"description": "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"SunX509\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslKeystoreType": {
+			"name": "camel.component.kafka.sslKeystoreType",
+			"description": "The file format of the key store file. This is optional for client. Default value is JKS",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslProtocol": {
+			"name": "camel.component.kafka.sslProtocol",
+			"description": "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.",
+			"defaultValue": "\"TLSv1.2\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslProvider": {
+			"name": "camel.component.kafka.sslProvider",
+			"description": "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.",
+			"defaultValue": "null",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTrustmanagerAlgorithm": {
+			"name": "camel.component.kafka.sslTrustmanagerAlgorithm",
+			"description": "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.",
+			"defaultValue": "\"PKIX\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.sslTruststoreType": {
+			"name": "camel.component.kafka.sslTruststoreType",
+			"description": "The file format of the trust store file. Default value is JKS.",
+			"defaultValue": "\"JKS\"",
+			"priority": "MEDIUM"
+		},
+		"camel.component.kafka.useGlobalSslContextParameters": {
+			"name": "camel.component.kafka.useGlobalSslContextParameters",
+			"description": "Enable usage of global SSL context parameters.",
+			"defaultValue": "false",
+			"priority": "MEDIUM"
+		}
+	}
+}
diff --git a/connectors/camel-kafka-kafka-connector/src/main/assembly/package.xml b/connectors/camel-kafka-kafka-connector/src/main/assembly/package.xml
new file mode 100644
index 0000000..4fd2529
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/assembly/package.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<assembly>
+  <!-- Assembles a packaged version targeting OS installation. -->
+  <id>package</id>
+  <formats>
+    <format>zip</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}/../..</directory>
+      <outputDirectory>${project.artifactId}/</outputDirectory>
+      <includes>
+        <include>README*</include>
+        <include>LICENSE*</include>
+        <include>NOTICE*</include>
+        <include>licenses/</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/src/main/docs</directory>
+      <outputDirectory>docs/</outputDirectory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>${project.artifactId}/</outputDirectory>
+      <useProjectArtifact>true</useProjectArtifact>
+      <useTransitiveFiltering>true</useTransitiveFiltering>
+      <excludes>
+        <exclude>org.apache.kafka:connect-api</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+</assembly>
diff --git a/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-sink-connector.adoc b/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-sink-connector.adoc
new file mode 100644
index 0000000..a2361a0
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-sink-connector.adoc
@@ -0,0 +1,187 @@
+// kafka-connector options: START
+[[camel-kafka-kafka-connector-sink]]
+= camel-kafka-kafka-connector sink configuration
+
+When using camel-kafka-kafka-connector as a sink make sure to use the following Maven dependency to have support for the connector:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel.kafkaconnector</groupId>
+  <artifactId>camel-kafka-kafka-connector</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel Kafka connector version -->
+</dependency>
+----
+
+To use this sink connector in Kafka Connect you'll need to set the following connector.class:
+
+[source,java]
+----
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
+----
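+
+A complete sink configuration combines this connector class with the standard Kafka Connect settings and the connector's own camel.sink.* options documented below. The following is only a minimal sketch; the topic names and broker address are placeholder values, not defaults:
+
+[source,properties]
+----
+name=CamelKafkaSinkConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
+tasks.max=1
+
+# Kafka Connect topic(s) to consume from (placeholder name)
+topics=mytopic
+
+# Remote Kafka topic and brokers the Camel endpoint produces to (placeholder values)
+camel.sink.path.topic=remote-topic
+camel.sink.endpoint.brokers=remotehost:9092
+----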
+
+
+The camel-kafka sink connector supports 134 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Priority
+| *camel.sink.path.topic* | Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic. | null | HIGH
+| *camel.sink.endpoint.additionalProperties* | Sets additional properties for either the Kafka consumer or the Kafka producer in case they can't be set directly on the Camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.sink.endpoint.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.sink.endpoint.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.sink.endpoint.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.sink.endpoint.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.sink.endpoint.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.sink.endpoint.bufferMemorySize* | The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Som [...]
+| *camel.sink.endpoint.compressionCodec* | This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip and snappy. One of: [none] [gzip] [snappy] [lz4] | "none" | MEDIUM
+| *camel.sink.endpoint.connectionMaxIdleMs* | Close idle connections after the number of milliseconds specified by this config. | "540000" | MEDIUM
+| *camel.sink.endpoint.enableIdempotence* | If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true, this option requires max.in.flight.requests.per.connection to be set to 1, retries cannot be zero, and additionally acks must be set to 'all'. | false | MEDIUM
+| *camel.sink.endpoint.kafkaHeaderSerializer* | To use a custom KafkaHeaderSerializer to serialize kafka header values. | null | MEDIUM
+| *camel.sink.endpoint.key* | The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY. | null | MEDIUM
+| *camel.sink.endpoint.keySerializerClass* | The serializer class for keys (defaults to the same as for messages if nothing is given). | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.sink.endpoint.lazyStartProducer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then cre [...]
+| *camel.sink.endpoint.lingerMs* | The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay that is, rather than immediately sending out a record the produc [...]
+| *camel.sink.endpoint.maxBlockMs* | The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, for example: buffer full, metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata | "6000 [...]
+| *camel.sink.endpoint.maxInFlightRequest* | The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled). | "5" | MEDIUM
+| *camel.sink.endpoint.maxRequestSize* | The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. | "1048576" | MEDIUM
+| *camel.sink.endpoint.metadataMaxAgeMs* | The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions. | "300000" | MEDIUM
+| *camel.sink.endpoint.metricReporters* | A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics. | null | MEDIUM
+| *camel.sink.endpoint.metricsSampleWindowMs* | The window of time in milliseconds a metrics sample is computed over. | "30000" | MEDIUM
+| *camel.sink.endpoint.noOfMetricsSample* | The number of samples maintained to compute metrics. | "2" | MEDIUM
+| *camel.sink.endpoint.partitioner* | The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key. | "org.apache.kafka.clients.producer.internals.DefaultPartitioner" | MEDIUM
+| *camel.sink.endpoint.partitionKey* | The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY. | null | MEDIUM
+| *camel.sink.endpoint.producerBatchSize* | The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size w [...]
+| *camel.sink.endpoint.queueBufferingMaxMessages* | The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped. | "10000" | MEDIUM
+| *camel.sink.endpoint.receiveBufferBytes* | The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. | "65536" | MEDIUM
+| *camel.sink.endpoint.reconnectBackoffMs* | The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker. | "50" | MEDIUM
+| *camel.sink.endpoint.recordMetadata* | Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata metadata. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA. | true | MEDIUM
+| *camel.sink.endpoint.requestRequiredAcks* | The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the [...]
+| *camel.sink.endpoint.requestTimeoutMs* | The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client. | "30000" | MEDIUM
+| *camel.sink.endpoint.retries* | Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first. | "0" | MEDIUM
+| *camel.sink.endpoint.retryBackoffMs* | Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. | "100" | MEDIUM
+| *camel.sink.endpoint.sendBufferBytes* | Socket write buffer size. | "131072" | MEDIUM
+| *camel.sink.endpoint.serializerClass* | The serializer class for messages. | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.sink.endpoint.workerPool* | To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed. | null | MEDIUM
+| *camel.sink.endpoint.workerPoolCoreSize* | Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. | "10" | MEDIUM
+| *camel.sink.endpoint.workerPoolMaxSize* | Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. | "20" | MEDIUM
+| *camel.sink.endpoint.basicPropertyBinding* | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.sink.endpoint.synchronous* | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | MEDIUM
+| *camel.sink.endpoint.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.sink.endpoint.interceptorClasses* | Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor; consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.sink.endpoint.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.sink.endpoint.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.sink.endpoint.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}\{REALM\} are mapped to \{username\}. For more details on the format please see the security authorization an [...]
+| *camel.sink.endpoint.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.sink.endpoint.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.sink.endpoint.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.sink.endpoint.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.sink.endpoint.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.sink.endpoint.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported. | "PLAINTEXT" | MEDIUM
+| *camel.sink.endpoint.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.sink.endpoint.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.sink.endpoint.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.sink.endpoint.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.sink.endpoint.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.sink.endpoint.sslKeyPassword* | The password of the private key in the key store file. This is optional for client. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystoreLocation* | The location of the key store file. This is optional for client and can be used for two-way authentication for client. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystorePassword* | The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS. | "JKS" | MEDIUM
+| *camel.sink.endpoint.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.sink.endpoint.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.sink.endpoint.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.sink.endpoint.sslTruststoreLocation* | The location of the trust store file. | null | MEDIUM
+| *camel.sink.endpoint.sslTruststorePassword* | The password for the trust store file. | null | MEDIUM
+| *camel.sink.endpoint.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.additionalProperties* | Sets additional properties for either the kafka consumer or the kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties. (note the trailing dot), e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.component.kafka.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.component.kafka.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.component.kafka.configuration* | Allows to pre-configure the Kafka component with common options that the endpoints will reuse. | null | MEDIUM
+| *camel.component.kafka.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.component.kafka.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.component.kafka.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.component.kafka.bufferMemorySize* | The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. S [...]
+| *camel.component.kafka.compressionCodec* | This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip, snappy and lz4. One of: [none] [gzip] [snappy] [lz4] | "none" | MEDIUM
+| *camel.component.kafka.connectionMaxIdleMs* | Close idle connections after the number of milliseconds specified by this config. | "540000" | MEDIUM
+| *camel.component.kafka.enableIdempotence* | If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true, this option requires max.in.flight.requests.per.connection to be set to 1, retries cannot be zero, and additionally acks must be set to 'all'. | false | MEDIUM
+| *camel.component.kafka.kafkaHeaderSerializer* | To use a custom KafkaHeaderSerializer to serialize kafka header values. | null | MEDIUM
+| *camel.component.kafka.key* | The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY. | null | MEDIUM
+| *camel.component.kafka.keySerializerClass* | The serializer class for keys (defaults to the same as for messages if nothing is given). | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.component.kafka.lazyStartProducer* | Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then c [...]
+| *camel.component.kafka.lingerMs* | The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay that is, rather than immediately sending out a record the prod [...]
+| *camel.component.kafka.maxBlockMs* | The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, for example: buffer full, metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata | "60 [...]
+| *camel.component.kafka.maxInFlightRequest* | The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled). | "5" | MEDIUM
+| *camel.component.kafka.maxRequestSize* | The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. | "1048576" | MEDIUM
+| *camel.component.kafka.metadataMaxAgeMs* | The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions. | "300000" | MEDIUM
+| *camel.component.kafka.metricReporters* | A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics. | null | MEDIUM
+| *camel.component.kafka.metricsSampleWindowMs* | The window of time in milliseconds a metrics sample is computed over. | "30000" | MEDIUM
+| *camel.component.kafka.noOfMetricsSample* | The number of samples maintained to compute metrics. | "2" | MEDIUM
+| *camel.component.kafka.partitioner* | The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key. | "org.apache.kafka.clients.producer.internals.DefaultPartitioner" | MEDIUM
+| *camel.component.kafka.partitionKey* | The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY. | null | MEDIUM
+| *camel.component.kafka.producerBatchSize* | The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size [...]
+| *camel.component.kafka.queueBufferingMaxMessages* | The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped. | "10000" | MEDIUM
+| *camel.component.kafka.receiveBufferBytes* | The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. | "65536" | MEDIUM
+| *camel.component.kafka.reconnectBackoffMs* | The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker. | "50" | MEDIUM
+| *camel.component.kafka.recordMetadata* | Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata metadata. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA. | true | MEDIUM
+| *camel.component.kafka.requestRequiredAcks* | The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received t [...]
+| *camel.component.kafka.requestTimeoutMs* | The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client. | "30000" | MEDIUM
+| *camel.component.kafka.retries* | Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first. | "0" [...]
+| *camel.component.kafka.retryBackoffMs* | Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. | "100" | MEDIUM
+| *camel.component.kafka.sendBufferBytes* | Socket write buffer size. | "131072" | MEDIUM
+| *camel.component.kafka.serializerClass* | The serializer class for messages. | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.component.kafka.workerPool* | To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed. | null | MEDIUM
+| *camel.component.kafka.workerPoolCoreSize* | Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. | "10" | MEDIUM
+| *camel.component.kafka.workerPoolMaxSize* | Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. | "20" | MEDIUM
+| *camel.component.kafka.basicPropertyBinding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.component.kafka.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.component.kafka.interceptorClasses* | Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor; consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.component.kafka.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.component.kafka.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.component.kafka.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}\{REALM\} are mapped to \{username\}. For more details on the format please see the security authorization [...]
+| *camel.component.kafka.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.component.kafka.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.component.kafka.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.component.kafka.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.component.kafka.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.component.kafka.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported. | "PLAINTEXT" | MEDIUM
+| *camel.component.kafka.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.component.kafka.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.component.kafka.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.component.kafka.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.component.kafka.sslKeyPassword* | The password of the private key in the key store file. This is optional for client. | null | MEDIUM
+| *camel.component.kafka.sslKeystoreLocation* | The location of the key store file. This is optional for client and can be used for two-way authentication for client. | null | MEDIUM
+| *camel.component.kafka.sslKeystorePassword* | The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. | null | MEDIUM
+| *camel.component.kafka.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.component.kafka.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.component.kafka.sslTruststoreLocation* | The location of the trust store file. | null | MEDIUM
+| *camel.component.kafka.sslTruststorePassword* | The password for the trust store file. | null | MEDIUM
+| *camel.component.kafka.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.useGlobalSslContextParameters* | Enable usage of global SSL context parameters. | false | MEDIUM
+|===
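+
+As an illustration of how the options above combine in a Kafka Connect properties file, here is a hedged sketch of a TLS-enabled sink that also passes a raw Kafka producer property through the additionalProperties. prefix described in the table. Broker addresses, keystore paths and passwords are assumed example values:
+
+[source,properties]
+----
+camel.sink.path.topic=outbound-topic
+camel.sink.endpoint.brokers=broker1:9093,broker2:9093
+camel.sink.endpoint.securityProtocol=SSL
+# hypothetical keystore/truststore locations and passwords
+camel.sink.endpoint.sslKeystoreLocation=/etc/kafka/ssl/client.keystore.jks
+camel.sink.endpoint.sslKeystorePassword=changeit
+camel.sink.endpoint.sslTruststoreLocation=/etc/kafka/ssl/client.truststore.jks
+camel.sink.endpoint.sslTruststorePassword=changeit
+# raw Kafka producer property passed through via the additionalProperties. prefix
+camel.sink.endpoint.additionalProperties.transactional.id=12345
+----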
+
+
+
+The camel-kafka sink connector has no converters out of the box.
+
+
+
+
+
+The camel-kafka sink connector has no transforms out of the box.
+
+
+
+
+
+The camel-kafka sink connector has no aggregation strategies out of the box.
+
+
+
+
+// kafka-connector options: END
diff --git a/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-source-connector.adoc b/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-source-connector.adoc
new file mode 100644
index 0000000..102aff0
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/docs/camel-kafka-kafka-source-connector.adoc
@@ -0,0 +1,174 @@
+// kafka-connector options: START
+[[camel-kafka-kafka-connector-source]]
+= camel-kafka-kafka-connector source configuration
+
+When using camel-kafka-kafka-connector as source make sure to use the following Maven dependency to have support for the connector:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel.kafkaconnector</groupId>
+  <artifactId>camel-kafka-kafka-connector</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel Kafka connector version -->
+</dependency>
+----
+
+To use this Source connector in Kafka Connect you'll need to set the following connector.class
+
+[source,properties]
+----
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+----
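+
+A minimal source configuration might look like the sketch below; the connector name, topics, broker address and consumer group id are illustrative assumptions:
+
+[source,properties]
+----
+name=CamelKafkaSourceConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+tasks.max=1
+# Kafka Connect topic the consumed records are published to (hypothetical name)
+topics=mytopic
+# Camel endpoint configuration: source topic, brokers and consumer group (assumed values)
+camel.source.path.topic=inbound-topic
+camel.source.endpoint.brokers=localhost:9092
+camel.source.endpoint.groupId=my-consumer-group
+----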
+
+
+The camel-kafka source connector supports 121 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Priority
+| *camel.source.path.topic* | Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic. | null | HIGH
+| *camel.source.endpoint.additionalProperties* | Sets additional properties for either the kafka consumer or the kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties. (note the trailing dot), e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.source.endpoint.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.source.endpoint.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.source.endpoint.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.source.endpoint.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.source.endpoint.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.source.endpoint.allowManualCommit* | Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer. | false | MEDIUM
+| *camel.source.endpoint.autoCommitEnable* | If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin. | "true" | MEDIUM
+| *camel.source.endpoint.autoCommitIntervalMs* | The frequency in ms that the consumer offsets are committed to zookeeper. | "5000" | MEDIUM
+| *camel.source.endpoint.autoCommitOnStop* | Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires the option autoCommitEnable is turned on. The possible values are: sync, async, or none. And sync is the default value. One of: [sync] [async] [none] | "sync" | MEDIUM
+| *camel.source.endpoint.autoOffsetReset* | What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none] | "latest" | MEDIUM
+| *camel.source.endpoint.breakOnFirstError* | This options controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same message if its bound to fail every time, eg a  [...]
+| *camel.source.endpoint.bridgeErrorHandler* | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | MEDIUM
+| *camel.source.endpoint.checkCrcs* | Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance. | "true" | MEDIUM
+| *camel.source.endpoint.consumerRequestTimeoutMs* | The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. | "40000" | MEDIUM
+| *camel.source.endpoint.consumersCount* | The number of consumers that connect to the kafka server. | 1 | MEDIUM
+| *camel.source.endpoint.consumerStreams* | Number of concurrent consumers on the consumer | 10 | MEDIUM
+| *camel.source.endpoint.fetchMaxBytes* | The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs mul [...]
+| *camel.source.endpoint.fetchMinBytes* | The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. | "1" | MEDIUM
+| *camel.source.endpoint.fetchWaitMaxMs* | The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes. | "500" | MEDIUM
+| *camel.source.endpoint.groupId* | A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id, multiple processes indicate that they are all part of the same consumer group. This option is required for consumers. | null | MEDIUM
+| *camel.source.endpoint.heartbeatIntervalMs* | The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal r [...]
+| *camel.source.endpoint.kafkaHeaderDeserializer* | To use a custom KafkaHeaderDeserializer to deserialize kafka header values. | null | MEDIUM
+| *camel.source.endpoint.keyDeserializer* | Deserializer class for key that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.source.endpoint.maxPartitionFetchBytes* | The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions * max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition. | "1048576 [...]
+| *camel.source.endpoint.maxPollIntervalMs* | The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. | null | MEDIUM
+| *camel.source.endpoint.maxPollRecords* | The maximum number of records returned in a single call to poll(). | "500" | MEDIUM
+| *camel.source.endpoint.offsetRepository* | The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit. | null | MEDIUM
+| *camel.source.endpoint.partitionAssignor* | The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used. | "org.apache.kafka.clients.consumer.RangeAssignor" | MEDIUM
+| *camel.source.endpoint.pollTimeoutMs* | The timeout used when polling the KafkaConsumer. | "5000" | MEDIUM
+| *camel.source.endpoint.seekTo* | Set if the KafkaConsumer will read from the beginning or the end on startup: beginning: read from the beginning; end: read from the end. This replaces the earlier property seekToBeginning. One of: [beginning] [end] | null | MEDIUM
+| *camel.source.endpoint.sessionTimeoutMs* | The timeout used to detect failures when using Kafka's group management facilities. | "10000" | MEDIUM
+| *camel.source.endpoint.specificAvroReader* | This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka) | false | MEDIUM
+| *camel.source.endpoint.topicIsPattern* | Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern. | false | MEDIUM
+| *camel.source.endpoint.valueDeserializer* | Deserializer class for value that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.source.endpoint.exceptionHandler* | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. | null | MEDIUM
+| *camel.source.endpoint.exchangePattern* | Sets the exchange pattern when the consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut] | null | MEDIUM
+| *camel.source.endpoint.basicPropertyBinding* | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.source.endpoint.synchronous* | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | MEDIUM
+| *camel.source.endpoint.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.source.endpoint.interceptorClasses* | Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor; consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.source.endpoint.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.source.endpoint.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.source.endpoint.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}\{REALM\} are mapped to \{username\}. For more details on the format please see the security authorization [...]
+| *camel.source.endpoint.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.source.endpoint.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.source.endpoint.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.source.endpoint.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.source.endpoint.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.source.endpoint.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported. | "PLAINTEXT" | MEDIUM
+| *camel.source.endpoint.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.source.endpoint.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.source.endpoint.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.source.endpoint.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.source.endpoint.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.source.endpoint.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS. | "JKS" | MEDIUM
+| *camel.source.endpoint.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.source.endpoint.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.source.endpoint.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.source.endpoint.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.additionalProperties* | Sets additional properties for either the kafka consumer or the kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties. (note the trailing dot), e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.component.kafka.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.component.kafka.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.component.kafka.configuration* | Allows to pre-configure the Kafka component with common options that the endpoints will reuse. | null | MEDIUM
+| *camel.component.kafka.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.component.kafka.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.component.kafka.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.component.kafka.allowManualCommit* | Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer. | false | MEDIUM
+| *camel.component.kafka.autoCommitEnable* | If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin. | "true" | MEDIUM
+| *camel.component.kafka.autoCommitIntervalMs* | The frequency in ms that the consumer offsets are committed to zookeeper. | "5000" | MEDIUM
+| *camel.component.kafka.autoCommitOnStop* | Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires the option autoCommitEnable is turned on. The possible values are: sync, async, or none. And sync is the default value. One of: [sync] [async] [none] | "sync" | MEDIUM
+| *camel.component.kafka.autoOffsetReset* | What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none] | "latest" | MEDIUM
+| *camel.component.kafka.breakOnFirstError* | This options controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same message if its bound to fail every time, eg a  [...]
+| *camel.component.kafka.bridgeErrorHandler* | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | MEDIUM
+| *camel.component.kafka.checkCrcs* | Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance. | "true" | MEDIUM
+| *camel.component.kafka.consumerRequestTimeoutMs* | The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. | "40000" | MEDIUM
+| *camel.component.kafka.consumersCount* | The number of consumers that connect to the kafka server. | 1 | MEDIUM
+| *camel.component.kafka.consumerStreams* | Number of concurrent consumers on the consumer | 10 | MEDIUM
+| *camel.component.kafka.fetchMaxBytes* | The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs mul [...]
+| *camel.component.kafka.fetchMinBytes* | The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. | "1" | MEDIUM
+| *camel.component.kafka.fetchWaitMaxMs* | The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes. | "500" | MEDIUM
+| *camel.component.kafka.groupId* | A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id, multiple processes indicate that they are all part of the same consumer group. This option is required for consumers. | null | MEDIUM
+| *camel.component.kafka.heartbeatIntervalMs* | The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal r [...]
+| *camel.component.kafka.kafkaHeaderDeserializer* | To use a custom KafkaHeaderDeserializer to deserialize kafka header values. | null | MEDIUM
+| *camel.component.kafka.keyDeserializer* | Deserializer class for key that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.component.kafka.maxPartitionFetchBytes* | The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions * max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition. | "1048576 [...]
+| *camel.component.kafka.maxPollIntervalMs* | The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. | null | MEDIUM
+| *camel.component.kafka.maxPollRecords* | The maximum number of records returned in a single call to poll(). | "500" | MEDIUM
+| *camel.component.kafka.offsetRepository* | The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit. | null | MEDIUM
+| *camel.component.kafka.partitionAssignor* | The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used | "org.apache.kafka.clients.consumer.RangeAssignor" | MEDIUM
+| *camel.component.kafka.pollTimeoutMs* | The timeout used when polling the KafkaConsumer. | "5000" | MEDIUM
+| *camel.component.kafka.seekTo* | Set if KafkaConsumer will read from the beginning or the end on startup: beginning (read from beginning), end (read from end). This replaces the earlier property seekToBeginning. One of: [beginning] [end] | null | MEDIUM
+| *camel.component.kafka.sessionTimeoutMs* | The timeout used to detect failures when using Kafka's group management facilities. | "10000" | MEDIUM
+| *camel.component.kafka.specificAvroReader* | This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka) | false | MEDIUM
+| *camel.component.kafka.topicIsPattern* | Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern. | false | MEDIUM
+| *camel.component.kafka.valueDeserializer* | Deserializer class for value that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.component.kafka.kafkaManualCommitFactory* | Factory to use for creating KafkaManualCommit instances. This allows plugging in a custom factory to create custom KafkaManualCommit instances in case special logic is needed when doing manual commits that deviates from the default implementation that comes out of the box. | null | MEDIUM
+| *camel.component.kafka.basicPropertyBinding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.component.kafka.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.component.kafka.interceptorClasses* | Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime | null | MEDIUM
+| *camel.component.kafka.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.component.kafka.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.component.kafka.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}@\{REALM\} are mapped to \{username\}. For more details on the format please see the security authorization [...]
+| *camel.component.kafka.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.component.kafka.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.component.kafka.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.component.kafka.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.component.kafka.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.component.kafka.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported | "PLAINTEXT" | MEDIUM
+| *camel.component.kafka.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.component.kafka.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.component.kafka.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.component.kafka.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.component.kafka.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS | "JKS" | MEDIUM
+| *camel.component.kafka.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.component.kafka.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.component.kafka.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.useGlobalSslContextParameters* | Enable usage of global SSL context parameters. | false | MEDIUM
+|===
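+
+A minimal sketch of tuning a few of these consumer options in a connector properties file (the broker address, group id and values below are illustrative placeholders, not defaults):
+
+----
+camel.component.kafka.brokers=localhost:9092
+camel.component.kafka.groupId=my-consumer-group
+camel.component.kafka.maxPollRecords=200
+camel.component.kafka.seekTo=beginning
+----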
+
+
+
+The camel-kafka source connector has no converters out of the box.
+
+
+
+
+
+The camel-kafka source connector has no transforms out of the box.
+
+
+
+
+
+The camel-kafka source connector has no aggregation strategies out of the box.
+
+
+
+
+// kafka-connector options: END
diff --git a/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSinkConnector.properties b/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSinkConnector.properties
new file mode 100644
index 0000000..09b0d2f
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSinkConnector.properties
@@ -0,0 +1,33 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+
+name=CamelKafkaSinkConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
+tasks.max=1
+
+# use the kafka converters that better suit your needs, these are just defaults:
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+
+# comma separated topics to get messages from
+topics=
+
+# mandatory properties (for a complete properties list see the connector documentation):
+
+# Name of the topic to use. On the consumer you can use comma to separate multiple topics. A producer can only send a message to a single topic.
+camel.sink.path.topic=
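+
+# a hypothetical filled-in example (placeholder values, adjust to your environment):
+# topics=my-kafka-connect-topic
+# camel.sink.path.topic=my-downstream-topic
+# camel.sink.endpoint.brokers=localhost:9092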
+
diff --git a/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSourceConnector.properties b/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSourceConnector.properties
new file mode 100644
index 0000000..57e79bb
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/docs/examples/CamelKafkaSourceConnector.properties
@@ -0,0 +1,33 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+
+name=CamelKafkaSourceConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+tasks.max=1
+
+# use the kafka converters that better suit your needs, these are just defaults:
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+
+# comma separated topics to send messages into
+topics=
+
+# mandatory properties (for a complete properties list see the connector documentation):
+
+# Name of the topic to use. On the consumer you can use comma to separate multiple topics. A producer can only send a message to a single topic.
+camel.source.path.topic=
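+
+# a hypothetical filled-in example (placeholder values, adjust to your environment):
+# topics=my-kafka-connect-topic
+# camel.source.path.topic=my-upstream-topic
+# camel.source.endpoint.brokers=localhost:9092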
+
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnector.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnector.java
new file mode 100644
index 0000000..f6ba254
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnector.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSinkConnector;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSinkConnector extends CamelSinkConnector {
+
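+    // Expose the generated ConfigDef so Kafka Connect can validate and document this connector's properties.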
+    @Override
+    public ConfigDef config() {
+        return CamelKafkaSinkConnectorConfig.conf();
+    }
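+    // Bind this connector to its generated sink task implementation.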
+    @Override
+    public Class<? extends Task> taskClass() {
+        return CamelKafkaSinkTask.class;
+    }
+}
\ No newline at end of file
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnectorConfig.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnectorConfig.java
new file mode 100644
index 0000000..a25a238
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkConnectorConfig.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSinkConnectorConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSinkConnectorConfig extends CamelSinkConnectorConfig {
+
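+    // Each option below is described by three generated constants: a *_CONF key
+    // (the property name users set), a *_DOC string (its documentation), and a
+    // *_DEFAULT value applied when the property is not set.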
+    public static final String CAMEL_SINK_KAFKA_PATH_TOPIC_CONF = "camel.sink.path.topic";
+    public static final String CAMEL_SINK_KAFKA_PATH_TOPIC_DOC = "Name of the topic to use. On the consumer you can use comma to separate multiple topics. A producer can only send a message to a single topic.";
+    public static final String CAMEL_SINK_KAFKA_PATH_TOPIC_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_CONF = "camel.sink.endpoint.additionalProperties";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DOC = "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_CONF = "camel.sink.endpoint.brokers";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_DOC = "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_CONF = "camel.sink.endpoint.clientId";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_DOC = "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_CONF = "camel.sink.endpoint.headerFilterStrategy";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DOC = "To use a custom HeaderFilterStrategy to filter header to and from Camel message.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_CONF = "camel.sink.endpoint.reconnectBackoffMaxMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DEFAULT = "1000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_CONF = "camel.sink.endpoint.shutdownTimeout";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DOC = "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.";
+    public static final Integer CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DEFAULT = 30000;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_CONF = "camel.sink.endpoint.bufferMemorySize";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_DEFAULT = "33554432";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_CONF = "camel.sink.endpoint.compressionCodec";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_DOC = "This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip and snappy. One of: [none] [gzip] [snappy] [lz4]";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_DEFAULT = "none";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_CONF = "camel.sink.endpoint.connectionMaxIdleMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_DOC = "Close idle connections after the number of milliseconds specified by this config.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_DEFAULT = "540000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_CONF = "camel.sink.endpoint.enableIdempotence";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_DOC = "If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1 and retries cannot be zero and additionally acks must be set to 'all'.";
+    public static final Boolean CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_CONF = "camel.sink.endpoint.kafkaHeaderSerializer";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_DOC = "To use a custom KafkaHeaderSerializer to serialize kafka header values";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_CONF = "camel.sink.endpoint.key";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_DOC = "The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_CONF = "camel.sink.endpoint.keySerializerClass";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_DOC = "The serializer class for keys (defaults to the same as for messages if nothing is given).";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_DEFAULT = "org.apache.kafka.common.serialization.StringSerializer";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_CONF = "camel.sink.endpoint.lazyStartProducer";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_DOC = "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when [...]
+    public static final Boolean CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_CONF = "camel.sink.endpoint.lingerMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_DOC = "The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay that is, rather than imme [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_DEFAULT = "0";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_CONF = "camel.sink.endpoint.maxBlockMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_DOC = "The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, e.g. buffer full or metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time t [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_DEFAULT = "60000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_CONF = "camel.sink.endpoint.maxInFlightRequest";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_DEFAULT = "5";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_CONF = "camel.sink.endpoint.maxRequestSize";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_DOC = "The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_DEFAULT = "1048576";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_CONF = "camel.sink.endpoint.metadataMaxAgeMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_DEFAULT = "300000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_CONF = "camel.sink.endpoint.metricReporters";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_CONF = "camel.sink.endpoint.metricsSampleWindowMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_DEFAULT = "30000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_CONF = "camel.sink.endpoint.noOfMetricsSample";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_DOC = "The number of samples maintained to compute metrics.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_DEFAULT = "2";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_CONF = "camel.sink.endpoint.partitioner";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_DOC = "The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_DEFAULT = "org.apache.kafka.clients.producer.internals.DefaultPartitioner";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_CONF = "camel.sink.endpoint.partitionKey";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_DOC = "The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_CONF = "camel.sink.endpoint.producerBatchSize";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data a [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_DEFAULT = "16384";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_CONF = "camel.sink.endpoint.queueBufferingMaxMessages";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_DOC = "The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_DEFAULT = "10000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_CONF = "camel.sink.endpoint.receiveBufferBytes";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_DEFAULT = "65536";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_CONF = "camel.sink.endpoint.reconnectBackoffMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_DOC = "The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_DEFAULT = "50";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_CONF = "camel.sink.endpoint.recordMetadata";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_DOC = "Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List of RecordMetadata. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA";
+    public static final Boolean CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_DEFAULT = true;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_CONF = "camel.sink.endpoint.requestRequiredAcks";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_DOC = "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can  [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_DEFAULT = "1";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_CONF = "camel.sink.endpoint.requestTimeoutMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_DOC = "The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_DEFAULT = "30000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_CONF = "camel.sink.endpoint.retries";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the s [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_DEFAULT = "0";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_CONF = "camel.sink.endpoint.retryBackoffMs";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_DOC = "Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_DEFAULT = "100";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_CONF = "camel.sink.endpoint.sendBufferBytes";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_DOC = "Socket write buffer size";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_DEFAULT = "131072";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_CONF = "camel.sink.endpoint.serializerClass";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_DOC = "The serializer class for messages.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_DEFAULT = "org.apache.kafka.common.serialization.StringSerializer";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CONF = "camel.sink.endpoint.workerPool";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_DOC = "To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. If using this option you must handle the lifecycle of the thread pool and shut the pool down when no longer needed.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_CONF = "camel.sink.endpoint.workerPoolCoreSize";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_DOC = "Number of core threads for the worker pool used to continue routing the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_DEFAULT = "10";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_CONF = "camel.sink.endpoint.workerPoolMaxSize";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_DOC = "Maximum number of threads for the worker pool used to continue routing the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_DEFAULT = "20";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_CONF = "camel.sink.endpoint.basicPropertyBinding";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DOC = "Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities";
+    public static final Boolean CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_CONF = "camel.sink.endpoint.synchronous";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_DOC = "Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported).";
+    public static final Boolean CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLCONF = "camel.sink.endpoint.schemaRegistryURL";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDOC = "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_CONF = "camel.sink.endpoint.interceptorClasses";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DOC = "Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF = "camel.sink.endpoint.kerberosBeforeReloginMinTime";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC = "Login thread sleep time between refresh attempts.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT = "60000";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_CONF = "camel.sink.endpoint.kerberosInitCmd";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DOC = "Kerberos kinit command path. Default is /usr/bin/kinit";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DEFAULT = "/usr/bin/kinit";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF = "camel.sink.endpoint.kerberosPrincipalToLocalRules";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format please [...]
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT = "DEFAULT";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_CONF = "camel.sink.endpoint.kerberosRenewJitter";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DOC = "Percentage of random jitter added to the renewal time.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DEFAULT = "0.05";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_CONF = "camel.sink.endpoint.kerberosRenewWindowFactor";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DOC = "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT = "0.8";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_CONF = "camel.sink.endpoint.saslJaasConfig";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DOC = "Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_CONF = "camel.sink.endpoint.saslKerberosServiceName";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DOC = "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_CONF = "camel.sink.endpoint.saslMechanism";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_DOC = "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_DEFAULT = "GSSAPI";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_CONF = "camel.sink.endpoint.securityProtocol";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DEFAULT = "PLAINTEXT";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_CONF = "camel.sink.endpoint.sslCipherSuites";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DOC = "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_CONF = "camel.sink.endpoint.sslContextParameters";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DOC = "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_CONF = "camel.sink.endpoint.sslEnabledProtocols";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_CONF = "camel.sink.endpoint.sslEndpointAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DEFAULT = "https";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_CONF = "camel.sink.endpoint.sslKeymanagerAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DEFAULT = "SunX509";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_CONF = "camel.sink.endpoint.sslKeyPassword";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. This is optional for client.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_CONF = "camel.sink.endpoint.sslKeystoreLocation";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. This is optional for client and can be used for two-way authentication for client.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_CONF = "camel.sink.endpoint.sslKeystorePassword";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_CONF = "camel.sink.endpoint.sslKeystoreType";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. This is optional for client. Default value is JKS";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_CONF = "camel.sink.endpoint.sslProtocol";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_DOC = "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_CONF = "camel.sink.endpoint.sslProvider";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_CONF = "camel.sink.endpoint.sslTrustmanagerAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT = "PKIX";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_CONF = "camel.sink.endpoint.sslTruststoreLocation";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_CONF = "camel.sink.endpoint.sslTruststorePassword";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_CONF = "camel.sink.endpoint.sslTruststoreType";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. Default value is JKS.";
+    public static final String CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_CONF = "camel.component.kafka.additionalProperties";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DOC = "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BROKERS_CONF = "camel.component.kafka.brokers";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BROKERS_DOC = "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BROKERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_CONF = "camel.component.kafka.clientId";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_DOC = "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_CONF = "camel.component.kafka.configuration";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_DOC = "Allows pre-configuring the Kafka component with common options that the endpoints will reuse.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_CONF = "camel.component.kafka.headerFilterStrategy";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DOC = "To use a custom HeaderFilterStrategy to filter headers to and from the Camel message.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_CONF = "camel.component.kafka.reconnectBackoffMaxMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DEFAULT = "1000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_CONF = "camel.component.kafka.shutdownTimeout";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DOC = "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.";
+    public static final Integer CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DEFAULT = 30000;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_CONF = "camel.component.kafka.bufferMemorySize";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_DEFAULT = "33554432";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_CONF = "camel.component.kafka.compressionCodec";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_DOC = "This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip and snappy. One of: [none] [gzip] [snappy] [lz4]";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_DEFAULT = "none";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_CONF = "camel.component.kafka.connectionMaxIdleMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_DOC = "Close idle connections after the number of milliseconds specified by this config.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_DEFAULT = "540000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_CONF = "camel.component.kafka.enableIdempotence";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_DOC = "If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1 and retries cannot be zero and additionally acks must be set to 'all'.";
+    public static final Boolean CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_CONF = "camel.component.kafka.kafkaHeaderSerializer";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_DOC = "To use a custom KafkaHeaderSerializer to serialize kafka header values";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_CONF = "camel.component.kafka.key";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_DOC = "The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_CONF = "camel.component.kafka.keySerializerClass";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_DOC = "The serializer class for keys (defaults to the same as for messages if nothing is given).";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_DEFAULT = "org.apache.kafka.common.serialization.StringSerializer";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_CONF = "camel.component.kafka.lazyStartProducer";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_DOC = "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that whe [...]
+    public static final Boolean CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_CONF = "camel.component.kafka.lingerMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_DOC = "The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay that is, rather than imm [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_DEFAULT = "0";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_CONF = "camel.component.kafka.maxBlockMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_DOC = "This configuration controls how long sending to Kafka will block. These methods can block for multiple reasons, for example: buffer full, metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time  [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_DEFAULT = "60000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_CONF = "camel.component.kafka.maxInFlightRequest";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_DOC = "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_DEFAULT = "5";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_CONF = "camel.component.kafka.maxRequestSize";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_DOC = "The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_DEFAULT = "1048576";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_CONF = "camel.component.kafka.metadataMaxAgeMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_DEFAULT = "300000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_CONF = "camel.component.kafka.metricReporters";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_CONF = "camel.component.kafka.metricsSampleWindowMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time in milliseconds over which metrics samples are computed.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_DEFAULT = "30000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_CONF = "camel.component.kafka.noOfMetricsSample";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_DOC = "The number of samples maintained to compute metrics.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_DEFAULT = "2";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_CONF = "camel.component.kafka.partitioner";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_DOC = "The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_DEFAULT = "org.apache.kafka.clients.producer.internals.DefaultPartitioner";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_CONF = "camel.component.kafka.partitionKey";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_DOC = "The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_CONF = "camel.component.kafka.producerBatchSize";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data  [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_DEFAULT = "16384";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_CONF = "camel.component.kafka.queueBufferingMaxMessages";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_DOC = "The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_DEFAULT = "10000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_CONF = "camel.component.kafka.receiveBufferBytes";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_DEFAULT = "65536";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_CONF = "camel.component.kafka.reconnectBackoffMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_DOC = "The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the client to the broker.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_DEFAULT = "50";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_CONF = "camel.component.kafka.recordMetadata";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_DOC = "Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List of RecordMetadata entries. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA.";
+    public static final Boolean CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_DEFAULT = true;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_CONF = "camel.component.kafka.requestRequiredAcks";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_DOC = "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0: If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_DEFAULT = "1";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_CONF = "camel.component.kafka.requestTimeoutMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_DOC = "The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_DEFAULT = "30000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRIES_CONF = "camel.component.kafka.retries";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the  [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRIES_DEFAULT = "0";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_CONF = "camel.component.kafka.retryBackoffMs";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_DOC = "Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_DEFAULT = "100";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_CONF = "camel.component.kafka.sendBufferBytes";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_DOC = "Socket write buffer size.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_DEFAULT = "131072";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_CONF = "camel.component.kafka.serializerClass";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_DOC = "The serializer class for messages.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_DEFAULT = "org.apache.kafka.common.serialization.StringSerializer";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CONF = "camel.component.kafka.workerPool";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_DOC = "To use a custom worker pool for continuing to route the Exchange after the Kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_CONF = "camel.component.kafka.workerPoolCoreSize";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_DOC = "Number of core threads for the worker pool that continues routing the Exchange after the Kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_DEFAULT = "10";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_CONF = "camel.component.kafka.workerPoolMaxSize";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_DOC = "Maximum number of threads for the worker pool that continues routing the Exchange after the Kafka server has acknowledged the message that was sent to it from the KafkaProducer, using asynchronous non-blocking processing.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_DEFAULT = "20";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_CONF = "camel.component.kafka.basicPropertyBinding";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DOC = "Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities.";
+    public static final Boolean CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DEFAULT = false;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLCONF = "camel.component.kafka.schemaRegistryURL";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDOC = "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka).";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_CONF = "camel.component.kafka.interceptorClasses";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DOC = "Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a producer interceptor on a consumer it will throw a class cast exception at runtime.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF = "camel.component.kafka.kerberosBeforeReloginMinTime";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC = "Login thread sleep time between refresh attempts.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT = "60000";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_CONF = "camel.component.kafka.kerberosInitCmd";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DOC = "Kerberos kinit command path. Default is /usr/bin/kinit";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DEFAULT = "/usr/bin/kinit";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF = "camel.component.kafka.kerberosPrincipalToLocalRules";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format pleas [...]
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT = "DEFAULT";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_CONF = "camel.component.kafka.kerberosRenewJitter";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DOC = "Percentage of random jitter added to the renewal time.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DEFAULT = "0.05";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_CONF = "camel.component.kafka.kerberosRenewWindowFactor";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DOC = "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT = "0.8";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_CONF = "camel.component.kafka.saslJaasConfig";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DOC = "Expose the Kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_CONF = "camel.component.kafka.saslKerberosServiceName";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DOC = "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_CONF = "camel.component.kafka.saslMechanism";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_DOC = "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_DEFAULT = "GSSAPI";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_CONF = "camel.component.kafka.securityProtocol";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_DEFAULT = "PLAINTEXT";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_CONF = "camel.component.kafka.sslCipherSuites";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DOC = "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_CONF = "camel.component.kafka.sslContextParameters";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DOC = "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_CONF = "camel.component.kafka.sslEnabledProtocols";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_CONF = "camel.component.kafka.sslEndpointAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DOC = "The endpoint identification algorithm used to validate the server hostname using the server certificate.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DEFAULT = "https";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_CONF = "camel.component.kafka.sslKeymanagerAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DEFAULT = "SunX509";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_CONF = "camel.component.kafka.sslKeyPassword";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. This is optional for the client.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_CONF = "camel.component.kafka.sslKeystoreLocation";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. This is optional for the client and can be used for two-way client authentication.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_CONF = "camel.component.kafka.sslKeystorePassword";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. This is optional for the client and only needed if ssl.keystore.location is configured.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_CONF = "camel.component.kafka.sslKeystoreType";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. This is optional for the client. Default value is JKS.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_CONF = "camel.component.kafka.sslProtocol";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_DOC = "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_CONF = "camel.component.kafka.sslProvider";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_CONF = "camel.component.kafka.sslTrustmanagerAlgorithm";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT = "PKIX";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_CONF = "camel.component.kafka.sslTruststoreLocation";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_CONF = "camel.component.kafka.sslTruststorePassword";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_DEFAULT = null;
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_CONF = "camel.component.kafka.sslTruststoreType";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. Default value is JKS.";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_CONF = "camel.component.kafka.useGlobalSslContextParameters";
+    public static final String CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DOC = "Enable usage of global SSL context parameters.";
+    public static final Boolean CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DEFAULT = false;
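+
+    // Each option above is generated as a CONF/DOC/DEFAULT triple: *_CONF holds
+    // the connector property key, *_DOC the help text surfaced to Kafka Connect,
+    // and *_DEFAULT the value that conf() below applies when the property is
+    // absent from the supplied configuration.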
+
+    public CamelKafkaSinkConnectorConfig(
+            ConfigDef config,
+            Map<String, String> parsedConfig) {
+        super(config, parsedConfig);
+    }
+
+    public CamelKafkaSinkConnectorConfig(Map<String, String> parsedConfig) {
+        this(conf(), parsedConfig);
+    }
+
+    public static ConfigDef conf() {
+        ConfigDef conf = new ConfigDef(CamelSinkConnectorConfig.conf());
+        conf.define(CAMEL_SINK_KAFKA_PATH_TOPIC_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_PATH_TOPIC_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SINK_KAFKA_PATH_TOPIC_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_BROKERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_CLIENT_ID_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_CONF, ConfigDef.Type.INT, CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_BUFFER_MEMORY_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_COMPRESSION_CODEC_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_CONNECTION_MAX_IDLE_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_ENABLE_IDEMPOTENCE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KAFKA_HEADER_SERIALIZER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KEY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KEY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KEY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KEY_SERIALIZER_CLASS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_LAZY_START_PRODUCER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_LINGER_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_MAX_BLOCK_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_MAX_IN_FLIGHT_REQUEST_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_MAX_REQUEST_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_METADATA_MAX_AGE_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_METRIC_REPORTERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_METRICS_SAMPLE_WINDOW_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_NO_OF_METRICS_SAMPLE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_PARTITIONER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_PARTITION_KEY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_PRODUCER_BATCH_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_QUEUE_BUFFERING_MAX_MESSAGES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RECEIVE_BUFFER_BYTES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RECORD_METADATA_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_REQUIRED_ACKS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_REQUEST_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RETRIES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_RETRY_BACKOFF_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SEND_BUFFER_BYTES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SERIALIZER_CLASS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_CORE_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_WORKER_POOL_MAX_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SYNCHRONOUS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLCONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SASL_MECHANISM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEY_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_LOCATION_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROTOCOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_PROVIDER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_LOCATION_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_BROKERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_BROKERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_BROKERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_CLIENT_ID_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_CONFIGURATION_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_CONF, ConfigDef.Type.INT, CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_BUFFER_MEMORY_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_COMPRESSION_CODEC_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_CONNECTION_MAX_IDLE_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_ENABLE_IDEMPOTENCE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KAFKA_HEADER_SERIALIZER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KEY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KEY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KEY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KEY_SERIALIZER_CLASS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_LAZY_START_PRODUCER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_LINGER_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_MAX_BLOCK_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_MAX_IN_FLIGHT_REQUEST_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_MAX_REQUEST_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_METADATA_MAX_AGE_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_METRIC_REPORTERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_METRICS_SAMPLE_WINDOW_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_NO_OF_METRICS_SAMPLE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_PARTITIONER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_PARTITION_KEY_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_PRODUCER_BATCH_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_QUEUE_BUFFERING_MAX_MESSAGES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RECEIVE_BUFFER_BYTES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RECONNECT_BACKOFF_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RECORD_METADATA_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_REQUEST_REQUIRED_ACKS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_REQUEST_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RETRIES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_RETRIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RETRIES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_RETRY_BACKOFF_MS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SEND_BUFFER_BYTES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SERIALIZER_CLASS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_CORE_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_WORKER_POOL_MAX_SIZE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLCONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SASL_MECHANISM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SECURITY_PROTOCOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEY_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_LOCATION_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_PROTOCOL_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_PROVIDER_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_LOCATION_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_PASSWORD_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DOC);
+        conf.define(CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SINK_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DOC);
+        return conf;
+    }
+}
\ No newline at end of file
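As an aside for readers of this generated class: the ConfigDef built by conf() can be exercised directly to sanity-check a sink configuration before deploying the connector. Below is a minimal sketch, assuming the standard org.apache.kafka.common.config.AbstractConfig accessors inherited via CamelSinkConnectorConfig and that the base options all carry defaults; the brokers key is an assumption based on the camel.component.kafka.* naming pattern of the constants above (its constant is defined earlier in the file).

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnectorConfig;

    public class SinkConfigSketch {
        public static void main(String[] args) {
            Map<String, String> props = new HashMap<>();
            // Property keys follow the camel.component.kafka.* pattern of the
            // generated *_CONF constants; brokers is assumed to follow it too.
            props.put("camel.component.kafka.brokers", "localhost:9092");
            props.put("camel.component.kafka.requestRequiredAcks", "all");
            props.put("camel.component.kafka.lingerMs", "5");

            // The constructor parses and validates the map against conf().
            CamelKafkaSinkConnectorConfig config =
                    new CamelKafkaSinkConnectorConfig(props);

            // Explicit values are returned as given; unset options fall back to
            // the defaults declared in conf(), e.g. retries defaults to "0".
            System.out.println(config.getString("camel.component.kafka.requestRequiredAcks")); // all
            System.out.println(config.getString("camel.component.kafka.retries"));             // 0
        }
    }

Only values that differ from the generated defaults need to appear in the connector properties file.
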
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkTask.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkTask.java
new file mode 100644
index 0000000..8c4fe73
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSinkTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import java.util.HashMap;
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSinkConnectorConfig;
+import org.apache.camel.kafkaconnector.CamelSinkTask;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSinkTask extends CamelSinkTask {
+
+    @Override
+    protected CamelSinkConnectorConfig getCamelSinkConnectorConfig(
+            Map<String, String> props) {
+        return new CamelKafkaSinkConnectorConfig(props);
+    }
+    @Override
+    protected Map<String, String> getDefaultConfig() {
+        return new HashMap<String, String>() {{
+            put(CamelSinkConnectorConfig.CAMEL_SINK_COMPONENT_CONF, "kafka");
+        }};
+    }
+}
\ No newline at end of file
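
CamelKafkaSinkTask above overrides only two hooks: it returns the generated config class and pins camel.sink.component to kafka via getDefaultConfig(), so a deployment supplies just the endpoint options. A minimal sketch of a sink setup, assuming the camel.sink.* property names mirror the generated camel.source.* constants shown later in this diff:

    # Minimal sink configuration sketch; names assumed to mirror the generated source options
    name=CamelKafkaSinkConnector
    connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
    tasks.max=1
    topics=input-topic
    camel.sink.path.topic=target-topic
    camel.sink.endpoint.brokers=localhost:9092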
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnector.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnector.java
new file mode 100644
index 0000000..fb41c7f
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnector.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSourceConnector;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSourceConnector extends CamelSourceConnector {
+
+    @Override
+    public ConfigDef config() {
+        return CamelKafkaSourceConnectorConfig.conf();
+    }
+    @Override
+    public Class<? extends Task> taskClass() {
+        return CamelKafkaSourceTask.class;
+    }
+}
\ No newline at end of file
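
CamelKafkaSourceConnector above ties the source task to its ConfigDef; the options it validates are the camel.source.* constants generated below. A minimal sketch built from those generated names (camel.source.path.topic, camel.source.endpoint.brokers, and camel.source.endpoint.groupId, which the generated doc marks as required for consumers); the Connect-side target topic and all values are assumptions:

    # Minimal source configuration sketch; property names taken from the generated constants below
    name=CamelKafkaSourceConnector
    connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
    tasks.max=1
    topics=connect-target-topic
    camel.source.path.topic=source-topic
    camel.source.endpoint.brokers=localhost:9092
    camel.source.endpoint.groupId=my-consumer-group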
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnectorConfig.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnectorConfig.java
new file mode 100644
index 0000000..c9e9915
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceConnectorConfig.java
@@ -0,0 +1,528 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSourceConnectorConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSourceConnectorConfig
+        extends
+            CamelSourceConnectorConfig {
+
+    public static final String CAMEL_SOURCE_KAFKA_PATH_TOPIC_CONF = "camel.source.path.topic";
+    public static final String CAMEL_SOURCE_KAFKA_PATH_TOPIC_DOC = "Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic.";
+    public static final String CAMEL_SOURCE_KAFKA_PATH_TOPIC_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_CONF = "camel.source.endpoint.additionalProperties";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DOC = "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g.: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g.: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_CONF = "camel.source.endpoint.brokers";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_DOC = "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_CONF = "camel.source.endpoint.clientId";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_DOC = "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_CONF = "camel.source.endpoint.headerFilterStrategy";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DOC = "To use a custom HeaderFilterStrategy to filter header to and from Camel message.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_CONF = "camel.source.endpoint.reconnectBackoffMaxMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DEFAULT = "1000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_CONF = "camel.source.endpoint.shutdownTimeout";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DOC = "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.";
+    public static final Integer CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DEFAULT = 30000;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_CONF = "camel.source.endpoint.allowManualCommit";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_DOC = "Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_CONF = "camel.source.endpoint.autoCommitEnable";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_DOC = "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_DEFAULT = "true";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_CONF = "camel.source.endpoint.autoCommitIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_DOC = "The frequency in ms that the consumer offsets are committed to zookeeper.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_DEFAULT = "5000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_CONF = "camel.source.endpoint.autoCommitOnStop";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_DOC = "Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires that the option autoCommitEnable is turned on. The possible values are: sync, async, or none; sync is the default value. One of: [sync] [async] [none]";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_DEFAULT = "sync";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_CONF = "camel.source.endpoint.autoOffsetReset";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_DOC = "What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none]";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_DEFAULT = "latest";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_CONF = "camel.source.endpoint.breakOnFirstError";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_DOC = "This option controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to the offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same messa [...]
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_CONF = "camel.source.endpoint.bridgeErrorHandler";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_DOC = "Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_CONF = "camel.source.endpoint.checkCrcs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_DOC = "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_DEFAULT = "true";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_CONF = "camel.source.endpoint.consumerRequestTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_DOC = "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_DEFAULT = "40000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_CONF = "camel.source.endpoint.consumersCount";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_DOC = "The number of consumers that connect to the Kafka server";
+    public static final Integer CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_DEFAULT = 1;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_CONF = "camel.source.endpoint.consumerStreams";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_DOC = "Number of concurrent consumers on the consumer";
+    public static final Integer CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_DEFAULT = 10;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_CONF = "camel.source.endpoint.fetchMaxBytes";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_DOC = "The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic conf [...]
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_DEFAULT = "52428800";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_CONF = "camel.source.endpoint.fetchMinBytes";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_DEFAULT = "1";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_CONF = "camel.source.endpoint.fetchWaitMaxMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_DOC = "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_DEFAULT = "500";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_CONF = "camel.source.endpoint.groupId";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_DOC = "A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id multiple processes indicate that they are all part of the same consumer group. This option is required for consumers.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_CONF = "camel.source.endpoint.heartbeatIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower t [...]
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_DEFAULT = "3000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_CONF = "camel.source.endpoint.kafkaHeaderDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_DOC = "To use a custom KafkaHeaderDeserializer to deserialize kafka header values";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_CONF = "camel.source.endpoint.keyDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_DOC = "Deserializer class for key that implements the Deserializer interface.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_DEFAULT = "org.apache.kafka.common.serialization.StringDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_CONF = "camel.source.endpoint.maxPartitionFetchBytes";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_DOC = "The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions * max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large m [...]
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_DEFAULT = "1048576";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_CONF = "camel.source.endpoint.maxPollIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_DOC = "The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_CONF = "camel.source.endpoint.maxPollRecords";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_DOC = "The maximum number of records returned in a single call to poll()";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_DEFAULT = "500";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_CONF = "camel.source.endpoint.offsetRepository";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_DOC = "The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_CONF = "camel.source.endpoint.partitionAssignor";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_DOC = "The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_DEFAULT = "org.apache.kafka.clients.consumer.RangeAssignor";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_CONF = "camel.source.endpoint.pollTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_DOC = "The timeout used when polling the KafkaConsumer.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_DEFAULT = "5000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_CONF = "camel.source.endpoint.seekTo";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_DOC = "Set if KafkaConsumer will read from beginning or end on startup: beginning: read from beginning; end: read from end. This replaces the earlier property seekToBeginning. One of: [beginning] [end]";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_CONF = "camel.source.endpoint.sessionTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_DOC = "The timeout used to detect failures when using Kafka's group management facilities.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_DEFAULT = "10000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_CONF = "camel.source.endpoint.specificAvroReader";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_DOC = "This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka)";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_CONF = "camel.source.endpoint.topicIsPattern";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_DOC = "Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_CONF = "camel.source.endpoint.valueDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_DOC = "Deserializer class for value that implements the Deserializer interface.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_DEFAULT = "org.apache.kafka.common.serialization.StringDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_CONF = "camel.source.endpoint.exceptionHandler";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_DOC = "To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_CONF = "camel.source.endpoint.exchangePattern";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_DOC = "Sets the exchange pattern when the consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut]";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_CONF = "camel.source.endpoint.basicPropertyBinding";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DOC = "Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_CONF = "camel.source.endpoint.synchronous";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_DOC = "Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported).";
+    public static final Boolean CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLCONF = "camel.source.endpoint.schemaRegistryURL";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDOC = "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_CONF = "camel.source.endpoint.interceptorClasses";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DOC = "Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF = "camel.source.endpoint.kerberosBeforeReloginMinTime";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC = "Login thread sleep time between refresh attempts.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT = "60000";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_CONF = "camel.source.endpoint.kerberosInitCmd";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DOC = "Kerberos kinit command path. Default is /usr/bin/kinit";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DEFAULT = "/usr/bin/kinit";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF = "camel.source.endpoint.kerberosPrincipalToLocalRules";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format plea [...]
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT = "DEFAULT";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_CONF = "camel.source.endpoint.kerberosRenewJitter";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DOC = "Percentage of random jitter added to the renewal time.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DEFAULT = "0.05";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_CONF = "camel.source.endpoint.kerberosRenewWindowFactor";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DOC = "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT = "0.8";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_CONF = "camel.source.endpoint.saslJaasConfig";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DOC = "Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_CONF = "camel.source.endpoint.saslKerberosServiceName";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DOC = "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_CONF = "camel.source.endpoint.saslMechanism";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_DOC = "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_DEFAULT = "GSSAPI";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_CONF = "camel.source.endpoint.securityProtocol";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DEFAULT = "PLAINTEXT";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_CONF = "camel.source.endpoint.sslCipherSuites";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DOC = "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_CONF = "camel.source.endpoint.sslContextParameters";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DOC = "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_CONF = "camel.source.endpoint.sslEnabledProtocols";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_CONF = "camel.source.endpoint.sslEndpointAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DEFAULT = "https";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_CONF = "camel.source.endpoint.sslKeymanagerAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DEFAULT = "SunX509";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_CONF = "camel.source.endpoint.sslKeystoreType";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. This is optional for client. Default value is JKS";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_CONF = "camel.source.endpoint.sslProtocol";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_DOC = "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_CONF = "camel.source.endpoint.sslProvider";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_CONF = "camel.source.endpoint.sslTrustmanagerAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT = "PKIX";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_CONF = "camel.source.endpoint.sslTruststoreType";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. Default value is JKS.";
+    public static final String CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS";
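+
+    // Note: the endpoint-level constants above map one-to-one onto connector
+    // properties. A minimal sketch (values are illustrative assumptions) that
+    // enables SSL on the consumer endpoint using these generated names:
+    //
+    //   camel.source.endpoint.securityProtocol=SSL
+    //   camel.source.endpoint.sslProtocol=TLSv1.2
+    //   camel.source.endpoint.sslTruststoreType=JKS
+    //
+    // The camel.component.kafka.* constants below expose the same options at
+    // component scope, pre-configuring every endpoint created from the component.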
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_CONF = "camel.component.kafka.additionalProperties";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DOC = "Sets additional properties for either kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g.: new Kafka properties that are not reflected yet in Camel configurations), the properties have to be prefixed with additionalProperties.. E.g.: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_CONF = "camel.component.kafka.brokers";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_DOC = "URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_CONF = "camel.component.kafka.clientId";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_DOC = "The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_CONF = "camel.component.kafka.configuration";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_DOC = "Allows to pre-configure the Kafka component with common options that the endpoints will reuse.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_CONF = "camel.component.kafka.headerFilterStrategy";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DOC = "To use a custom HeaderFilterStrategy to filter header to and from Camel message.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_CONF = "camel.component.kafka.reconnectBackoffMaxMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DEFAULT = "1000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_CONF = "camel.component.kafka.shutdownTimeout";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DOC = "Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads.";
+    public static final Integer CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DEFAULT = 30000;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_CONF = "camel.component.kafka.allowManualCommit";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_DOC = "Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_CONF = "camel.component.kafka.autoCommitEnable";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_DOC = "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_DEFAULT = "true";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_CONF = "camel.component.kafka.autoCommitIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_DOC = "The frequency in ms that the consumer offsets are committed to zookeeper.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_DEFAULT = "5000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_CONF = "camel.component.kafka.autoCommitOnStop";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_DOC = "Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires that the option autoCommitEnable is turned on. The possible values are: sync, async, or none; sync is the default value. One of: [sync] [async] [none]";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_DEFAULT = "sync";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_CONF = "camel.component.kafka.autoOffsetReset";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_DOC = "What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none]";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_DEFAULT = "latest";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_CONF = "camel.component.kafka.breakOnFirstError";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_DOC = "This option controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to the offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same mess [...]
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_CONF = "camel.component.kafka.bridgeErrorHandler";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_DOC = "Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_CONF = "camel.component.kafka.checkCrcs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_DOC = "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_DEFAULT = "true";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_CONF = "camel.component.kafka.consumerRequestTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_DOC = "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_DEFAULT = "40000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_CONF = "camel.component.kafka.consumersCount";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_DOC = "The number of consumers that connect to the Kafka server";
+    public static final Integer CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_DEFAULT = 1;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_CONF = "camel.component.kafka.consumerStreams";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_DOC = "Number of concurrent consumers on the consumer";
+    public static final Integer CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_DEFAULT = 10;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_CONF = "camel.component.kafka.fetchMaxBytes";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_DOC = "The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic con [...]
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_DEFAULT = "52428800";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_CONF = "camel.component.kafka.fetchMinBytes";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_DEFAULT = "1";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_CONF = "camel.component.kafka.fetchWaitMaxMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_DOC = "The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_DEFAULT = "500";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_CONF = "camel.component.kafka.groupId";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_DOC = "A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id multiple processes indicate that they are all part of the same consumer group. This option is required for consumers.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_CONF = "camel.component.kafka.heartbeatIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower  [...]
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_DEFAULT = "3000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_CONF = "camel.component.kafka.kafkaHeaderDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_DOC = "To use a custom KafkaHeaderDeserializer to deserialize kafka header values";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_CONF = "camel.component.kafka.keyDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_DOC = "Deserializer class for key that implements the Deserializer interface.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_DEFAULT = "org.apache.kafka.common.serialization.StringDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_CONF = "camel.component.kafka.maxPartitionFetchBytes";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_DOC = "The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions * max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large  [...]
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_DEFAULT = "1048576";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_CONF = "camel.component.kafka.maxPollIntervalMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_DOC = "The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_CONF = "camel.component.kafka.maxPollRecords";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_DOC = "The maximum number of records returned in a single call to poll()";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_DEFAULT = "500";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_CONF = "camel.component.kafka.offsetRepository";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_DOC = "The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_CONF = "camel.component.kafka.partitionAssignor";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_DOC = "The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_DEFAULT = "org.apache.kafka.clients.consumer.RangeAssignor";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_CONF = "camel.component.kafka.pollTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_DOC = "The timeout used when polling the KafkaConsumer.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_DEFAULT = "5000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_CONF = "camel.component.kafka.seekTo";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_DOC = "Set if KafkaConsumer will read from beginning or end on startup: beginning: read from beginning; end: read from end. This replaces the earlier property seekToBeginning. One of: [beginning] [end]";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_CONF = "camel.component.kafka.sessionTimeoutMs";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_DOC = "The timeout used to detect failures when using Kafka's group management facilities.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_DEFAULT = "10000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_CONF = "camel.component.kafka.specificAvroReader";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_DOC = "This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka)";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_CONF = "camel.component.kafka.topicIsPattern";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_DOC = "Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_CONF = "camel.component.kafka.valueDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_DOC = "Deserializer class for value that implements the Deserializer interface.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_DEFAULT = "org.apache.kafka.common.serialization.StringDeserializer";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_CONF = "camel.component.kafka.kafkaManualCommitFactory";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_DOC = "Factory to use for creating KafkaManualCommit instances. This allows plugging in a custom factory to create custom KafkaManualCommit instances in case special logic is needed when doing manual commits that deviates from the default implementation that comes out of the box.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_CONF = "camel.component.kafka.basicPropertyBinding";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DOC = "Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DEFAULT = false;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLCONF = "camel.component.kafka.schemaRegistryURL";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDOC = "URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka)";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_CONF = "camel.component.kafka.interceptorClasses";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DOC = "Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a producer interceptor on a consumer it will throw a class cast exception at runtime.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF = "camel.component.kafka.kerberosBeforeReloginMinTime";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC = "Login thread sleep time between refresh attempts.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT = "60000";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_CONF = "camel.component.kafka.kerberosInitCmd";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DOC = "Kerberos kinit command path. Default is /usr/bin/kinit";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DEFAULT = "/usr/bin/kinit";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF = "camel.component.kafka.kerberosPrincipalToLocalRules";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC = "A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}{REALM} are mapped to {username}. For more details on the format ple [...]
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT = "DEFAULT";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_CONF = "camel.component.kafka.kerberosRenewJitter";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DOC = "Percentage of random jitter added to the renewal time.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DEFAULT = "0.05";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_CONF = "camel.component.kafka.kerberosRenewWindowFactor";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DOC = "Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT = "0.8";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_CONF = "camel.component.kafka.saslJaasConfig";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DOC = "Expose the Kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD;";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_CONF = "camel.component.kafka.saslKerberosServiceName";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DOC = "The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_CONF = "camel.component.kafka.saslMechanism";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_DOC = "The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_DEFAULT = "GSSAPI";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_CONF = "camel.component.kafka.securityProtocol";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_DEFAULT = "PLAINTEXT";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_CONF = "camel.component.kafka.sslCipherSuites";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DOC = "A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_CONF = "camel.component.kafka.sslContextParameters";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DOC = "SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_CONF = "camel.component.kafka.sslEnabledProtocols";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_CONF = "camel.component.kafka.sslEndpointAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DEFAULT = "https";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_CONF = "camel.component.kafka.sslKeymanagerAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DEFAULT = "SunX509";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_CONF = "camel.component.kafka.sslKeystoreType";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. This is optional for client. Default value is JKS";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_CONF = "camel.component.kafka.sslProtocol";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_DOC = "The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_DEFAULT = "TLSv1.2";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_CONF = "camel.component.kafka.sslProvider";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_DEFAULT = null;
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_CONF = "camel.component.kafka.sslTrustmanagerAlgorithm";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT = "PKIX";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_CONF = "camel.component.kafka.sslTruststoreType";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. Default value is JKS.";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_CONF = "camel.component.kafka.useGlobalSslContextParameters";
+    public static final String CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DOC = "Enable usage of global SSL context parameters.";
+    public static final Boolean CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DEFAULT = false;
+
+    public CamelKafkaSourceConnectorConfig(
+            ConfigDef config,
+            Map<String, String> parsedConfig) {
+        super(config, parsedConfig);
+    }
+
+    public CamelKafkaSourceConnectorConfig(Map<String, String> parsedConfig) {
+        this(conf(), parsedConfig);
+    }
+
+    public static ConfigDef conf() {
+        ConfigDef conf = new ConfigDef(CamelSourceConnectorConfig.conf());
+        conf.define(CAMEL_SOURCE_KAFKA_PATH_TOPIC_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_PATH_TOPIC_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SOURCE_KAFKA_PATH_TOPIC_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_ADDITIONAL_PROPERTIES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_BROKERS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_CLIENT_ID_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_HEADER_FILTER_STRATEGY_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_RECONNECT_BACKOFF_MAX_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SHUTDOWN_TIMEOUT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_ALLOW_MANUAL_COMMIT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ENABLE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_COMMIT_ON_STOP_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_AUTO_OFFSET_RESET_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_BREAK_ON_FIRST_ERROR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_BRIDGE_ERROR_HANDLER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_CHECK_CRCS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_REQUEST_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMERS_COUNT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_CONSUMER_STREAMS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MAX_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_MIN_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_FETCH_WAIT_MAX_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_GROUP_ID_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_HEARTBEAT_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KAFKA_HEADER_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KEY_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_PARTITION_FETCH_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_MAX_POLL_RECORDS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_OFFSET_REPOSITORY_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_PARTITION_ASSIGNOR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_POLL_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SEEK_TO_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SESSION_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SPECIFIC_AVRO_READER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_TOPIC_IS_PATTERN_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_VALUE_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_EXCEPTION_HANDLER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_EXCHANGE_PATTERN_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_BASIC_PROPERTY_BINDING_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SYNCHRONOUS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLCONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SCHEMA_REGISTRY_URLDOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_INTERCEPTOR_CLASSES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_INIT_CMD_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_JITTER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_KERBEROS_RENEW_WINDOW_FACTOR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_JAAS_CONFIG_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_KERBEROS_SERVICE_NAME_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SASL_MECHANISM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SECURITY_PROTOCOL_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CIPHER_SUITES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_CONTEXT_PARAMETERS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENABLED_PROTOCOLS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_ENDPOINT_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_KEYSTORE_TYPE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROTOCOL_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_PROVIDER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_ENDPOINT_SSL_TRUSTSTORE_TYPE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_ADDITIONAL_PROPERTIES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_BROKERS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CLIENT_ID_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CONFIGURATION_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_HEADER_FILTER_STRATEGY_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_RECONNECT_BACKOFF_MAX_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SHUTDOWN_TIMEOUT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_ALLOW_MANUAL_COMMIT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ENABLE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_COMMIT_ON_STOP_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_AUTO_OFFSET_RESET_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_BREAK_ON_FIRST_ERROR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_BRIDGE_ERROR_HANDLER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CHECK_CRCS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_REQUEST_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMERS_COUNT_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_CONF, ConfigDef.Type.INT, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_CONSUMER_STREAMS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MAX_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_MIN_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_FETCH_WAIT_MAX_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_GROUP_ID_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_HEARTBEAT_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_HEADER_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KEY_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_PARTITION_FETCH_BYTES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_INTERVAL_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_MAX_POLL_RECORDS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_OFFSET_REPOSITORY_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_PARTITION_ASSIGNOR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_POLL_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SEEK_TO_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SESSION_TIMEOUT_MS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SPECIFIC_AVRO_READER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_TOPIC_IS_PATTERN_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_VALUE_DESERIALIZER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KAFKA_MANUAL_COMMIT_FACTORY_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_BASIC_PROPERTY_BINDING_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLCONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SCHEMA_REGISTRY_URLDOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_INTERCEPTOR_CLASSES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_BEFORE_RELOGIN_MIN_TIME_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_INIT_CMD_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_JITTER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_KERBEROS_RENEW_WINDOW_FACTOR_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_JAAS_CONFIG_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_KERBEROS_SERVICE_NAME_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CIPHER_SUITES_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_CONTEXT_PARAMETERS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENABLED_PROTOCOLS_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_ENDPOINT_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_KEYSTORE_TYPE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROTOCOL_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_PROVIDER_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTMANAGER_ALGORITHM_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_CONF, ConfigDef.Type.STRING, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_SSL_TRUSTSTORE_TYPE_DOC);
+        conf.define(CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_CONF, ConfigDef.Type.BOOLEAN, CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DEFAULT, ConfigDef.Importance.MEDIUM, CAMEL_SOURCE_KAFKA_COMPONENT_USE_GLOBAL_SSL_CONTEXT_PARAMETERS_DOC);
+        return conf;
+    }
+}
\ No newline at end of file
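
Each *_CONF constant above is a Kafka Connect property key that conf() registers on a ConfigDef extending the base CamelSourceConnectorConfig, so defaults and doc strings flow through Kafka's standard config machinery. A minimal sketch of how the generated class resolves values, using only the standard org.apache.kafka.common.config.AbstractConfig accessors (the property values here are hypothetical, not part of this commit):

    import java.util.HashMap;
    import java.util.Map;

    public class ConfigSketch {
        public static void main(String[] args) {
            Map<String, String> props = new HashMap<>();
            // Override a single key; everything else falls back to the generated defaults.
            props.put("camel.component.kafka.saslMechanism", "PLAIN");

            CamelKafkaSourceConnectorConfig config =
                    new CamelKafkaSourceConnectorConfig(props);

            // Explicit override wins: prints "PLAIN".
            System.out.println(config.getString(
                    CamelKafkaSourceConnectorConfig.CAMEL_SOURCE_KAFKA_COMPONENT_SASL_MECHANISM_CONF));
            // An unset key falls back to its ConfigDef default: prints "PLAINTEXT".
            System.out.println(config.getString(
                    CamelKafkaSourceConnectorConfig.CAMEL_SOURCE_KAFKA_COMPONENT_SECURITY_PROTOCOL_CONF));
        }
    }
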
diff --git a/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceTask.java b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceTask.java
new file mode 100644
index 0000000..bb80ccc
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/java/org/apache/camel/kafkaconnector/kafka/CamelKafkaSourceTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.kafkaconnector.kafka;
+
+import java.util.HashMap;
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.kafkaconnector.CamelSourceConnectorConfig;
+import org.apache.camel.kafkaconnector.CamelSourceTask;
+
+@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
+public class CamelKafkaSourceTask extends CamelSourceTask {
+
+    @Override
+    protected CamelSourceConnectorConfig getCamelSourceConnectorConfig(
+            Map<String, String> props) {
+        return new CamelKafkaSourceConnectorConfig(props);
+    }
+    @Override
+    protected Map<String, String> getDefaultConfig() {
+        return new HashMap<String, String>() {{
+            put(CamelSourceConnectorConfig.CAMEL_SOURCE_COMPONENT_CONF, "kafka");
+        }};
+    }
+}
\ No newline at end of file
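
The task class above only pins the Camel component to "kafka"; everything else comes from the connector configuration. A hypothetical standalone worker properties sketch for this source connector (the connector.class mirrors the sink-side value shown later in this commit; topic names and the broker address are placeholders, and the camel.source.* keys follow the naming pattern used by the other generated connectors in this repository):

    name=CamelKafkaSourceConnectorExample
    connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
    tasks.max=1
    # Local Connect topic the polled records are written to (placeholder).
    camel.source.kafka.topic=mytopic
    # Remote Kafka topic and brokers consumed by the Camel kafka endpoint (placeholders).
    camel.source.path.topic=upstream-topic
    camel.source.endpoint.brokers=remote-kafka:9092
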
diff --git a/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/LICENSE.txt b/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 0000000..6b0b127
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/NOTICE.txt b/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 0000000..2e215bf
--- /dev/null
+++ b/connectors/camel-kafka-kafka-connector/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,11 @@
+   =========================================================================
+   ==  NOTICE file corresponding to the section 4 d of                    ==
+   ==  the Apache License, Version 2.0,                                   ==
+   ==  in this case for the Apache Camel distribution.                    ==
+   =========================================================================
+
+   This product includes software developed by
+   The Apache Software Foundation (http://www.apache.org/).
+
+   Please read the different LICENSE files present in the licenses directory of
+   this distribution.
diff --git a/connectors/pom.xml b/connectors/pom.xml
index bea1ee0..689dd4c 100644
--- a/connectors/pom.xml
+++ b/connectors/pom.xml
@@ -397,6 +397,7 @@
         <module>camel-json-validator-kafka-connector</module>
         <module>camel-jsonata-kafka-connector</module>
         <module>camel-jt400-kafka-connector</module>
+        <module>camel-kafka-kafka-connector</module>
         <module>camel-kubernetes-config-maps-kafka-connector</module>
         <module>camel-kubernetes-deployments-kafka-connector</module>
         <module>camel-kubernetes-hpa-kafka-connector</module>
diff --git a/docs/modules/ROOT/pages/connectors.adoc b/docs/modules/ROOT/pages/connectors.adoc
index e9c07e1..f4d3551 100644
--- a/docs/modules/ROOT/pages/connectors.adoc
+++ b/docs/modules/ROOT/pages/connectors.adoc
@@ -2,7 +2,7 @@
 = Supported connectors and documentation
 
 // kafka-connectors list: START
-Number of Camel Kafka connectors: 345 
+Number of Camel Kafka connectors: 346 
 
 [width="100%",cols="4,1,1,1,1,1,1",options="header"]
 |===
@@ -198,6 +198,7 @@ Number of Camel Kafka connectors: 345
 | *camel-json-validator-kafka-connector* | true | false | xref:connectors/camel-json-validator-kafka-sink-connector.adoc[Sink Docs] |  | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-json-validator-kafka-connector/0.5.0/camel-json-validator-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-json-validator-kafka-connector/0.5.0/camel-json-validator-kafka-connector-0.5.0-package.tar.gz[Do [...]
 | *camel-jsonata-kafka-connector* | true | false | xref:connectors/camel-jsonata-kafka-sink-connector.adoc[Sink Docs] |  | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-jsonata-kafka-connector/0.5.0/camel-jsonata-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-jsonata-kafka-connector/0.5.0/camel-jsonata-kafka-connector-0.5.0-package.tar.gz[Download Tar.gz]
 | *camel-jt400-kafka-connector* | true | true | xref:connectors/camel-jt400-kafka-sink-connector.adoc[Sink Docs] | xref:connectors/camel-jt400-kafka-source-connector.adoc[Source Docs] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-jt400-kafka-connector/0.5.0/camel-jt400-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-jt400-kafka-connector/0.5.0/camel-jt400-kafka-connector-0.5.0-pack [...]
+| *camel-kafka-kafka-connector* | true | true | xref:connectors/camel-kafka-kafka-sink-connector.adoc[Sink Docs] | xref:connectors/camel-kafka-kafka-source-connector.adoc[Source Docs] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kafka-kafka-connector/0.5.0/camel-kafka-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kafka-kafka-connector/0.5.0/camel-kafka-kafka-connector-0.5.0-pack [...]
 | *camel-kubernetes-config-maps-kafka-connector* | true | false | xref:connectors/camel-kubernetes-config-maps-kafka-sink-connector.adoc[Sink Docs] |  | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kubernetes-config-maps-kafka-connector/0.5.0/camel-kubernetes-config-maps-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kubernetes-config-maps-kafka-connector/0.5.0/camel-kubernetes-con [...]
 | *camel-kubernetes-deployments-kafka-connector* | true | true | xref:connectors/camel-kubernetes-deployments-kafka-sink-connector.adoc[Sink Docs] | xref:connectors/camel-kubernetes-deployments-kafka-source-connector.adoc[Source Docs] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kubernetes-deployments-kafka-connector/0.5.0/camel-kubernetes-deployments-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/ka [...]
 | *camel-kubernetes-hpa-kafka-connector* | true | true | xref:connectors/camel-kubernetes-hpa-kafka-sink-connector.adoc[Sink Docs] | xref:connectors/camel-kubernetes-hpa-kafka-source-connector.adoc[Source Docs] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kubernetes-hpa-kafka-connector/0.5.0/camel-kubernetes-hpa-kafka-connector-0.5.0-package.zip[Download Zip] | https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-kubernetes-hpa-kafka- [...]
diff --git a/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-sink-connector.adoc b/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-sink-connector.adoc
new file mode 100644
index 0000000..a2361a0
--- /dev/null
+++ b/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-sink-connector.adoc
@@ -0,0 +1,187 @@
+// kafka-connector options: START
+[[camel-kafka-kafka-connector-sink]]
+= camel-kafka-kafka-connector sink configuration
+
+When using camel-kafka-kafka-connector as a sink, make sure to use the following Maven dependency to have support for the connector:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel.kafkaconnector</groupId>
+  <artifactId>camel-kafka-kafka-connector</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel Kafka connector version -->
+</dependency>
+----
+
+To use this Sink connector in Kafka Connect you'll need to set the following connector.class:
+
+[source,java]
+----
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
+----
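+
+As a minimal sketch (the connector name, Kafka Connect topic, destination topic and broker address below are placeholders you would adapt to your environment), a complete sink configuration could look like the following properties file:
+
+[source,properties]
+----
+name=CamelKafkaSinkConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSinkConnector
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+# Kafka Connect topic(s) the sink reads records from
+topics=mytopic
+# Camel Kafka endpoint options (see the table below)
+camel.sink.path.topic=destination-topic
+camel.sink.endpoint.brokers=localhost:9092
+----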
+
+
+The camel-kafka sink connector supports 134 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Priority
+| *camel.sink.path.topic* | Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic. | null | HIGH
+| *camel.sink.endpoint.additionalProperties* | Sets additional properties for either the kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties., e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.sink.endpoint.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.sink.endpoint.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.sink.endpoint.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.sink.endpoint.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.sink.endpoint.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.sink.endpoint.bufferMemorySize* | The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server, the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Som [...]
+| *camel.sink.endpoint.compressionCodec* | This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip, snappy and lz4. One of: [none] [gzip] [snappy] [lz4] | "none" | MEDIUM
+| *camel.sink.endpoint.connectionMaxIdleMs* | Close idle connections after the number of milliseconds specified by this config. | "540000" | MEDIUM
+| *camel.sink.endpoint.enableIdempotence* | If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1 and retries cannot be zero and additionally acks must be set to 'all'. | false | MEDIUM
+| *camel.sink.endpoint.kafkaHeaderSerializer* | To use a custom KafkaHeaderSerializer to serialize kafka header values. | null | MEDIUM
+| *camel.sink.endpoint.key* | The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY. | null | MEDIUM
+| *camel.sink.endpoint.keySerializerClass* | The serializer class for keys (defaults to the same as for messages if nothing is given). | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.sink.endpoint.lazyStartProducer* | Whether the producer should be started lazily (on the first message). By starting lazily you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy, the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then cre [...]
+| *camel.sink.endpoint.lingerMs* | The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However, in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay; that is, rather than immediately sending out a record, the produc [...]
+| *camel.sink.endpoint.maxBlockMs* | The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, e.g. buffer full, metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata | "6000 [...]
+| *camel.sink.endpoint.maxInFlightRequest* | The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled). | "5" | MEDIUM
+| *camel.sink.endpoint.maxRequestSize* | The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. | "1048576" | MEDIUM
+| *camel.sink.endpoint.metadataMaxAgeMs* | The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions. | "300000" | MEDIUM
+| *camel.sink.endpoint.metricReporters* | A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics. | null | MEDIUM
+| *camel.sink.endpoint.metricsSampleWindowMs* | The number of samples maintained to compute metrics. | "30000" | MEDIUM
+| *camel.sink.endpoint.noOfMetricsSample* | The number of samples maintained to compute metrics. | "2" | MEDIUM
+| *camel.sink.endpoint.partitioner* | The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key. | "org.apache.kafka.clients.producer.internals.DefaultPartitioner" | MEDIUM
+| *camel.sink.endpoint.partitionKey* | The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY. | null | MEDIUM
+| *camel.sink.endpoint.producerBatchSize* | The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size w [...]
+| *camel.sink.endpoint.queueBufferingMaxMessages* | The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped. | "10000" | MEDIUM
+| *camel.sink.endpoint.receiveBufferBytes* | The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. | "65536" | MEDIUM
+| *camel.sink.endpoint.reconnectBackoffMs* | The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker. | "50" | MEDIUM
+| *camel.sink.endpoint.recordMetadata* | Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata items. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA. | true | MEDIUM
+| *camel.sink.endpoint.requestRequiredAcks* | The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the [...]
+| *camel.sink.endpoint.requestTimeoutMs* | The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client. | "30000" | MEDIUM
+| *camel.sink.endpoint.retries* | Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first. | "0" | MEDIUM
+| *camel.sink.endpoint.retryBackoffMs* | Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. | "100" | MEDIUM
+| *camel.sink.endpoint.sendBufferBytes* | Socket write buffer size | "131072" | MEDIUM
+| *camel.sink.endpoint.serializerClass* | The serializer class for messages. | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.sink.endpoint.workerPool* | To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed. | null | MEDIUM
+| *camel.sink.endpoint.workerPoolCoreSize* | Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. | "10" | MEDIUM
+| *camel.sink.endpoint.workerPoolMaxSize* | Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. | "20" | MEDIUM
+| *camel.sink.endpoint.basicPropertyBinding* | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.sink.endpoint.synchronous* | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | MEDIUM
+| *camel.sink.endpoint.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.sink.endpoint.interceptorClasses* | Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.sink.endpoint.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.sink.endpoint.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.sink.endpoint.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}@\{REALM\} are mapped to \{username\}. For more details on the format, please see the security authorization an [...]
+| *camel.sink.endpoint.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.sink.endpoint.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.sink.endpoint.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.sink.endpoint.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.sink.endpoint.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.sink.endpoint.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported | "PLAINTEXT" | MEDIUM
+| *camel.sink.endpoint.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.sink.endpoint.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.sink.endpoint.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.sink.endpoint.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.sink.endpoint.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.sink.endpoint.sslKeyPassword* | The password of the private key in the key store file. This is optional for client. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystoreLocation* | The location of the key store file. This is optional for client and can be used for two-way authentication for client. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystorePassword* | The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. | null | MEDIUM
+| *camel.sink.endpoint.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS | "JKS" | MEDIUM
+| *camel.sink.endpoint.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.sink.endpoint.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.sink.endpoint.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.sink.endpoint.sslTruststoreLocation* | The location of the trust store file. | null | MEDIUM
+| *camel.sink.endpoint.sslTruststorePassword* | The password for the trust store file. | null | MEDIUM
+| *camel.sink.endpoint.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.additionalProperties* | Sets additional properties for either the kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties., e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.component.kafka.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.component.kafka.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.component.kafka.configuration* | Allows pre-configuring the Kafka component with common options that the endpoints will reuse. | null | MEDIUM
+| *camel.component.kafka.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.component.kafka.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.component.kafka.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.component.kafka.bufferMemorySize* | The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server, the producer will either block or throw an exception based on the preference specified by block.on.buffer.full. This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. S [...]
+| *camel.component.kafka.compressionCodec* | This parameter allows you to specify the compression codec for all data generated by this producer. Valid values are none, gzip, snappy and lz4. One of: [none] [gzip] [snappy] [lz4] | "none" | MEDIUM
+| *camel.component.kafka.connectionMaxIdleMs* | Close idle connections after the number of milliseconds specified by this config. | "540000" | MEDIUM
+| *camel.component.kafka.enableIdempotence* | If set to 'true' the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries may write duplicates of the retried message in the stream. If set to true this option will require max.in.flight.requests.per.connection to be set to 1 and retries cannot be zero and additionally acks must be set to 'all'. | false | MEDIUM
+| *camel.component.kafka.kafkaHeaderSerializer* | To use a custom KafkaHeaderSerializer to serialize kafka header values. | null | MEDIUM
+| *camel.component.kafka.key* | The record key (or null if no key is specified). If this option has been configured then it takes precedence over the header KafkaConstants#KEY. | null | MEDIUM
+| *camel.component.kafka.keySerializerClass* | The serializer class for keys (defaults to the same as for messages if nothing is given). | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.component.kafka.lazyStartProducer* | Whether the producer should be started lazily (on the first message). By starting lazily you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy, the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then c [...]
+| *camel.component.kafka.lingerMs* | The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However, in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay; that is, rather than immediately sending out a record, the prod [...]
+| *camel.component.kafka.maxBlockMs* | The configuration controls how long sending to kafka will block. These methods can be blocked for multiple reasons, e.g. buffer full, metadata unavailable. This configuration imposes a maximum limit on the total time spent in fetching metadata, serialization of key and value, partitioning and allocation of buffer memory when doing a send(). In case of partitionsFor(), this configuration imposes a maximum time threshold on waiting for metadata | "60 [...]
+| *camel.component.kafka.maxInFlightRequest* | The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled). | "5" | MEDIUM
+| *camel.component.kafka.maxRequestSize* | The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. | "1048576" | MEDIUM
+| *camel.component.kafka.metadataMaxAgeMs* | The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions. | "300000" | MEDIUM
+| *camel.component.kafka.metricReporters* | A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics. | null | MEDIUM
+| *camel.component.kafka.metricsSampleWindowMs* | The number of samples maintained to compute metrics. | "30000" | MEDIUM
+| *camel.component.kafka.noOfMetricsSample* | The number of samples maintained to compute metrics. | "2" | MEDIUM
+| *camel.component.kafka.partitioner* | The partitioner class for partitioning messages amongst sub-topics. The default partitioner is based on the hash of the key. | "org.apache.kafka.clients.producer.internals.DefaultPartitioner" | MEDIUM
+| *camel.component.kafka.partitionKey* | The partition to which the record will be sent (or null if no partition was specified). If this option has been configured then it takes precedence over the header KafkaConstants#PARTITION_KEY. | null | MEDIUM
+| *camel.component.kafka.producerBatchSize* | The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes. No attempt will be made to batch records larger than this size. Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent. A small batch size [...]
+| *camel.component.kafka.queueBufferingMaxMessages* | The maximum number of unsent messages that can be queued up by the producer when using async mode before either the producer must be blocked or data must be dropped. | "10000" | MEDIUM
+| *camel.component.kafka.receiveBufferBytes* | The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. | "65536" | MEDIUM
+| *camel.component.kafka.reconnectBackoffMs* | The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker. | "50" | MEDIUM
+| *camel.component.kafka.recordMetadata* | Whether the producer should store the RecordMetadata results from sending to Kafka. The results are stored in a List containing the RecordMetadata items. The list is stored on a header with the key KafkaConstants#KAFKA_RECORDMETA. | true | MEDIUM
+| *camel.component.kafka.requestRequiredAcks* | The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are common: acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received t [...]
+| *camel.component.kafka.requestTimeoutMs* | The amount of time the broker will wait trying to meet the request.required.acks requirement before sending back an error to the client. | "30000" | MEDIUM
+| *camel.component.kafka.retries* | Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries will potentially change the ordering of records because if two records are sent to a single partition, and the first fails and is retried but the second succeeds, then the second record may appear first. | "0" [...]
+| *camel.component.kafka.retryBackoffMs* | Before each retry, the producer refreshes the metadata of relevant topics to see if a new leader has been elected. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. | "100" | MEDIUM
+| *camel.component.kafka.sendBufferBytes* | Socket write buffer size | "131072" | MEDIUM
+| *camel.component.kafka.serializerClass* | The serializer class for messages. | "org.apache.kafka.common.serialization.StringSerializer" | MEDIUM
+| *camel.component.kafka.workerPool* | To use a custom worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. If using this option then you must handle the lifecycle of the thread pool to shut the pool down when no longer needed. | null | MEDIUM
+| *camel.component.kafka.workerPoolCoreSize* | Number of core threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. | "10" | MEDIUM
+| *camel.component.kafka.workerPoolMaxSize* | Maximum number of threads for the worker pool for continuing to route the Exchange after the kafka server has acknowledged the message that was sent to it from the KafkaProducer using asynchronous non-blocking processing. | "20" | MEDIUM
+| *camel.component.kafka.basicPropertyBinding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.component.kafka.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.component.kafka.interceptorClasses* | Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.component.kafka.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.component.kafka.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.component.kafka.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}@\{REALM\} are mapped to \{username\}. For more details on the format, please see the security authorization [...]
+| *camel.component.kafka.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.component.kafka.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.component.kafka.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.component.kafka.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.component.kafka.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.component.kafka.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported | "PLAINTEXT" | MEDIUM
+| *camel.component.kafka.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.component.kafka.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.component.kafka.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.component.kafka.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.component.kafka.sslKeyPassword* | The password of the private key in the key store file. This is optional for client. | null | MEDIUM
+| *camel.component.kafka.sslKeystoreLocation* | The location of the key store file. This is optional for client and can be used for two-way authentication for client. | null | MEDIUM
+| *camel.component.kafka.sslKeystorePassword* | The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured. | null | MEDIUM
+| *camel.component.kafka.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS | "JKS" | MEDIUM
+| *camel.component.kafka.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.component.kafka.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.component.kafka.sslTruststoreLocation* | The location of the trust store file. | null | MEDIUM
+| *camel.component.kafka.sslTruststorePassword* | The password for the trust store file. | null | MEDIUM
+| *camel.component.kafka.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.useGlobalSslContextParameters* | Enable usage of global SSL context parameters. | false | MEDIUM
+|===
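+
+As a worked example combining options from the table above (the broker addresses, truststore location and password are placeholders, and the values shown are illustrative rather than recommended), the producer durability and SSL options fit together like this:
+
+[source,properties]
+----
+camel.sink.path.topic=destination-topic
+camel.sink.endpoint.brokers=broker1:9093,broker2:9093
+# wait for the leader and all in-sync replicas to acknowledge each record
+camel.sink.endpoint.requestRequiredAcks=all
+# retry transient send failures (may reorder records, see the retries option above)
+camel.sink.endpoint.retries=3
+# encrypt traffic to the brokers; keystore options are only needed for two-way authentication
+camel.sink.endpoint.securityProtocol=SSL
+camel.sink.endpoint.sslTruststoreLocation=/path/to/truststore.jks
+camel.sink.endpoint.sslTruststorePassword=changeit
+----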
+
+
+
+The camel-kafka sink connector has no converters out of the box.
+
+
+
+
+
+The camel-kafka sink connector has no transforms out of the box.
+
+
+
+
+
+The camel-kafka sink connector has no aggregation strategies out of the box.
+
+
+
+
+// kafka-connector options: END
diff --git a/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-source-connector.adoc b/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-source-connector.adoc
new file mode 100644
index 0000000..102aff0
--- /dev/null
+++ b/docs/modules/ROOT/pages/connectors/camel-kafka-kafka-source-connector.adoc
@@ -0,0 +1,174 @@
+// kafka-connector options: START
+[[camel-kafka-kafka-connector-source]]
+= camel-kafka-kafka-connector source configuration
+
+When using camel-kafka-kafka-connector as a source, make sure to use the following Maven dependency to have support for the connector:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel.kafkaconnector</groupId>
+  <artifactId>camel-kafka-kafka-connector</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel Kafka connector version -->
+</dependency>
+----
+
+To use this Source connector in Kafka Connect you'll need to set the following connector.class:
+
+[source,java]
+----
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+----
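+
+As with the sink, a minimal source configuration could look like the following sketch (the connector name, topics, broker address and consumer group are placeholders; the camel.source.* options are described in the table below):
+
+[source,properties]
+----
+name=CamelKafkaSourceConnector
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+# Kafka Connect topic the source writes the consumed records to
+topics=mytopic
+# Camel Kafka endpoint options: topic(s) to consume from, brokers and consumer group
+camel.source.path.topic=source-topic
+camel.source.endpoint.brokers=localhost:9092
+camel.source.endpoint.groupId=my-consumer-group
+----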
+
+
+The camel-kafka source connector supports 121 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Priority
+| *camel.source.path.topic* | Name of the topic to use. On the consumer you can use a comma to separate multiple topics. A producer can only send a message to a single topic. | null | HIGH
+| *camel.source.endpoint.additionalProperties* | Sets additional properties for either the kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties., e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.source.endpoint.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.source.endpoint.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.source.endpoint.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.source.endpoint.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.source.endpoint.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.source.endpoint.allowManualCommit* | Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer. | false | MEDIUM
+| *camel.source.endpoint.autoCommitEnable* | If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin. | "true" | MEDIUM
+| *camel.source.endpoint.autoCommitIntervalMs* | The frequency in ms that the consumer offsets are committed to zookeeper. | "5000" | MEDIUM
+| *camel.source.endpoint.autoCommitOnStop* | Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires that the option autoCommitEnable is turned on. The possible values are: sync, async, or none; sync is the default value. One of: [sync] [async] [none] | "sync" | MEDIUM
+| *camel.source.endpoint.autoOffsetReset* | What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none] | "latest" | MEDIUM
+| *camel.source.endpoint.breakOnFirstError* | This option controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to the offset of the message that caused a failure, and then re-attempt to process this message. However, this can lead to endless processing of the same message if it's bound to fail every time, e.g. a [...]
+| *camel.source.endpoint.bridgeErrorHandler* | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occurred while the consumer is trying to pick up incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | MEDIUM
+| *camel.source.endpoint.checkCrcs* | Automatically check the CRC32 of the records consumed. This ensures that no on-the-wire or on-disk corruption to the messages has occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance. | "true" | MEDIUM
+| *camel.source.endpoint.consumerRequestTimeoutMs* | The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. | "40000" | MEDIUM
+| *camel.source.endpoint.consumersCount* | The number of consumers that connect to the Kafka server | 1 | MEDIUM
+| *camel.source.endpoint.consumerStreams* | Number of concurrent consumers on the consumer | 10 | MEDIUM
+| *camel.source.endpoint.fetchMaxBytes* | The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs mul [...]
+| *camel.source.endpoint.fetchMinBytes* | The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. | "1" | MEDIUM
+| *camel.source.endpoint.fetchWaitMaxMs* | The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes | "500" | MEDIUM
+| *camel.source.endpoint.groupId* | A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id, multiple processes indicate that they are all part of the same consumer group. This option is required for consumers. | null | MEDIUM
+| *camel.source.endpoint.heartbeatIntervalMs* | The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal r [...]
+| *camel.source.endpoint.kafkaHeaderDeserializer* | To use a custom KafkaHeaderDeserializer to deserialize kafka header values. | null | MEDIUM
+| *camel.source.endpoint.keyDeserializer* | Deserializer class for key that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.source.endpoint.maxPartitionFetchBytes* | The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions * max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition. | "1048576 [...]
+| *camel.source.endpoint.maxPollIntervalMs* | The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. | null | MEDIUM
+| *camel.source.endpoint.maxPollRecords* | The maximum number of records returned in a single call to poll() | "500" | MEDIUM
+| *camel.source.endpoint.offsetRepository* | The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit. | null | MEDIUM
+| *camel.source.endpoint.partitionAssignor* | The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used | "org.apache.kafka.clients.consumer.RangeAssignor" | MEDIUM
+| *camel.source.endpoint.pollTimeoutMs* | The timeout used when polling the KafkaConsumer. | "5000" | MEDIUM
+| *camel.source.endpoint.seekTo* | Set if KafkaConsumer will read from beginning or end on startup: beginning: read from the beginning; end: read from the end. This is replacing the earlier property seekToBeginning. One of: [beginning] [end] | null | MEDIUM
+| *camel.source.endpoint.sessionTimeoutMs* | The timeout used to detect failures when using Kafka's group management facilities. | "10000" | MEDIUM
+| *camel.source.endpoint.specificAvroReader* | This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka) | false | MEDIUM
+| *camel.source.endpoint.topicIsPattern* | Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern. | false | MEDIUM
+| *camel.source.endpoint.valueDeserializer* | Deserializer class for value that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.source.endpoint.exceptionHandler* | To let the consumer use a custom ExceptionHandler. Notice that if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, which will be logged at WARN or ERROR level and ignored. | null | MEDIUM
+| *camel.source.endpoint.exchangePattern* | Sets the exchange pattern when the consumer creates an exchange. One of: [InOnly] [InOut] [InOptionalOut] | null | MEDIUM
+| *camel.source.endpoint.basicPropertyBinding* | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | MEDIUM
+| *camel.source.endpoint.synchronous* | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | MEDIUM
+| *camel.source.endpoint.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka) | null | MEDIUM
+| *camel.source.endpoint.interceptorClasses* | Sets interceptors for producer or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor. Consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a Producer interceptor on a consumer it will throw a class cast exception at runtime. | null | MEDIUM
+| *camel.source.endpoint.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.source.endpoint.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit | "/usr/bin/kinit" | MEDIUM
+| *camel.source.endpoint.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}@\{REALM\} are mapped to \{username\}. For more details on the format, please see the security authorization [...]
+| *camel.source.endpoint.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.source.endpoint.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.source.endpoint.saslJaasConfig* | Expose the kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.source.endpoint.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.source.endpoint.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.source.endpoint.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported | "PLAINTEXT" | MEDIUM
+| *camel.source.endpoint.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.source.endpoint.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.source.endpoint.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.source.endpoint.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.source.endpoint.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.source.endpoint.sslKeystoreType* | The file format of the key store file. This is optional for client. Default value is JKS | "JKS" | MEDIUM
+| *camel.source.endpoint.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.source.endpoint.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.source.endpoint.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.source.endpoint.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.additionalProperties* | Sets additional properties for either the kafka consumer or kafka producer in case they can't be set directly on the camel configurations (e.g. new Kafka properties that are not reflected yet in Camel configurations). The properties have to be prefixed with additionalProperties., e.g. additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=\http://localhost:8811/avro | null | MEDIUM
+| *camel.component.kafka.brokers* | URL of the Kafka brokers to use. The format is host1:port1,host2:port2, and the list can be a subset of brokers or a VIP pointing to a subset of brokers. This option is known as bootstrap.servers in the Kafka documentation. | null | MEDIUM
+| *camel.component.kafka.clientId* | The client id is a user-specified string sent in each request to help trace calls. It should logically identify the application making the request. | null | MEDIUM
+| *camel.component.kafka.configuration* | Allows pre-configuring the Kafka component with common options that the endpoints will reuse. | null | MEDIUM
+| *camel.component.kafka.headerFilterStrategy* | To use a custom HeaderFilterStrategy to filter headers to and from the Camel message. | null | MEDIUM
+| *camel.component.kafka.reconnectBackoffMaxMs* | The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms. | "1000" | MEDIUM
+| *camel.component.kafka.shutdownTimeout* | Timeout in milliseconds to wait gracefully for the consumer or producer to shut down and terminate its worker threads. | 30000 | MEDIUM
+| *camel.component.kafka.allowManualCommit* | Whether to allow doing manual commits via KafkaManualCommit. If this option is enabled then an instance of KafkaManualCommit is stored on the Exchange message header, which allows end users to access this API and perform manual offset commits via the Kafka consumer. | false | MEDIUM
+| *camel.component.kafka.autoCommitEnable* | If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin. | "true" | MEDIUM
+| *camel.component.kafka.autoCommitIntervalMs* | The frequency in ms that the consumer offsets are committed to zookeeper. | "5000" | MEDIUM
+| *camel.component.kafka.autoCommitOnStop* | Whether to perform an explicit auto commit when the consumer stops to ensure the broker has a commit from the last consumed message. This requires that the option autoCommitEnable is turned on. The possible values are sync, async, or none; sync is the default value. One of: [sync] [async] [none] | "sync" | MEDIUM
+| *camel.component.kafka.autoOffsetReset* | What to do when there is no initial offset in ZooKeeper or if an offset is out of range: earliest: automatically reset the offset to the earliest offset; latest: automatically reset the offset to the latest offset; fail: throw an exception to the consumer. One of: [latest] [earliest] [none] | "latest" | MEDIUM
+| *camel.component.kafka.breakOnFirstError* | This option controls what happens when a consumer is processing an exchange and it fails. If the option is false then the consumer continues to the next message and processes it. If the option is true then the consumer breaks out, and will seek back to the offset of the message that caused a failure, and then re-attempt to process this message. However this can lead to endless processing of the same message if it's bound to fail every time, eg a  [...]
+| *camel.component.kafka.bridgeErrorHandler* | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | MEDIUM
+| *camel.component.kafka.checkCrcs* | Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance. | "true" | MEDIUM
+| *camel.component.kafka.consumerRequestTimeoutMs* | The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. | "40000" | MEDIUM
+| *camel.component.kafka.consumersCount* | The number of consumers that connect to the Kafka server. | 1 | MEDIUM
+| *camel.component.kafka.consumerStreams* | Number of concurrent consumer streams on the consumer. | 10 | MEDIUM
+| *camel.component.kafka.fetchMaxBytes* | The maximum amount of data the server should return for a fetch request. This is not an absolute maximum: if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress. The maximum message size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). Note that the consumer performs mul [...]
+| *camel.component.kafka.fetchMinBytes* | The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. | "1" | MEDIUM
+| *camel.component.kafka.fetchWaitMaxMs* | The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy fetch.min.bytes. | "500" | MEDIUM
+| *camel.component.kafka.groupId* | A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same group id, multiple processes indicate that they are all part of the same consumer group. This option is required for consumers. | null | MEDIUM
+| *camel.component.kafka.heartbeatIntervalMs* | The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal r [...]
+| *camel.component.kafka.kafkaHeaderDeserializer* | To use a custom KafkaHeaderDeserializer to deserialize Kafka header values. | null | MEDIUM
+| *camel.component.kafka.keyDeserializer* | Deserializer class for key that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.component.kafka.maxPartitionFetchBytes* | The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be #partitions x max.partition.fetch.bytes. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition. | "1048576 [...]
+| *camel.component.kafka.maxPollIntervalMs* | The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. | null | MEDIUM
+| *camel.component.kafka.maxPollRecords* | The maximum number of records returned in a single call to poll(). | "500" | MEDIUM
+| *camel.component.kafka.offsetRepository* | The offset repository to use in order to locally store the offset of each partition of the topic. Defining one will disable the autocommit. | null | MEDIUM
+| *camel.component.kafka.partitionAssignor* | The class name of the partition assignment strategy that the client will use to distribute partition ownership amongst consumer instances when group management is used. | "org.apache.kafka.clients.consumer.RangeAssignor" | MEDIUM
+| *camel.component.kafka.pollTimeoutMs* | The timeout used when polling the KafkaConsumer. | "5000" | MEDIUM
+| *camel.component.kafka.seekTo* | Set if KafkaConsumer will read from the beginning or the end on startup: beginning: read from the beginning; end: read from the end. This is replacing the earlier property seekToBeginning. One of: [beginning] [end] | null | MEDIUM
+| *camel.component.kafka.sessionTimeoutMs* | The timeout used to detect failures when using Kafka's group management facilities. | "10000" | MEDIUM
+| *camel.component.kafka.specificAvroReader* | This enables the use of a specific Avro reader for use with the Confluent Platform schema registry and the io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only available in the Confluent Platform (not standard Apache Kafka). | false | MEDIUM
+| *camel.component.kafka.topicIsPattern* | Whether the topic is a pattern (regular expression). This can be used to subscribe to a dynamic number of topics matching the pattern. | false | MEDIUM
+| *camel.component.kafka.valueDeserializer* | Deserializer class for value that implements the Deserializer interface. | "org.apache.kafka.common.serialization.StringDeserializer" | MEDIUM
+| *camel.component.kafka.kafkaManualCommitFactory* | Factory to use for creating KafkaManualCommit instances. This allows plugging in a custom factory to create custom KafkaManualCommit instances in case special logic is needed when doing manual commits that deviates from the default implementation that comes out of the box. | null | MEDIUM
+| *camel.component.kafka.basicPropertyBinding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities. | false | MEDIUM
+| *camel.component.kafka.schemaRegistryURL* | URL of the Confluent Platform schema registry servers to use. The format is host1:port1,host2:port2. This is known as schema.registry.url in the Confluent Platform documentation. This option is only available in the Confluent Platform (not standard Apache Kafka). | null | MEDIUM
+| *camel.component.kafka.interceptorClasses* | Sets interceptors for producers or consumers. Producer interceptors have to be classes implementing org.apache.kafka.clients.producer.ProducerInterceptor; consumer interceptors have to be classes implementing org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you use a producer interceptor on a consumer it will throw a ClassCastException at runtime. | null | MEDIUM
+| *camel.component.kafka.kerberosBeforeReloginMinTime* | Login thread sleep time between refresh attempts. | "60000" | MEDIUM
+| *camel.component.kafka.kerberosInitCmd* | Kerberos kinit command path. Default is /usr/bin/kinit. | "/usr/bin/kinit" | MEDIUM
+| *camel.component.kafka.kerberosPrincipalToLocalRules* | A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form \{username\}/\{hostname\}@\{REALM\} are mapped to \{username\}. For more details on the format please see the security authorization [...]
+| *camel.component.kafka.kerberosRenewJitter* | Percentage of random jitter added to the renewal time. | "0.05" | MEDIUM
+| *camel.component.kafka.kerberosRenewWindowFactor* | Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket. | "0.8" | MEDIUM
+| *camel.component.kafka.saslJaasConfig* | Expose the Kafka sasl.jaas.config parameter. Example: org.apache.kafka.common.security.plain.PlainLoginModule required username=USERNAME password=PASSWORD; | null | MEDIUM
+| *camel.component.kafka.saslKerberosServiceName* | The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config. | null | MEDIUM
+| *camel.component.kafka.saslMechanism* | The Simple Authentication and Security Layer (SASL) Mechanism used. For the valid values see \http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml | "GSSAPI" | MEDIUM
+| *camel.component.kafka.securityProtocol* | Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT and SSL are supported. | "PLAINTEXT" | MEDIUM
+| *camel.component.kafka.sslCipherSuites* | A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported. | null | MEDIUM
+| *camel.component.kafka.sslContextParameters* | SSL configuration using a Camel SSLContextParameters object. If configured it's applied before the other SSL endpoint parameters. NOTE: Kafka only supports loading keystore from file locations, so prefix the location with file: in the KeyStoreParameters.resource option. | null | MEDIUM
+| *camel.component.kafka.sslEnabledProtocols* | The list of protocols enabled for SSL connections. TLSv1.2, TLSv1.1 and TLSv1 are enabled by default. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslEndpointAlgorithm* | The endpoint identification algorithm to validate server hostname using server certificate. | "https" | MEDIUM
+| *camel.component.kafka.sslKeymanagerAlgorithm* | The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine. | "SunX509" | MEDIUM
+| *camel.component.kafka.sslKeystoreType* | The file format of the key store file. This is optional for the client. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.sslProtocol* | The SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. | "TLSv1.2" | MEDIUM
+| *camel.component.kafka.sslProvider* | The name of the security provider used for SSL connections. Default value is the default security provider of the JVM. | null | MEDIUM
+| *camel.component.kafka.sslTrustmanagerAlgorithm* | The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine. | "PKIX" | MEDIUM
+| *camel.component.kafka.sslTruststoreType* | The file format of the trust store file. Default value is JKS. | "JKS" | MEDIUM
+| *camel.component.kafka.useGlobalSslContextParameters* | Enable usage of global SSL context parameters. | false | MEDIUM
+|===
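+
+As a quick illustration of how the options above are wired together, here is a minimal source connector configuration sketch. Only the camel.component.kafka.* keys are taken from the table above; the connector class name, the camel.source.path.topic key, and all host, group and topic names are placeholder assumptions for illustration.
+
+[source,properties]
+----
+# Minimal sketch of a source connector configuration (names are placeholders).
+name=CamelKafkaSourceConnectorExample
+# Assumption: the connector class follows the project naming convention.
+connector.class=org.apache.camel.kafkaconnector.kafka.CamelKafkaSourceConnector
+tasks.max=1
+
+# Kafka Connect topic the connector publishes records to.
+topics=connect-output-topic
+
+# Assumption: endpoint path option naming the topic consumed by the Camel Kafka consumer.
+camel.source.path.topic=source-topic
+
+# Options documented in the table above.
+camel.component.kafka.brokers=localhost:9092
+camel.component.kafka.groupId=my-consumer-group
+camel.component.kafka.autoOffsetReset=earliest
+----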
+
+
+
+The camel-kafka source connector has no converters out of the box.
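+
+In practice this means the standard converters that ship with Kafka Connect itself are used instead. A minimal sketch, assuming plain string keys and values:
+
+[source,properties]
+----
+# StringConverter is part of Kafka Connect, not of this connector.
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+----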
+
+
+
+
+
+The camel-kafka source connector has no transforms out of the box.
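+
+Standard Kafka Connect single message transforms (SMTs) can still be applied to the records this connector produces. A minimal sketch using the InsertField transform that ships with Kafka Connect; the transform alias and the field name and value are placeholder assumptions:
+
+[source,properties]
+----
+# Tag every record value with a static field (InsertField ships with Kafka Connect).
+transforms=InsertSource
+transforms.InsertSource.type=org.apache.kafka.connect.transforms.InsertField$Value
+transforms.InsertSource.static.field=origin
+transforms.InsertSource.static.value=camel-kafka-source
+----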
+
+
+
+
+
+The camel-kafka source connector has no aggregation strategies out of the box.
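+
+Where aggregation of exchanges is wanted, a strategy can be supplied explicitly through the runtime's bean-binding configuration. The key names and the aggregator class in the sketch below follow the project's documented conventions but are assumptions here; verify them against the runtime documentation before relying on them:
+
+[source,properties]
+----
+# Assumption: bind an aggregation strategy as a bean and batch exchanges before delivery.
+camel.beans.aggregate=#class:org.apache.camel.kafkaconnector.aggregator.StringAggregator
+camel.aggregation.size=10
+camel.aggregation.timeout=5000
+----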
+
+
+
+
+// kafka-connector options: END