You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@camel.apache.org by da...@apache.org on 2022/04/04 05:31:48 UTC

[camel-spring-boot] branch main updated: "CAMEL-17092:camel-debezium-oracle-starter" (#495)

This is an automated email from the ASF dual-hosted git repository.

davsclaus pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-spring-boot.git


The following commit(s) were added to refs/heads/main by this push:
     new 6e2d6b055b9 "CAMEL-17092:camel-debezium-oracle-starter" (#495)
6e2d6b055b9 is described below

commit 6e2d6b055b9d9c874bd7450415f0095d62cfea2a
Author: Ramu <kr...@gmail.com>
AuthorDate: Mon Apr 4 11:01:42 2022 +0530

    "CAMEL-17092:camel-debezium-oracle-starter" (#495)
    
    Co-authored-by: Kodanda Ramu Kakarla <kk...@kkakarla.pnq.csb>
---
 .../camel-debezium-oracle-starter/pom.xml          |   48 +
 .../src/main/docs/debezium-oracle.json             |  700 +++++++++
 .../DebeziumOracleComponentAutoConfiguration.java  |   79 ++
 .../DebeziumOracleComponentConfiguration.java      | 1494 ++++++++++++++++++++
 .../DebeziumOracleComponentConverter.java          |   65 +
 .../src/main/resources/META-INF/LICENSE.txt        |  203 +++
 .../src/main/resources/META-INF/NOTICE.txt         |   11 +
 .../src/main/resources/META-INF/spring.factories   |   21 +
 .../src/main/resources/META-INF/spring.provides    |   17 +
 9 files changed, 2638 insertions(+)

diff --git a/components-starter/camel-debezium-oracle-starter/pom.xml b/components-starter/camel-debezium-oracle-starter/pom.xml
new file mode 100644
index 00000000000..e1b6683455b
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/pom.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.camel.springboot</groupId>
+    <artifactId>components-starter</artifactId>
+    <version>3.17.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>camel-debezium-oracle-starter</artifactId>
+  <packaging>jar</packaging>
+  <!--    <name>Camel SB Starters :: Name Here</name>-->
+  <dependencies>
+    <dependency>
+      <groupId>org.springframework.boot</groupId>
+      <artifactId>spring-boot-starter</artifactId>
+      <version>${spring-boot-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.camel</groupId>
+      <artifactId>camel-debezium-oracle</artifactId>
+      <version>${camel-version}</version>
+    </dependency>
+    <!--START OF GENERATED CODE-->
+    <dependency>
+      <groupId>org.apache.camel.springboot</groupId>
+      <artifactId>camel-core-starter</artifactId>
+    </dependency>
+    <!--END OF GENERATED CODE-->
+  </dependencies>
+</project>
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/docs/debezium-oracle.json b/components-starter/camel-debezium-oracle-starter/src/main/docs/debezium-oracle.json
new file mode 100644
index 00000000000..39127669735
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/docs/debezium-oracle.json
@@ -0,0 +1,700 @@
+{
+  "groups": [
+    {
+      "name": "camel.component.debezium-oracle",
+      "type": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.customizer",
+      "type": "org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon$CustomizerProperties",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "sourceMethod": "getCustomizer()"
+    }
+  ],
+  "properties": [
+    {
+      "name": "camel.component.debezium-oracle.additional-properties",
+      "type": "java.util.Map<java.lang.String,java.lang.Object>",
+      "description": "Additional properties for debezium components in case they can't be set directly on the camel configurations (e.g: setting Kafka Connect properties needed by Debezium engine, for example setting KafkaOffsetBackingStore), the properties have to be prefixed with additionalProperties.. E.g: additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http:\/\/localhost:8811\/avro",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.autowired-enabled",
+      "type": "java.lang.Boolean",
+      "description": "Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": true
+    },
+    {
+      "name": "camel.component.debezium-oracle.binary-handling-mode",
+      "type": "java.lang.String",
+      "description": "Specify how binary (blob, binary, etc.) columns should be represented in change events, including:'bytes' represents binary data as byte array (default)'base64' represents binary data as base64-encoded string'hex' represents binary data as hex-encoded (base16) string",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "bytes"
+    },
+    {
+      "name": "camel.component.debezium-oracle.bridge-error-handler",
+      "type": "java.lang.Boolean",
+      "description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.column-blacklist",
+      "type": "java.lang.String",
+      "description": "Regular expressions matching columns to exclude from change events (deprecated, use column.exclude.list instead)",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.column-exclude-list",
+      "type": "java.lang.String",
+      "description": "Regular expressions matching columns to exclude from change events",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.column-include-list",
+      "type": "java.lang.String",
+      "description": "Regular expressions matching columns to include in change events",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.column-propagate-source-type",
+      "type": "java.lang.String",
+      "description": "A comma-separated list of regular expressions matching fully-qualified names of columns that adds the columns original type and original length as parameters to the corresponding field schemas in the emitted change records.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.column-whitelist",
+      "type": "java.lang.String",
+      "description": "Regular expressions matching columns to include in change events (deprecated, use column.include.list instead)",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.configuration",
+      "type": "org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration",
+      "description": "Allow pre-configured Configurations to be set. The option is a org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.converters",
+      "type": "java.lang.String",
+      "description": "Optional list of custom converters that would be used instead of default ones. The converters are defined using '.type' config option and configured using options '.'",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.customizer.enabled",
+      "type": "java.lang.Boolean",
+      "sourceType": "org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon$CustomizerProperties"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-connection-adapter",
+      "type": "java.lang.String",
+      "description": "The adapter to use when capturing changes from the database. Options include: 'logminer': (the default) to capture changes using native Oracle LogMiner; 'xstream' to capture changes using Oracle XStreams",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "LogMiner"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-dbname",
+      "type": "java.lang.String",
+      "description": "The name of the database from which the connector should capture changes",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history",
+      "type": "java.lang.String",
+      "description": "The name of the DatabaseHistory class that should be used to store and recover database schema changes. The configuration properties for the history are prefixed with the 'database.history.' string.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "class io.debezium.relational.history.KafkaDatabaseHistory"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-file-filename",
+      "type": "java.lang.String",
+      "description": "The path to the file that will be used to record the database history",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-kafka-bootstrap-servers",
+      "type": "java.lang.String",
+      "description": "A list of host\/port pairs that the connector will use for establishing the initial connection to the Kafka cluster for retrieving database schema history previously stored by the connector. This should point to the same Kafka cluster used by the Kafka Connect process.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-kafka-recovery-attempts",
+      "type": "java.lang.Integer",
+      "description": "The number of attempts in a row that no data are returned from Kafka before recover completes. The maximum amount of time to wait after receiving no data is (recovery.attempts) x (recovery.poll.interval.ms).",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 100
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-kafka-recovery-poll-interval-ms",
+      "type": "java.lang.Integer",
+      "description": "The number of milliseconds to wait while polling for persisted data during recovery. The option is a int type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 100
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-kafka-topic",
+      "type": "java.lang.String",
+      "description": "The name of the topic for the database schema history",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-skip-unparseable-ddl",
+      "type": "java.lang.Boolean",
+      "description": "Controls the action Debezium will take when it meets a DDL statement in binlog, that it cannot parse.By default the connector will stop operating but by changing the setting it can ignore the statements which it cannot parse. If skipping is enabled then Debezium can miss metadata changes.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-store-only-captured-tables-ddl",
+      "type": "java.lang.Boolean",
+      "description": "Controls what DDL will Debezium store in database history. By default (false) Debezium will store all incoming DDL statements. If set to true, then only DDL that manipulates a captured table will be stored.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-history-store-only-monitored-tables-ddl",
+      "type": "java.lang.Boolean",
+      "description": "Controls what DDL will Debezium store in database history. By default (false) Debezium will store all incoming DDL statements. If set to true, then only DDL that manipulates a monitored table will be stored (deprecated, use database.history.store.only.captured.tables.ddl instead)",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-hostname",
+      "type": "java.lang.String",
+      "description": "Resolvable hostname or IP address of the database server.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-out-server-name",
+      "type": "java.lang.String",
+      "description": "Name of the XStream Out server to connect to.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-password",
+      "type": "java.lang.String",
+      "description": "Password of the database user to be used when connecting to the database.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-pdb-name",
+      "type": "java.lang.String",
+      "description": "Name of the pluggable database when working with a multi-tenant set-up. The CDB name must be given via database.dbname in this case.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-port",
+      "type": "java.lang.Integer",
+      "description": "Port of the database server.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 1528
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-server-name",
+      "type": "java.lang.String",
+      "description": "Unique name that identifies the database server and all recorded offsets, and that is used as a prefix for all schemas and topics. Each distinct installation should have a separate namespace and be monitored by at most one Debezium connector.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-url",
+      "type": "java.lang.String",
+      "description": "Complete JDBC URL as an alternative to specifying hostname, port and database provided as a way to support alternative connection scenarios.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.database-user",
+      "type": "java.lang.String",
+      "description": "Name of the database user to be used when connecting to the database.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.datatype-propagate-source-type",
+      "type": "java.lang.String",
+      "description": "A comma-separated list of regular expressions matching the database-specific data type names that adds the data type's original type and original length as parameters to the corresponding field schemas in the emitted change records.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.decimal-handling-mode",
+      "type": "java.lang.String",
+      "description": "Specify how DECIMAL and NUMERIC columns should be represented in change events, including:'precise' (the default) uses java.math.BigDecimal to represent values, which are encoded in the change events using a binary representation and Kafka Connect's 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to represent values; 'double' represents values using Java's 'double', which may not offer the precision but will be far easier to use in consumers.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "precise"
+    },
+    {
+      "name": "camel.component.debezium-oracle.enabled",
+      "type": "java.lang.Boolean",
+      "description": "Whether to enable auto configuration of the debezium-oracle component. This is enabled by default.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.event-processing-failure-handling-mode",
+      "type": "java.lang.String",
+      "description": "Specify how failures during processing of events (i.e. when encountering a corrupted event) should be handled, including:'fail' (the default) an exception indicating the problematic event and its position is raised, causing the connector to be stopped; 'warn' the problematic event and its position will be logged and the event will be skipped;'ignore' the problematic event will be skipped.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "fail"
+    },
+    {
+      "name": "camel.component.debezium-oracle.heartbeat-action-query",
+      "type": "java.lang.String",
+      "description": "The query executed with every heartbeat.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.heartbeat-interval-ms",
+      "type": "java.lang.Integer",
+      "description": "Length of an interval in milli-seconds in in which the connector periodically sends heartbeat messages to a heartbeat topic. Use 0 to disable heartbeat messages. Disabled by default. The option is a int type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.heartbeat-topics-prefix",
+      "type": "java.lang.String",
+      "description": "The prefix that is used to name heartbeat topics.Defaults to __debezium-heartbeat.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "__debezium-heartbeat"
+    },
+    {
+      "name": "camel.component.debezium-oracle.include-schema-changes",
+      "type": "java.lang.Boolean",
+      "description": "Whether the connector should publish changes in the database schema to a Kafka topic with the same name as the database server ID. Each schema change will be recorded using a key that contains the database name and whose value include logical description of the new schema and optionally the DDL statement(s).The default is 'true'. This is independent of how the connector internally records database history.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": true
+    },
+    {
+      "name": "camel.component.debezium-oracle.include-schema-comments",
+      "type": "java.lang.Boolean",
+      "description": "Whether the connector parse table and column's comment to metadata object.Note: Enable this option will bring the implications on memory usage. The number and size of ColumnImpl objects is what largely impacts how much memory is consumed by the Debezium connectors, and adding a String to each of them can potentially be quite heavy. The default is 'false'.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.internal-key-converter",
+      "type": "java.lang.String",
+      "description": "The Converter class that should be used to serialize and deserialize key data for offsets. The default is JSON converter.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "org.apache.kafka.connect.json.JsonConverter"
+    },
+    {
+      "name": "camel.component.debezium-oracle.internal-value-converter",
+      "type": "java.lang.String",
+      "description": "The Converter class that should be used to serialize and deserialize value data for offsets. The default is JSON converter.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "org.apache.kafka.connect.json.JsonConverter"
+    },
+    {
+      "name": "camel.component.debezium-oracle.interval-handling-mode",
+      "type": "java.lang.String",
+      "description": "Specify how INTERVAL columns should be represented in change events, including:'string' represents values as an exact ISO formatted string'numeric' (default) represents values using the inexact conversion into microseconds",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "numeric"
+    },
+    {
+      "name": "camel.component.debezium-oracle.lob-enabled",
+      "type": "java.lang.Boolean",
+      "description": "When set to false, the default, LOB fields will not be captured nor emitted. When set to true, the connector will capture LOB fields and emit changes for those fields like any other column type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-archive-destination-name",
+      "type": "java.lang.String",
+      "description": "Sets the specific archive log destination as the source for reading archive logs.When not set, the connector will automatically select the first LOCAL and VALID destination.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-archive-log-hours",
+      "type": "java.lang.Long",
+      "description": "The number of hours in the past from SYSDATE to mine archive logs. Using 0 mines all available archive logs",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-archive-log-only-mode",
+      "type": "java.lang.Boolean",
+      "description": "When set to false, the default, the connector will mine both archive log and redo logs to emit change events. When set to true, the connector will only mine archive logs. There are circumstances where its advantageous to only mine archive logs and accept latency in event emission due to frequent revolving redo logs.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-archive-log-only-scn-poll-interval-ms",
+      "type": "java.lang.Long",
+      "description": "The interval in milliseconds to wait between polls checking to see if the SCN is in the archive logs. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 10000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-batch-size-default",
+      "type": "java.lang.Long",
+      "description": "The starting SCN interval size that the connector will use for reading data from redo\/archive logs.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 20000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-batch-size-max",
+      "type": "java.lang.Long",
+      "description": "The maximum SCN interval size that this connector will use when reading from redo\/archive logs.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 100000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-batch-size-min",
+      "type": "java.lang.Long",
+      "description": "The minimum SCN interval size that this connector will try to read from redo\/archive logs. Active batch size will be also increased\/decreased by this amount for tuning connector throughput when needed.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 1000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-drop-on-stop",
+      "type": "java.lang.Boolean",
+      "description": "When set to true the underlying buffer cache is not retained when the connector is stopped. When set to false (the default), the buffer cache is retained across restarts.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-infinispan-cache-events",
+      "type": "java.lang.String",
+      "description": "Specifies the XML configuration for the Infinispan 'events' cache",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-infinispan-cache-processed-transactions",
+      "type": "java.lang.String",
+      "description": "Specifies the XML configuration for the Infinispan 'processed-transactions' cache",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-infinispan-cache-schema-changes",
+      "type": "java.lang.String",
+      "description": "Specifies the XML configuration for the Infinispan 'schema-changes' cache",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-infinispan-cache-transactions",
+      "type": "java.lang.String",
+      "description": "Specifies the XML configuration for the Infinispan 'transactions' cache",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-buffer-type",
+      "type": "java.lang.String",
+      "description": "The buffer type controls how the connector manages buffering transaction data. memory - Uses the JVM process' heap to buffer all transaction data. infinispan_embedded - This option uses an embedded Infinispan cache to buffer transaction data and persist it to disk. infinispan_remote - This option uses a remote Infinispan cluster to buffer transaction data and persist it to disk.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "memory"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-scn-gap-detection-gap-size-min",
+      "type": "java.lang.Long",
+      "description": "Used for SCN gap detection, if the difference between current SCN and previous end SCN is bigger than this value, and the time difference of current SCN and previous end SCN is smaller than log.mining.scn.gap.detection.time.interval.max.ms, consider it a SCN gap.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 1000000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-scn-gap-detection-time-interval-max-ms",
+      "type": "java.lang.Long",
+      "description": "Used for SCN gap detection, if the difference between current SCN and previous end SCN is bigger than log.mining.scn.gap.detection.gap.size.min, and the time difference of current SCN and previous end SCN is smaller than this value, consider it a SCN gap. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 20000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-sleep-time-default-ms",
+      "type": "java.lang.Long",
+      "description": "The amount of time that the connector will sleep after reading data from redo\/archive logs and before starting reading data again. Value is in milliseconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 1000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-sleep-time-increment-ms",
+      "type": "java.lang.Long",
+      "description": "The maximum amount of time that the connector will use to tune the optimal sleep time when reading data from LogMiner. Value is in milliseconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 200
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-sleep-time-max-ms",
+      "type": "java.lang.Long",
+      "description": "The maximum amount of time that the connector will sleep after reading data from redo\/archive logs and before starting reading data again. Value is in milliseconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 3000
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-sleep-time-min-ms",
+      "type": "java.lang.Long",
+      "description": "The minimum amount of time that the connector will sleep after reading data from redo\/archive logs and before starting reading data again. Value is in milliseconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-strategy",
+      "type": "java.lang.String",
+      "description": "There are strategies: Online catalog with faster mining but no captured DDL. Another - with data dictionary loaded into REDO LOG files",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "redo_log_catalog"
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-transaction-retention-hours",
+      "type": "java.lang.Long",
+      "description": "Hours to keep long running transactions in transaction buffer between log mining sessions. By default, all transactions are retained.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.log-mining-username-exclude-list",
+      "type": "java.lang.String",
+      "description": "Comma separated list of usernames to exclude from LogMiner query.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.max-batch-size",
+      "type": "java.lang.Integer",
+      "description": "Maximum size of each batch of source records. Defaults to 2048.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 2048
+    },
+    {
+      "name": "camel.component.debezium-oracle.max-queue-size",
+      "type": "java.lang.Integer",
+      "description": "Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Defaults to 8192, and should always be larger than the maximum batch size.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 8192
+    },
+    {
+      "name": "camel.component.debezium-oracle.max-queue-size-in-bytes",
+      "type": "java.lang.Long",
+      "description": "Maximum size of the queue in bytes for change events read from the database log but not yet recorded or forwarded. Defaults to 0. Mean the feature is not enabled",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.message-key-columns",
+      "type": "java.lang.String",
+      "description": "A semicolon-separated list of expressions that match fully-qualified tables and column(s) to be used as message key. Each expression must match the pattern ':',where the table names could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connector,and the key columns are a comma-separated list of columns representing the custom key. For any table without an explicit key configuration the table's primary key column(s) will be u [...]
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-commit-policy",
+      "type": "java.lang.String",
+      "description": "The name of the Java class of the commit policy. It defines when offsets commit has to be triggered based on the number of events processed and the time elapsed since the last commit. This class must implement the interface 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-commit-timeout-ms",
+      "type": "java.lang.Long",
+      "description": "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. The default is 5 seconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 5000
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-flush-interval-ms",
+      "type": "java.lang.Long",
+      "description": "Interval at which to try committing offsets. The default is 1 minute. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 60000
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-storage",
+      "type": "java.lang.String",
+      "description": "The name of the Java class that is responsible for persistence of connector offsets.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "org.apache.kafka.connect.storage.FileOffsetBackingStore"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-storage-file-name",
+      "type": "java.lang.String",
+      "description": "Path to file where offsets are to be stored. Required when offset.storage is set to the FileOffsetBackingStore.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-storage-partitions",
+      "type": "java.lang.Integer",
+      "description": "The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the 'KafkaOffsetBackingStore'.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-storage-replication-factor",
+      "type": "java.lang.Integer",
+      "description": "Replication factor used when creating the offset storage topic. Required when offset.storage is set to the KafkaOffsetBackingStore",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.offset-storage-topic",
+      "type": "java.lang.String",
+      "description": "The name of the Kafka topic where offsets are to be stored. Required when offset.storage is set to the KafkaOffsetBackingStore.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.poll-interval-ms",
+      "type": "java.lang.Long",
+      "description": "Time to wait for new change events to appear after receiving no events, given in milliseconds. Defaults to 500 ms. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 500
+    },
+    {
+      "name": "camel.component.debezium-oracle.provide-transaction-metadata",
+      "type": "java.lang.Boolean",
+      "description": "Enables transaction metadata extraction together with event counting",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.query-fetch-size",
+      "type": "java.lang.Integer",
+      "description": "The maximum number of records that should be loaded into memory while streaming. A value of 0 uses the default JDBC fetch size.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.rac-nodes",
+      "type": "java.lang.String",
+      "description": "A comma-separated list of RAC node hostnames or ip addresses",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.retriable-restart-connector-wait-ms",
+      "type": "java.lang.Long",
+      "description": "Time to wait before restarting connector after retriable exception occurs. Defaults to 10000ms. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 10000
+    },
+    {
+      "name": "camel.component.debezium-oracle.sanitize-field-names",
+      "type": "java.lang.Boolean",
+      "description": "Whether field names will be sanitized to Avro naming conventions",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.signal-data-collection",
+      "type": "java.lang.String",
+      "description": "The name of the data collection that is used to send signals\/commands to Debezium. Signaling is disabled when not set.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.skipped-operations",
+      "type": "java.lang.String",
+      "description": "The comma-separated list of operations to skip during streaming, defined as: 'c' for inserts\/create; 'u' for updates; 'd' for deletes. By default, no operations will be skipped.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-delay-ms",
+      "type": "java.lang.Long",
+      "description": "A delay period before a snapshot will begin, given in milliseconds. Defaults to 0 ms. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 0
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-enhance-predicate-scn",
+      "type": "java.lang.String",
+      "description": "A token to replace on snapshot predicate template",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-fetch-size",
+      "type": "java.lang.Integer",
+      "description": "The maximum number of records that should be loaded into memory while performing a snapshot",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-include-collection-list",
+      "type": "java.lang.String",
+      "description": "this setting must be set to specify a list of tables\/collections whose snapshot must be taken on creating or restarting the connector.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-lock-timeout-ms",
+      "type": "java.lang.Long",
+      "description": "The maximum number of millis to wait for table locks at the beginning of a snapshot. If locks cannot be acquired in this time frame, the snapshot will be aborted. Defaults to 10 seconds. The option is a long type.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 10000
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-locking-mode",
+      "type": "java.lang.String",
+      "description": "Controls how the connector holds locks on tables while performing the schema snapshot. The default is 'shared', which means the connector will hold a table lock that prevents exclusive table access for just the initial portion of the snapshot while the database schemas and other metadata are being read. The remaining work in a snapshot involves selecting all rows from each table, and this is done using a flashback query that requires no locks. However, in some cases [...]
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "shared"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-max-threads",
+      "type": "java.lang.Integer",
+      "description": "The maximum number of threads used to perform the snapshot. Defaults to 1.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": 1
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-mode",
+      "type": "java.lang.String",
+      "description": "The criteria for running a snapshot upon startup of the connector. Options include: 'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; 'schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "initial"
+    },
+    {
+      "name": "camel.component.debezium-oracle.snapshot-select-statement-overrides",
+      "type": "java.lang.String",
+      "description": "This property contains a comma-separated list of fully-qualified tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on thespecific connectors. Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME', respectively. The value of those properties is the se [...]
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.source-struct-version",
+      "type": "java.lang.String",
+      "description": "A version of the format of the publicly visible source part in the message",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "v2"
+    },
+    {
+      "name": "camel.component.debezium-oracle.table-blacklist",
+      "type": "java.lang.String",
+      "description": "A comma-separated list of regular expressions that match the fully-qualified names of tables to be excluded from monitoring (deprecated, use table.exclude.list instead)",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.table-exclude-list",
+      "type": "java.lang.String",
+      "description": "A comma-separated list of regular expressions that match the fully-qualified names of tables to be excluded from monitoring",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.table-include-list",
+      "type": "java.lang.String",
+      "description": "The tables for which changes are to be captured",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.table-whitelist",
+      "type": "java.lang.String",
+      "description": "The tables for which changes are to be captured (deprecated, use table.include.list instead)",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration"
+    },
+    {
+      "name": "camel.component.debezium-oracle.time-precision-mode",
+      "type": "java.lang.String",
+      "description": "Time, date, and timestamps can be represented with different kinds of precisions, including:'adaptive' (the default) bases the precision of time, date, and timestamp values on the database column's precision; 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always use microseconds precision;'connect' always represents time, date, and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which uses millisecon [...]
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "adaptive"
+    },
+    {
+      "name": "camel.component.debezium-oracle.tombstones-on-delete",
+      "type": "java.lang.Boolean",
+      "description": "Whether delete operations should be represented by a delete event and a subsquenttombstone event (true) or only by a delete event (false). Emitting the tombstone event (the default behavior) allows Kafka to completely delete all events pertaining to the given key once the source record got deleted.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": false
+    },
+    {
+      "name": "camel.component.debezium-oracle.transaction-topic",
+      "type": "java.lang.String",
+      "description": "The name of the transaction metadata topic. The placeholder ${database.server.name} can be used for referring to the connector's logical name; defaults to ${database.server.name}.transaction.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "${database.server.name}.transaction"
+    },
+    {
+      "name": "camel.component.debezium-oracle.unavailable-value-placeholder",
+      "type": "java.lang.String",
+      "description": "Specify the constant that will be provided by Debezium to indicate that the original value is unavailable and not provided by the database.",
+      "sourceType": "org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConfiguration",
+      "defaultValue": "__debezium_unavailable_value"
+    }
+  ],
+  "hints": []
+}
\ No newline at end of file
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentAutoConfiguration.java b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentAutoConfiguration.java
new file mode 100644
index 00000000000..08fc0ed73aa
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentAutoConfiguration.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.springboot;
+
+import javax.annotation.Generated;
+import org.apache.camel.CamelContext;
+import org.apache.camel.Component;
+import org.apache.camel.component.debezium.DebeziumOracleComponent;
+import org.apache.camel.spi.ComponentCustomizer;
+import org.apache.camel.spring.boot.CamelAutoConfiguration;
+import org.apache.camel.spring.boot.ComponentConfigurationProperties;
+import org.apache.camel.spring.boot.util.CamelPropertiesHelper;
+import org.apache.camel.spring.boot.util.ConditionalOnCamelContextAndAutoConfigurationBeans;
+import org.apache.camel.spring.boot.util.ConditionalOnHierarchicalProperties;
+import org.apache.camel.spring.boot.util.HierarchicalPropertiesEvaluator;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.AutoConfigureAfter;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Conditional;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Lazy;
+
+/**
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.springboot.maven.SpringBootAutoConfigurationMojo")
+@Configuration(proxyBeanMethods = false)
+@Conditional(ConditionalOnCamelContextAndAutoConfigurationBeans.class)
+@EnableConfigurationProperties({ComponentConfigurationProperties.class,DebeziumOracleComponentConfiguration.class})
+@ConditionalOnHierarchicalProperties({"camel.component", "camel.component.debezium-oracle"})
+@AutoConfigureAfter({CamelAutoConfiguration.class, DebeziumOracleComponentConverter.class})
+public class DebeziumOracleComponentAutoConfiguration {
+
+    @Autowired
+    private ApplicationContext applicationContext;
+    private final CamelContext camelContext;
+    @Autowired
+    private DebeziumOracleComponentConfiguration configuration;
+
+    public DebeziumOracleComponentAutoConfiguration(
+            org.apache.camel.CamelContext camelContext) {
+        this.camelContext = camelContext;
+    }
+
+    @Lazy
+    @Bean
+    public ComponentCustomizer configureDebeziumOracleComponent() {
+        return new ComponentCustomizer() {
+            @Override
+            public void configure(String name, Component target) {
+                CamelPropertiesHelper.copyProperties(camelContext, configuration, target);
+            }
+            @Override
+            public boolean isEnabled(String name, Component target) {
+                return HierarchicalPropertiesEvaluator.evaluate(
+                        applicationContext,
+                        "camel.component.customizer",
+                        "camel.component.debezium-oracle.customizer")
+                    && target instanceof DebeziumOracleComponent;
+            }
+        };
+    }
+}
\ No newline at end of file
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConfiguration.java b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConfiguration.java
new file mode 100644
index 00000000000..3ac5453eabe
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConfiguration.java
@@ -0,0 +1,1494 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.springboot;
+
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration;
+import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+/**
+ * Capture changes from a Oracle database.
+ * 
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.springboot.maven.SpringBootAutoConfigurationMojo")
+@ConfigurationProperties(prefix = "camel.component.debezium-oracle")
+public class DebeziumOracleComponentConfiguration
+        extends
+            ComponentConfigurationPropertiesCommon {
+
+    /**
+     * Whether to enable auto configuration of the debezium-oracle component.
+     * This is enabled by default.
+     */
+    private Boolean enabled;
+    /**
+     * Additional properties for debezium components in case they can't be set
+     * directly on the camel configurations (e.g: setting Kafka Connect
+     * properties needed by Debezium engine, for example setting
+     * KafkaOffsetBackingStore), the properties have to be prefixed with
+     * additionalProperties.. E.g:
+     * additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro
+     */
+    private Map<String, Object> additionalProperties;
+    /**
+     * Allows for bridging the consumer to the Camel routing Error Handler,
+     * which mean any exceptions occurred while the consumer is trying to pickup
+     * incoming messages, or the likes, will now be processed as a message and
+     * handled by the routing Error Handler. By default the consumer will use
+     * the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that
+     * will be logged at WARN or ERROR level and ignored.
+     */
+    private Boolean bridgeErrorHandler = false;
+    /**
+     * Allow pre-configured Configurations to be set. The option is a
+     * org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration type.
+     */
+    private OracleConnectorEmbeddedDebeziumConfiguration configuration;
+    /**
+     * The Converter class that should be used to serialize and deserialize key
+     * data for offsets. The default is JSON converter.
+     */
+    private String internalKeyConverter = "org.apache.kafka.connect.json.JsonConverter";
+    /**
+     * The Converter class that should be used to serialize and deserialize
+     * value data for offsets. The default is JSON converter.
+     */
+    private String internalValueConverter = "org.apache.kafka.connect.json.JsonConverter";
+    /**
+     * The name of the Java class of the commit policy. It defines when offsets
+     * commit has to be triggered based on the number of events processed and
+     * the time elapsed since the last commit. This class must implement the
+     * interface 'OffsetCommitPolicy'. The default is a periodic commit policy
+     * based upon time intervals.
+     */
+    private String offsetCommitPolicy = "io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy";
+    /**
+     * Maximum number of milliseconds to wait for records to flush and partition
+     * offset data to be committed to offset storage before cancelling the
+     * process and restoring the offset data to be committed in a future
+     * attempt. The default is 5 seconds. The option is a long type.
+     */
+    private Long offsetCommitTimeoutMs = 5000L;
+    /**
+     * Interval at which to try committing offsets. The default is 1 minute. The
+     * option is a long type.
+     */
+    private Long offsetFlushIntervalMs = 60000L;
+    /**
+     * The name of the Java class that is responsible for persistence of
+     * connector offsets.
+     */
+    private String offsetStorage = "org.apache.kafka.connect.storage.FileOffsetBackingStore";
+    /**
+     * Path to file where offsets are to be stored. Required when offset.storage
+     * is set to the FileOffsetBackingStore.
+     */
+    private String offsetStorageFileName;
+    /**
+     * The number of partitions used when creating the offset storage topic.
+     * Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
+     */
+    private Integer offsetStoragePartitions;
+    /**
+     * Replication factor used when creating the offset storage topic. Required
+     * when offset.storage is set to the KafkaOffsetBackingStore
+     */
+    private Integer offsetStorageReplicationFactor;
+    /**
+     * The name of the Kafka topic where offsets are to be stored. Required when
+     * offset.storage is set to the KafkaOffsetBackingStore.
+     */
+    private String offsetStorageTopic;
+    /**
+     * Whether autowiring is enabled. This is used for automatic autowiring
+     * options (the option must be marked as autowired) by looking up in the
+     * registry to find if there is a single instance of matching type, which
+     * then gets configured on the component. This can be used for automatic
+     * configuring JDBC data sources, JMS connection factories, AWS Clients,
+     * etc.
+     */
+    private Boolean autowiredEnabled = true;
+    /**
+     * Specify how binary (blob, binary, etc.) columns should be represented in
+     * change events, including:'bytes' represents binary data as byte array
+     * (default)'base64' represents binary data as base64-encoded string'hex'
+     * represents binary data as hex-encoded (base16) string
+     */
+    private String binaryHandlingMode = "bytes";
+    /**
+     * Regular expressions matching columns to exclude from change events
+     * (deprecated, use column.exclude.list instead)
+     */
+    private String columnBlacklist;
+    /**
+     * Regular expressions matching columns to exclude from change events
+     */
+    private String columnExcludeList;
+    /**
+     * Regular expressions matching columns to include in change events
+     */
+    private String columnIncludeList;
+    /**
+     * A comma-separated list of regular expressions matching fully-qualified
+     * names of columns that adds the columns original type and original length
+     * as parameters to the corresponding field schemas in the emitted change
+     * records.
+     */
+    private String columnPropagateSourceType;
+    /**
+     * Regular expressions matching columns to include in change events
+     * (deprecated, use column.include.list instead)
+     */
+    private String columnWhitelist;
+    /**
+     * Optional list of custom converters that would be used instead of default
+     * ones. The converters are defined using '.type' config option and
+     * configured using options '.'
+     */
+    private String converters;
+    /**
+     * The adapter to use when capturing changes from the database. Options
+     * include: 'logminer': (the default) to capture changes using native Oracle
+     * LogMiner; 'xstream' to capture changes using Oracle XStreams
+     */
+    private String databaseConnectionAdapter = "LogMiner";
+    /**
+     * The name of the database from which the connector should capture changes
+     */
+    private String databaseDbname;
+    /**
+     * The name of the DatabaseHistory class that should be used to store and
+     * recover database schema changes. The configuration properties for the
+     * history are prefixed with the 'database.history.' string.
+     */
+    private String databaseHistory = "class io.debezium.relational.history.KafkaDatabaseHistory";
+    /**
+     * The path to the file that will be used to record the database history
+     */
+    private String databaseHistoryFileFilename;
+    /**
+     * A list of host/port pairs that the connector will use for establishing
+     * the initial connection to the Kafka cluster for retrieving database
+     * schema history previously stored by the connector. This should point to
+     * the same Kafka cluster used by the Kafka Connect process.
+     */
+    private String databaseHistoryKafkaBootstrapServers;
+    /**
+     * The number of attempts in a row that no data are returned from Kafka
+     * before recover completes. The maximum amount of time to wait after
+     * receiving no data is (recovery.attempts) x (recovery.poll.interval.ms).
+     */
+    private Integer databaseHistoryKafkaRecoveryAttempts = 100;
+    /**
+     * The number of milliseconds to wait while polling for persisted data
+     * during recovery. The option is a int type.
+     */
+    private Integer databaseHistoryKafkaRecoveryPollIntervalMs = 100;
+    /**
+     * The name of the topic for the database schema history
+     */
+    private String databaseHistoryKafkaTopic;
+    /**
+     * Controls the action Debezium will take when it meets a DDL statement in
+     * the binlog that it cannot parse. By default the connector will stop operating
+     * but by changing the setting it can ignore the statements which it cannot
+     * parse. If skipping is enabled then Debezium can miss metadata changes.
+     */
+    private Boolean databaseHistorySkipUnparseableDdl = false;
+    /**
+     * Controls what DDL will Debezium store in database history. By default
+     * (false) Debezium will store all incoming DDL statements. If set to true,
+     * then only DDL that manipulates a captured table will be stored.
+     */
+    private Boolean databaseHistoryStoreOnlyCapturedTablesDdl = false;
+    /**
+     * Controls what DDL will Debezium store in database history. By default
+     * (false) Debezium will store all incoming DDL statements. If set to true,
+     * then only DDL that manipulates a monitored table will be stored
+     * (deprecated, use database.history.store.only.captured.tables.ddl instead)
+     */
+    private Boolean databaseHistoryStoreOnlyMonitoredTablesDdl = false;
+    /**
+     * Resolvable hostname or IP address of the database server.
+     */
+    private String databaseHostname;
+    /**
+     * Name of the XStream Out server to connect to.
+     */
+    private String databaseOutServerName;
+    /**
+     * Password of the database user to be used when connecting to the database.
+     */
+    private String databasePassword;
+    /**
+     * Name of the pluggable database when working with a multi-tenant set-up.
+     * The CDB name must be given via database.dbname in this case.
+     */
+    private String databasePdbName;
+    /**
+     * Port of the database server.
+     */
+    private Integer databasePort = 1528;
+    /**
+     * Unique name that identifies the database server and all recorded offsets,
+     * and that is used as a prefix for all schemas and topics. Each distinct
+     * installation should have a separate namespace and be monitored by at most
+     * one Debezium connector.
+     */
+    private String databaseServerName;
+    /**
+     * Complete JDBC URL as an alternative to specifying hostname, port and
+     * database provided as a way to support alternative connection scenarios.
+     */
+    private String databaseUrl;
+    /**
+     * Name of the database user to be used when connecting to the database.
+     */
+    private String databaseUser;
+    /**
+     * A comma-separated list of regular expressions matching the
+     * database-specific data type names that adds the data type's original type
+     * and original length as parameters to the corresponding field schemas in
+     * the emitted change records.
+     */
+    private String datatypePropagateSourceType;
+    /**
+     * Specify how DECIMAL and NUMERIC columns should be represented in change
+     * events, including:'precise' (the default) uses java.math.BigDecimal to
+     * represent values, which are encoded in the change events using a binary
+     * representation and Kafka Connect's
+     * 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to
+     * represent values; 'double' represents values using Java's 'double', which
+     * may not offer the precision but will be far easier to use in consumers.
+     */
+    private String decimalHandlingMode = "precise";
+    /**
+     * Specify how failures during processing of events (i.e. when encountering
+     * a corrupted event) should be handled, including:'fail' (the default) an
+     * exception indicating the problematic event and its position is raised,
+     * causing the connector to be stopped; 'warn' the problematic event and its
+     * position will be logged and the event will be skipped;'ignore' the
+     * problematic event will be skipped.
+     */
+    private String eventProcessingFailureHandlingMode = "fail";
+    /**
+     * The query executed with every heartbeat.
+     */
+    private String heartbeatActionQuery;
+    /**
+     * Length of an interval in milliseconds in which the connector
+     * periodically sends heartbeat messages to a heartbeat topic. Use 0 to
+     * disable heartbeat messages. Disabled by default. The option is a int
+     * type.
+     */
+    private Integer heartbeatIntervalMs = 0;
+    /**
+     * The prefix that is used to name heartbeat topics.Defaults to
+     * __debezium-heartbeat.
+     */
+    private String heartbeatTopicsPrefix = "__debezium-heartbeat";
+    /**
+     * Whether the connector should publish changes in the database schema to a
+     * Kafka topic with the same name as the database server ID. Each schema
+     * change will be recorded using a key that contains the database name and
+     * whose value include logical description of the new schema and optionally
+     * the DDL statement(s).The default is 'true'. This is independent of how
+     * the connector internally records database history.
+     */
+    private Boolean includeSchemaChanges = true;
+    /**
+     * Whether the connector parses table and column comments into metadata
+     * objects. Note: Enabling this option has implications for memory
+     * usage. The number and size of ColumnImpl objects is what largely impacts
+     * how much memory is consumed by the Debezium connectors, and adding a
+     * String to each of them can potentially be quite heavy. The default is
+     * 'false'.
+     */
+    private Boolean includeSchemaComments = false;
+    /**
+     * Specify how INTERVAL columns should be represented in change events,
+     * including:'string' represents values as an exact ISO formatted
+     * string'numeric' (default) represents values using the inexact conversion
+     * into microseconds
+     */
+    private String intervalHandlingMode = "numeric";
+    /**
+     * When set to false, the default, LOB fields will not be captured nor
+     * emitted. When set to true, the connector will capture LOB fields and emit
+     * changes for those fields like any other column type.
+     */
+    private Boolean lobEnabled = false;
+    /**
+     * Sets the specific archive log destination as the source for reading
+     * archive logs.When not set, the connector will automatically select the
+     * first LOCAL and VALID destination.
+     */
+    private String logMiningArchiveDestinationName;
+    /**
+     * The number of hours in the past from SYSDATE to mine archive logs. Using
+     * 0 mines all available archive logs
+     */
+    private Long logMiningArchiveLogHours = 0L;
+    /**
+     * When set to false, the default, the connector will mine both archive log
+     * and redo logs to emit change events. When set to true, the connector will
+     * only mine archive logs. There are circumstances where it's advantageous to
+     * only mine archive logs and accept latency in event emission due to
+     * frequent revolving redo logs.
+     */
+    private Boolean logMiningArchiveLogOnlyMode = false;
+    /**
+     * The interval in milliseconds to wait between polls checking to see if the
+     * SCN is in the archive logs. The option is a long type.
+     */
+    private Long logMiningArchiveLogOnlyScnPollIntervalMs = 10000L;
+    /**
+     * The starting SCN interval size that the connector will use for reading
+     * data from redo/archive logs.
+     */
+    private Long logMiningBatchSizeDefault = 20000L;
+    /**
+     * The maximum SCN interval size that this connector will use when reading
+     * from redo/archive logs.
+     */
+    private Long logMiningBatchSizeMax = 100000L;
+    /**
+     * The minimum SCN interval size that this connector will try to read from
+     * redo/archive logs. Active batch size will be also increased/decreased by
+     * this amount for tuning connector throughput when needed.
+     */
+    private Long logMiningBatchSizeMin = 1000L;
+    /**
+     * When set to true the underlying buffer cache is not retained when the
+     * connector is stopped. When set to false (the default), the buffer cache
+     * is retained across restarts.
+     */
+    private Boolean logMiningBufferDropOnStop = false;
+    /**
+     * Specifies the XML configuration for the Infinispan 'events' cache
+     */
+    private String logMiningBufferInfinispanCacheEvents;
+    /**
+     * Specifies the XML configuration for the Infinispan
+     * 'processed-transactions' cache
+     */
+    private String logMiningBufferInfinispanCacheProcessedTransactions;
+    /**
+     * Specifies the XML configuration for the Infinispan 'schema-changes' cache
+     */
+    private String logMiningBufferInfinispanCacheSchemaChanges;
+    /**
+     * Specifies the XML configuration for the Infinispan 'transactions' cache
+     */
+    private String logMiningBufferInfinispanCacheTransactions;
+    /**
+     * The buffer type controls how the connector manages buffering transaction
+     * data. memory - Uses the JVM process' heap to buffer all transaction data.
+     * infinispan_embedded - This option uses an embedded Infinispan cache to
+     * buffer transaction data and persist it to disk. infinispan_remote - This
+     * option uses a remote Infinispan cluster to buffer transaction data and
+     * persist it to disk.
+     */
+    private String logMiningBufferType = "memory";
+    /**
+     * Used for SCN gap detection, if the difference between current SCN and
+     * previous end SCN is bigger than this value, and the time difference of
+     * current SCN and previous end SCN is smaller than
+     * log.mining.scn.gap.detection.time.interval.max.ms, consider it a SCN gap.
+     */
+    private Long logMiningScnGapDetectionGapSizeMin = 1000000L;
+    /**
+     * Used for SCN gap detection, if the difference between current SCN and
+     * previous end SCN is bigger than
+     * log.mining.scn.gap.detection.gap.size.min, and the time difference of
+     * current SCN and previous end SCN is smaller than this value, consider it
+     * a SCN gap. The option is a long type.
+     */
+    private Long logMiningScnGapDetectionTimeIntervalMaxMs = 20000L;
+    /**
+     * The amount of time that the connector will sleep after reading data from
+     * redo/archive logs and before starting reading data again. Value is in
+     * milliseconds. The option is a long type.
+     */
+    private Long logMiningSleepTimeDefaultMs = 1000L;
+    /**
+     * The maximum amount of time that the connector will use to tune the
+     * optimal sleep time when reading data from LogMiner. Value is in
+     * milliseconds. The option is a long type.
+     */
+    private Long logMiningSleepTimeIncrementMs = 200L;
+    /**
+     * The maximum amount of time that the connector will sleep after reading
+     * data from redo/archive logs and before starting reading data again. Value
+     * is in milliseconds. The option is a long type.
+     */
+    private Long logMiningSleepTimeMaxMs = 3000L;
+    /**
+     * The minimum amount of time that the connector will sleep after reading
+     * data from redo/archive logs and before starting reading data again. Value
+     * is in milliseconds. The option is a long type.
+     */
+    private Long logMiningSleepTimeMinMs = 0L;
+    /**
+     * There are strategies: Online catalog with faster mining but no captured
+     * DDL. Another - with data dictionary loaded into REDO LOG files
+     */
+    private String logMiningStrategy = "redo_log_catalog";
+    /**
+     * Hours to keep long running transactions in transaction buffer between log
+     * mining sessions. By default, all transactions are retained.
+     */
+    private Long logMiningTransactionRetentionHours = 0L;
+    /**
+     * Comma separated list of usernames to exclude from LogMiner query.
+     */
+    private String logMiningUsernameExcludeList;
+    /**
+     * Maximum size of each batch of source records. Defaults to 2048.
+     */
+    private Integer maxBatchSize = 2048;
+    /**
+     * Maximum size of the queue for change events read from the database log
+     * but not yet recorded or forwarded. Defaults to 8192, and should always be
+     * larger than the maximum batch size.
+     */
+    private Integer maxQueueSize = 8192;
+    /**
+     * Maximum size of the queue in bytes for change events read from the
+     * database log but not yet recorded or forwarded. Defaults to 0. Mean the
+     * feature is not enabled
+     */
+    private Long maxQueueSizeInBytes = 0L;
+    /**
+     * A semicolon-separated list of expressions that match fully-qualified
+     * tables and column(s) to be used as message key. Each expression must
+     * match the pattern ':',where the table names could be defined as
+     * (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the
+     * specific connector,and the key columns are a comma-separated list of
+     * columns representing the custom key. For any table without an explicit
+     * key configuration the table's primary key column(s) will be used as
+     * message key.Example:
+     * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id
+     */
+    private String messageKeyColumns;
+    /**
+     * Time to wait for new change events to appear after receiving no events,
+     * given in milliseconds. Defaults to 500 ms. The option is a long type.
+     */
+    private Long pollIntervalMs = 500L;
+    /**
+     * Enables transaction metadata extraction together with event counting
+     */
+    private Boolean provideTransactionMetadata = false;
+    /**
+     * The maximum number of records that should be loaded into memory while
+     * streaming. A value of 0 uses the default JDBC fetch size.
+     */
+    private Integer queryFetchSize = 0;
+    /**
+     * A comma-separated list of RAC node hostnames or ip addresses
+     */
+    private String racNodes;
+    /**
+     * Time to wait before restarting connector after retriable exception
+     * occurs. Defaults to 10000ms. The option is a long type.
+     */
+    private Long retriableRestartConnectorWaitMs = 10000L;
+    /**
+     * Whether field names will be sanitized to Avro naming conventions
+     */
+    private Boolean sanitizeFieldNames = false;
+    /**
+     * The name of the data collection that is used to send signals/commands to
+     * Debezium. Signaling is disabled when not set.
+     */
+    private String signalDataCollection;
+    /**
+     * The comma-separated list of operations to skip during streaming, defined
+     * as: 'c' for inserts/create; 'u' for updates; 'd' for deletes. By default,
+     * no operations will be skipped.
+     */
+    private String skippedOperations;
+    /**
+     * A delay period before a snapshot will begin, given in milliseconds.
+     * Defaults to 0 ms. The option is a long type.
+     */
+    private Long snapshotDelayMs = 0L;
+    /**
+     * A token to replace on snapshot predicate template
+     */
+    private String snapshotEnhancePredicateScn;
+    /**
+     * The maximum number of records that should be loaded into memory while
+     * performing a snapshot
+     */
+    private Integer snapshotFetchSize;
+    /**
+     * this setting must be set to specify a list of tables/collections whose
+     * snapshot must be taken on creating or restarting the connector.
+     */
+    private String snapshotIncludeCollectionList;
+    /**
+     * Controls how the connector holds locks on tables while performing the
+     * schema snapshot. The default is 'shared', which means the connector will
+     * hold a table lock that prevents exclusive table access for just the
+     * initial portion of the snapshot while the database schemas and other
+     * metadata are being read. The remaining work in a snapshot involves
+     * selecting all rows from each table, and this is done using a flashback
+     * query that requires no locks. However, in some cases it may be desirable
+     * to avoid locks entirely which can be done by specifying 'none'. This mode
+     * is only safe to use if no schema changes are happening while the snapshot
+     * is taken.
+     */
+    private String snapshotLockingMode = "shared";
+    /**
+     * The maximum number of millis to wait for table locks at the beginning of
+     * a snapshot. If locks cannot be acquired in this time frame, the snapshot
+     * will be aborted. Defaults to 10 seconds. The option is a long type.
+     */
+    private Long snapshotLockTimeoutMs = 10000L;
+    /**
+     * The maximum number of threads used to perform the snapshot. Defaults to
+     * 1.
+     */
+    private Integer snapshotMaxThreads = 1;
+    /**
+     * The criteria for running a snapshot upon startup of the connector.
+     * Options include: 'initial' (the default) to specify the connector should
+     * run a snapshot only when no offsets are available for the logical server
+     * name; 'schema_only' to specify the connector should run a snapshot of the
+     * schema when no offsets are available for the logical server name.
+     */
+    private String snapshotMode = "initial";
+    /**
+     * This property contains a comma-separated list of fully-qualified tables
+     * (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on
+     * the specific connectors. Select statements for the individual tables are
+     * specified in further configuration properties, one for each table,
+     * identified by the id
+     * 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or
+     * 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME',
+     * respectively. The value of those properties is the select statement to
+     * use when retrieving data from the specific table during snapshotting. A
+     * possible use case for large append-only tables is setting a specific
+     * point where to start (resume) snapshotting, in case a previous
+     * snapshotting was interrupted.
+     */
+    private String snapshotSelectStatementOverrides;
+    /**
+     * A version of the format of the publicly visible source part in the
+     * message
+     */
+    private String sourceStructVersion = "v2";
+    /**
+     * A comma-separated list of regular expressions that match the
+     * fully-qualified names of tables to be excluded from monitoring
+     * (deprecated, use table.exclude.list instead)
+     */
+    private String tableBlacklist;
+    /**
+     * A comma-separated list of regular expressions that match the
+     * fully-qualified names of tables to be excluded from monitoring
+     */
+    private String tableExcludeList;
+    /**
+     * The tables for which changes are to be captured
+     */
+    private String tableIncludeList;
+    /**
+     * The tables for which changes are to be captured (deprecated, use
+     * table.include.list instead)
+     */
+    private String tableWhitelist;
+    /**
+     * Time, date, and timestamps can be represented with different kinds of
+     * precisions, including:'adaptive' (the default) bases the precision of
+     * time, date, and timestamp values on the database column's precision;
+     * 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always
+     * use microseconds precision;'connect' always represents time, date, and
+     * timestamp values using Kafka Connect's built-in representations for Time,
+     * Date, and Timestamp, which uses millisecond precision regardless of the
+     * database columns' precision.
+     */
+    private String timePrecisionMode = "adaptive";
+    /**
+     * Whether delete operations should be represented by a delete event and a
+     * subsequent tombstone event (true) or only by a delete event (false).
+     * Emitting the tombstone event (the default behavior) allows Kafka to
+     * completely delete all events pertaining to the given key once the source
+     * record got deleted.
+     */
+    private Boolean tombstonesOnDelete = false;
+    /**
+     * The name of the transaction metadata topic. The placeholder
+     * ${database.server.name} can be used for referring to the connector's
+     * logical name; defaults to ${database.server.name}.transaction.
+     */
+    private String transactionTopic = "${database.server.name}.transaction";
+    /**
+     * Specify the constant that will be provided by Debezium to indicate that
+     * the original value is unavailable and not provided by the database.
+     */
+    private String unavailableValuePlaceholder = "__debezium_unavailable_value";
+
+    public Map<String, Object> getAdditionalProperties() {
+        return additionalProperties;
+    }
+
+    public void setAdditionalProperties(Map<String, Object> additionalProperties) {
+        this.additionalProperties = additionalProperties;
+    }
+
+    public Boolean getBridgeErrorHandler() {
+        return bridgeErrorHandler;
+    }
+
+    public void setBridgeErrorHandler(Boolean bridgeErrorHandler) {
+        this.bridgeErrorHandler = bridgeErrorHandler;
+    }
+
+    public OracleConnectorEmbeddedDebeziumConfiguration getConfiguration() {
+        return configuration;
+    }
+
+    public void setConfiguration(
+            OracleConnectorEmbeddedDebeziumConfiguration configuration) {
+        this.configuration = configuration;
+    }
+
+    public String getInternalKeyConverter() {
+        return internalKeyConverter;
+    }
+
+    public void setInternalKeyConverter(String internalKeyConverter) {
+        this.internalKeyConverter = internalKeyConverter;
+    }
+
+    public String getInternalValueConverter() {
+        return internalValueConverter;
+    }
+
+    public void setInternalValueConverter(String internalValueConverter) {
+        this.internalValueConverter = internalValueConverter;
+    }
+
+    public String getOffsetCommitPolicy() {
+        return offsetCommitPolicy;
+    }
+
+    public void setOffsetCommitPolicy(String offsetCommitPolicy) {
+        this.offsetCommitPolicy = offsetCommitPolicy;
+    }
+
+    public Long getOffsetCommitTimeoutMs() {
+        return offsetCommitTimeoutMs;
+    }
+
+    public void setOffsetCommitTimeoutMs(Long offsetCommitTimeoutMs) {
+        this.offsetCommitTimeoutMs = offsetCommitTimeoutMs;
+    }
+
+    public Long getOffsetFlushIntervalMs() {
+        return offsetFlushIntervalMs;
+    }
+
+    public void setOffsetFlushIntervalMs(Long offsetFlushIntervalMs) {
+        this.offsetFlushIntervalMs = offsetFlushIntervalMs;
+    }
+
+    public String getOffsetStorage() {
+        return offsetStorage;
+    }
+
+    public void setOffsetStorage(String offsetStorage) {
+        this.offsetStorage = offsetStorage;
+    }
+
+    public String getOffsetStorageFileName() {
+        return offsetStorageFileName;
+    }
+
+    public void setOffsetStorageFileName(String offsetStorageFileName) {
+        this.offsetStorageFileName = offsetStorageFileName;
+    }
+
+    public Integer getOffsetStoragePartitions() {
+        return offsetStoragePartitions;
+    }
+
+    public void setOffsetStoragePartitions(Integer offsetStoragePartitions) {
+        this.offsetStoragePartitions = offsetStoragePartitions;
+    }
+
+    public Integer getOffsetStorageReplicationFactor() {
+        return offsetStorageReplicationFactor;
+    }
+
+    public void setOffsetStorageReplicationFactor(
+            Integer offsetStorageReplicationFactor) {
+        this.offsetStorageReplicationFactor = offsetStorageReplicationFactor;
+    }
+
+    public String getOffsetStorageTopic() {
+        return offsetStorageTopic;
+    }
+
+    public void setOffsetStorageTopic(String offsetStorageTopic) {
+        this.offsetStorageTopic = offsetStorageTopic;
+    }
+
+    public Boolean getAutowiredEnabled() {
+        return autowiredEnabled;
+    }
+
+    public void setAutowiredEnabled(Boolean autowiredEnabled) {
+        this.autowiredEnabled = autowiredEnabled;
+    }
+
+    public String getBinaryHandlingMode() {
+        return binaryHandlingMode;
+    }
+
+    public void setBinaryHandlingMode(String binaryHandlingMode) {
+        this.binaryHandlingMode = binaryHandlingMode;
+    }
+
+    public String getColumnBlacklist() {
+        return columnBlacklist;
+    }
+
+    public void setColumnBlacklist(String columnBlacklist) {
+        this.columnBlacklist = columnBlacklist;
+    }
+
+    public String getColumnExcludeList() {
+        return columnExcludeList;
+    }
+
+    public void setColumnExcludeList(String columnExcludeList) {
+        this.columnExcludeList = columnExcludeList;
+    }
+
+    public String getColumnIncludeList() {
+        return columnIncludeList;
+    }
+
+    public void setColumnIncludeList(String columnIncludeList) {
+        this.columnIncludeList = columnIncludeList;
+    }
+
+    public String getColumnPropagateSourceType() {
+        return columnPropagateSourceType;
+    }
+
+    public void setColumnPropagateSourceType(String columnPropagateSourceType) {
+        this.columnPropagateSourceType = columnPropagateSourceType;
+    }
+
+    public String getColumnWhitelist() {
+        return columnWhitelist;
+    }
+
+    public void setColumnWhitelist(String columnWhitelist) {
+        this.columnWhitelist = columnWhitelist;
+    }
+
+    public String getConverters() {
+        return converters;
+    }
+
+    public void setConverters(String converters) {
+        this.converters = converters;
+    }
+
+    public String getDatabaseConnectionAdapter() {
+        return databaseConnectionAdapter;
+    }
+
+    public void setDatabaseConnectionAdapter(String databaseConnectionAdapter) {
+        this.databaseConnectionAdapter = databaseConnectionAdapter;
+    }
+
+    public String getDatabaseDbname() {
+        return databaseDbname;
+    }
+
+    public void setDatabaseDbname(String databaseDbname) {
+        this.databaseDbname = databaseDbname;
+    }
+
+    public String getDatabaseHistory() {
+        return databaseHistory;
+    }
+
+    public void setDatabaseHistory(String databaseHistory) {
+        this.databaseHistory = databaseHistory;
+    }
+
+    public String getDatabaseHistoryFileFilename() {
+        return databaseHistoryFileFilename;
+    }
+
+    public void setDatabaseHistoryFileFilename(
+            String databaseHistoryFileFilename) {
+        this.databaseHistoryFileFilename = databaseHistoryFileFilename;
+    }
+
+    public String getDatabaseHistoryKafkaBootstrapServers() {
+        return databaseHistoryKafkaBootstrapServers;
+    }
+
+    public void setDatabaseHistoryKafkaBootstrapServers(
+            String databaseHistoryKafkaBootstrapServers) {
+        this.databaseHistoryKafkaBootstrapServers = databaseHistoryKafkaBootstrapServers;
+    }
+
+    public Integer getDatabaseHistoryKafkaRecoveryAttempts() {
+        return databaseHistoryKafkaRecoveryAttempts;
+    }
+
+    public void setDatabaseHistoryKafkaRecoveryAttempts(
+            Integer databaseHistoryKafkaRecoveryAttempts) {
+        this.databaseHistoryKafkaRecoveryAttempts = databaseHistoryKafkaRecoveryAttempts;
+    }
+
+    public Integer getDatabaseHistoryKafkaRecoveryPollIntervalMs() {
+        return databaseHistoryKafkaRecoveryPollIntervalMs;
+    }
+
+    public void setDatabaseHistoryKafkaRecoveryPollIntervalMs(
+            Integer databaseHistoryKafkaRecoveryPollIntervalMs) {
+        this.databaseHistoryKafkaRecoveryPollIntervalMs = databaseHistoryKafkaRecoveryPollIntervalMs;
+    }
+
+    public String getDatabaseHistoryKafkaTopic() {
+        return databaseHistoryKafkaTopic;
+    }
+
+    public void setDatabaseHistoryKafkaTopic(String databaseHistoryKafkaTopic) {
+        this.databaseHistoryKafkaTopic = databaseHistoryKafkaTopic;
+    }
+
+    public Boolean getDatabaseHistorySkipUnparseableDdl() {
+        return databaseHistorySkipUnparseableDdl;
+    }
+
+    public void setDatabaseHistorySkipUnparseableDdl(
+            Boolean databaseHistorySkipUnparseableDdl) {
+        this.databaseHistorySkipUnparseableDdl = databaseHistorySkipUnparseableDdl;
+    }
+
+    public Boolean getDatabaseHistoryStoreOnlyCapturedTablesDdl() {
+        return databaseHistoryStoreOnlyCapturedTablesDdl;
+    }
+
+    public void setDatabaseHistoryStoreOnlyCapturedTablesDdl(
+            Boolean databaseHistoryStoreOnlyCapturedTablesDdl) {
+        this.databaseHistoryStoreOnlyCapturedTablesDdl = databaseHistoryStoreOnlyCapturedTablesDdl;
+    }
+
+    public Boolean getDatabaseHistoryStoreOnlyMonitoredTablesDdl() {
+        return databaseHistoryStoreOnlyMonitoredTablesDdl;
+    }
+
+    public void setDatabaseHistoryStoreOnlyMonitoredTablesDdl(
+            Boolean databaseHistoryStoreOnlyMonitoredTablesDdl) {
+        this.databaseHistoryStoreOnlyMonitoredTablesDdl = databaseHistoryStoreOnlyMonitoredTablesDdl;
+    }
+
+    public String getDatabaseHostname() {
+        return databaseHostname;
+    }
+
+    public void setDatabaseHostname(String databaseHostname) {
+        this.databaseHostname = databaseHostname;
+    }
+
+    public String getDatabaseOutServerName() {
+        return databaseOutServerName;
+    }
+
+    public void setDatabaseOutServerName(String databaseOutServerName) {
+        this.databaseOutServerName = databaseOutServerName;
+    }
+
+    public String getDatabasePassword() {
+        return databasePassword;
+    }
+
+    public void setDatabasePassword(String databasePassword) {
+        this.databasePassword = databasePassword;
+    }
+
+    public String getDatabasePdbName() {
+        return databasePdbName;
+    }
+
+    public void setDatabasePdbName(String databasePdbName) {
+        this.databasePdbName = databasePdbName;
+    }
+
+    public Integer getDatabasePort() {
+        return databasePort;
+    }
+
+    public void setDatabasePort(Integer databasePort) {
+        this.databasePort = databasePort;
+    }
+
+    public String getDatabaseServerName() {
+        return databaseServerName;
+    }
+
+    public void setDatabaseServerName(String databaseServerName) {
+        this.databaseServerName = databaseServerName;
+    }
+
+    public String getDatabaseUrl() {
+        return databaseUrl;
+    }
+
+    public void setDatabaseUrl(String databaseUrl) {
+        this.databaseUrl = databaseUrl;
+    }
+
+    public String getDatabaseUser() {
+        return databaseUser;
+    }
+
+    public void setDatabaseUser(String databaseUser) {
+        this.databaseUser = databaseUser;
+    }
+
+    public String getDatatypePropagateSourceType() {
+        return datatypePropagateSourceType;
+    }
+
+    public void setDatatypePropagateSourceType(
+            String datatypePropagateSourceType) {
+        this.datatypePropagateSourceType = datatypePropagateSourceType;
+    }
+
+    public String getDecimalHandlingMode() {
+        return decimalHandlingMode;
+    }
+
+    public void setDecimalHandlingMode(String decimalHandlingMode) {
+        this.decimalHandlingMode = decimalHandlingMode;
+    }
+
+    public String getEventProcessingFailureHandlingMode() {
+        return eventProcessingFailureHandlingMode;
+    }
+
+    public void setEventProcessingFailureHandlingMode(
+            String eventProcessingFailureHandlingMode) {
+        this.eventProcessingFailureHandlingMode = eventProcessingFailureHandlingMode;
+    }
+
+    public String getHeartbeatActionQuery() {
+        return heartbeatActionQuery;
+    }
+
+    public void setHeartbeatActionQuery(String heartbeatActionQuery) {
+        this.heartbeatActionQuery = heartbeatActionQuery;
+    }
+
+    public Integer getHeartbeatIntervalMs() {
+        return heartbeatIntervalMs;
+    }
+
+    public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) {
+        this.heartbeatIntervalMs = heartbeatIntervalMs;
+    }
+
+    public String getHeartbeatTopicsPrefix() {
+        return heartbeatTopicsPrefix;
+    }
+
+    public void setHeartbeatTopicsPrefix(String heartbeatTopicsPrefix) {
+        this.heartbeatTopicsPrefix = heartbeatTopicsPrefix;
+    }
+
+    public Boolean getIncludeSchemaChanges() {
+        return includeSchemaChanges;
+    }
+
+    public void setIncludeSchemaChanges(Boolean includeSchemaChanges) {
+        this.includeSchemaChanges = includeSchemaChanges;
+    }
+
+    public Boolean getIncludeSchemaComments() {
+        return includeSchemaComments;
+    }
+
+    public void setIncludeSchemaComments(Boolean includeSchemaComments) {
+        this.includeSchemaComments = includeSchemaComments;
+    }
+
+    public String getIntervalHandlingMode() {
+        return intervalHandlingMode;
+    }
+
+    public void setIntervalHandlingMode(String intervalHandlingMode) {
+        this.intervalHandlingMode = intervalHandlingMode;
+    }
+
+    public Boolean getLobEnabled() {
+        return lobEnabled;
+    }
+
+    public void setLobEnabled(Boolean lobEnabled) {
+        this.lobEnabled = lobEnabled;
+    }
+
+    public String getLogMiningArchiveDestinationName() {
+        return logMiningArchiveDestinationName;
+    }
+
+    public void setLogMiningArchiveDestinationName(
+            String logMiningArchiveDestinationName) {
+        this.logMiningArchiveDestinationName = logMiningArchiveDestinationName;
+    }
+
+    public Long getLogMiningArchiveLogHours() {
+        return logMiningArchiveLogHours;
+    }
+
+    public void setLogMiningArchiveLogHours(Long logMiningArchiveLogHours) {
+        this.logMiningArchiveLogHours = logMiningArchiveLogHours;
+    }
+
+    public Boolean getLogMiningArchiveLogOnlyMode() {
+        return logMiningArchiveLogOnlyMode;
+    }
+
+    public void setLogMiningArchiveLogOnlyMode(
+            Boolean logMiningArchiveLogOnlyMode) {
+        this.logMiningArchiveLogOnlyMode = logMiningArchiveLogOnlyMode;
+    }
+
+    public Long getLogMiningArchiveLogOnlyScnPollIntervalMs() {
+        return logMiningArchiveLogOnlyScnPollIntervalMs;
+    }
+
+    public void setLogMiningArchiveLogOnlyScnPollIntervalMs(
+            Long logMiningArchiveLogOnlyScnPollIntervalMs) {
+        this.logMiningArchiveLogOnlyScnPollIntervalMs = logMiningArchiveLogOnlyScnPollIntervalMs;
+    }
+
+    public Long getLogMiningBatchSizeDefault() {
+        return logMiningBatchSizeDefault;
+    }
+
+    public void setLogMiningBatchSizeDefault(Long logMiningBatchSizeDefault) {
+        this.logMiningBatchSizeDefault = logMiningBatchSizeDefault;
+    }
+
+    public Long getLogMiningBatchSizeMax() {
+        return logMiningBatchSizeMax;
+    }
+
+    public void setLogMiningBatchSizeMax(Long logMiningBatchSizeMax) {
+        this.logMiningBatchSizeMax = logMiningBatchSizeMax;
+    }
+
+    public Long getLogMiningBatchSizeMin() {
+        return logMiningBatchSizeMin;
+    }
+
+    public void setLogMiningBatchSizeMin(Long logMiningBatchSizeMin) {
+        this.logMiningBatchSizeMin = logMiningBatchSizeMin;
+    }
+
+    public Boolean getLogMiningBufferDropOnStop() {
+        return logMiningBufferDropOnStop;
+    }
+
+    public void setLogMiningBufferDropOnStop(Boolean logMiningBufferDropOnStop) {
+        this.logMiningBufferDropOnStop = logMiningBufferDropOnStop;
+    }
+
+    public String getLogMiningBufferInfinispanCacheEvents() {
+        return logMiningBufferInfinispanCacheEvents;
+    }
+
+    public void setLogMiningBufferInfinispanCacheEvents(
+            String logMiningBufferInfinispanCacheEvents) {
+        this.logMiningBufferInfinispanCacheEvents = logMiningBufferInfinispanCacheEvents;
+    }
+
+    public String getLogMiningBufferInfinispanCacheProcessedTransactions() {
+        return logMiningBufferInfinispanCacheProcessedTransactions;
+    }
+
+    public void setLogMiningBufferInfinispanCacheProcessedTransactions(
+            String logMiningBufferInfinispanCacheProcessedTransactions) {
+        this.logMiningBufferInfinispanCacheProcessedTransactions = logMiningBufferInfinispanCacheProcessedTransactions;
+    }
+
+    public String getLogMiningBufferInfinispanCacheSchemaChanges() {
+        return logMiningBufferInfinispanCacheSchemaChanges;
+    }
+
+    public void setLogMiningBufferInfinispanCacheSchemaChanges(
+            String logMiningBufferInfinispanCacheSchemaChanges) {
+        this.logMiningBufferInfinispanCacheSchemaChanges = logMiningBufferInfinispanCacheSchemaChanges;
+    }
+
+    public String getLogMiningBufferInfinispanCacheTransactions() {
+        return logMiningBufferInfinispanCacheTransactions;
+    }
+
+    public void setLogMiningBufferInfinispanCacheTransactions(
+            String logMiningBufferInfinispanCacheTransactions) {
+        this.logMiningBufferInfinispanCacheTransactions = logMiningBufferInfinispanCacheTransactions;
+    }
+
+    public String getLogMiningBufferType() {
+        return logMiningBufferType;
+    }
+
+    public void setLogMiningBufferType(String logMiningBufferType) {
+        this.logMiningBufferType = logMiningBufferType;
+    }
+
+    public Long getLogMiningScnGapDetectionGapSizeMin() {
+        return logMiningScnGapDetectionGapSizeMin;
+    }
+
+    public void setLogMiningScnGapDetectionGapSizeMin(
+            Long logMiningScnGapDetectionGapSizeMin) {
+        this.logMiningScnGapDetectionGapSizeMin = logMiningScnGapDetectionGapSizeMin;
+    }
+
+    public Long getLogMiningScnGapDetectionTimeIntervalMaxMs() {
+        return logMiningScnGapDetectionTimeIntervalMaxMs;
+    }
+
+    public void setLogMiningScnGapDetectionTimeIntervalMaxMs(
+            Long logMiningScnGapDetectionTimeIntervalMaxMs) {
+        this.logMiningScnGapDetectionTimeIntervalMaxMs = logMiningScnGapDetectionTimeIntervalMaxMs;
+    }
+
+    public Long getLogMiningSleepTimeDefaultMs() {
+        return logMiningSleepTimeDefaultMs;
+    }
+
+    public void setLogMiningSleepTimeDefaultMs(Long logMiningSleepTimeDefaultMs) {
+        this.logMiningSleepTimeDefaultMs = logMiningSleepTimeDefaultMs;
+    }
+
+    public Long getLogMiningSleepTimeIncrementMs() {
+        return logMiningSleepTimeIncrementMs;
+    }
+
+    public void setLogMiningSleepTimeIncrementMs(
+            Long logMiningSleepTimeIncrementMs) {
+        this.logMiningSleepTimeIncrementMs = logMiningSleepTimeIncrementMs;
+    }
+
+    public Long getLogMiningSleepTimeMaxMs() {
+        return logMiningSleepTimeMaxMs;
+    }
+
+    public void setLogMiningSleepTimeMaxMs(Long logMiningSleepTimeMaxMs) {
+        this.logMiningSleepTimeMaxMs = logMiningSleepTimeMaxMs;
+    }
+
+    public Long getLogMiningSleepTimeMinMs() {
+        return logMiningSleepTimeMinMs;
+    }
+
+    public void setLogMiningSleepTimeMinMs(Long logMiningSleepTimeMinMs) {
+        this.logMiningSleepTimeMinMs = logMiningSleepTimeMinMs;
+    }
+
+    public String getLogMiningStrategy() {
+        return logMiningStrategy;
+    }
+
+    public void setLogMiningStrategy(String logMiningStrategy) {
+        this.logMiningStrategy = logMiningStrategy;
+    }
+
+    public Long getLogMiningTransactionRetentionHours() {
+        return logMiningTransactionRetentionHours;
+    }
+
+    public void setLogMiningTransactionRetentionHours(
+            Long logMiningTransactionRetentionHours) {
+        this.logMiningTransactionRetentionHours = logMiningTransactionRetentionHours;
+    }
+
+    public String getLogMiningUsernameExcludeList() {
+        return logMiningUsernameExcludeList;
+    }
+
+    public void setLogMiningUsernameExcludeList(
+            String logMiningUsernameExcludeList) {
+        this.logMiningUsernameExcludeList = logMiningUsernameExcludeList;
+    }
+
+    public Integer getMaxBatchSize() {
+        return maxBatchSize;
+    }
+
+    public void setMaxBatchSize(Integer maxBatchSize) {
+        this.maxBatchSize = maxBatchSize;
+    }
+
+    public Integer getMaxQueueSize() {
+        return maxQueueSize;
+    }
+
+    public void setMaxQueueSize(Integer maxQueueSize) {
+        this.maxQueueSize = maxQueueSize;
+    }
+
+    public Long getMaxQueueSizeInBytes() {
+        return maxQueueSizeInBytes;
+    }
+
+    public void setMaxQueueSizeInBytes(Long maxQueueSizeInBytes) {
+        this.maxQueueSizeInBytes = maxQueueSizeInBytes;
+    }
+
+    public String getMessageKeyColumns() {
+        return messageKeyColumns;
+    }
+
+    public void setMessageKeyColumns(String messageKeyColumns) {
+        this.messageKeyColumns = messageKeyColumns;
+    }
+
+    public Long getPollIntervalMs() {
+        return pollIntervalMs;
+    }
+
+    public void setPollIntervalMs(Long pollIntervalMs) {
+        this.pollIntervalMs = pollIntervalMs;
+    }
+
+    public Boolean getProvideTransactionMetadata() {
+        return provideTransactionMetadata;
+    }
+
+    public void setProvideTransactionMetadata(Boolean provideTransactionMetadata) {
+        this.provideTransactionMetadata = provideTransactionMetadata;
+    }
+
+    public Integer getQueryFetchSize() {
+        return queryFetchSize;
+    }
+
+    public void setQueryFetchSize(Integer queryFetchSize) {
+        this.queryFetchSize = queryFetchSize;
+    }
+
+    public String getRacNodes() {
+        return racNodes;
+    }
+
+    public void setRacNodes(String racNodes) {
+        this.racNodes = racNodes;
+    }
+
+    public Long getRetriableRestartConnectorWaitMs() {
+        return retriableRestartConnectorWaitMs;
+    }
+
+    public void setRetriableRestartConnectorWaitMs(
+            Long retriableRestartConnectorWaitMs) {
+        this.retriableRestartConnectorWaitMs = retriableRestartConnectorWaitMs;
+    }
+
+    public Boolean getSanitizeFieldNames() {
+        return sanitizeFieldNames;
+    }
+
+    public void setSanitizeFieldNames(Boolean sanitizeFieldNames) {
+        this.sanitizeFieldNames = sanitizeFieldNames;
+    }
+
+    public String getSignalDataCollection() {
+        return signalDataCollection;
+    }
+
+    public void setSignalDataCollection(String signalDataCollection) {
+        this.signalDataCollection = signalDataCollection;
+    }
+
+    public String getSkippedOperations() {
+        return skippedOperations;
+    }
+
+    public void setSkippedOperations(String skippedOperations) {
+        this.skippedOperations = skippedOperations;
+    }
+
+    public Long getSnapshotDelayMs() {
+        return snapshotDelayMs;
+    }
+
+    public void setSnapshotDelayMs(Long snapshotDelayMs) {
+        this.snapshotDelayMs = snapshotDelayMs;
+    }
+
+    public String getSnapshotEnhancePredicateScn() {
+        return snapshotEnhancePredicateScn;
+    }
+
+    public void setSnapshotEnhancePredicateScn(
+            String snapshotEnhancePredicateScn) {
+        this.snapshotEnhancePredicateScn = snapshotEnhancePredicateScn;
+    }
+
+    public Integer getSnapshotFetchSize() {
+        return snapshotFetchSize;
+    }
+
+    public void setSnapshotFetchSize(Integer snapshotFetchSize) {
+        this.snapshotFetchSize = snapshotFetchSize;
+    }
+
+    public String getSnapshotIncludeCollectionList() {
+        return snapshotIncludeCollectionList;
+    }
+
+    public void setSnapshotIncludeCollectionList(
+            String snapshotIncludeCollectionList) {
+        this.snapshotIncludeCollectionList = snapshotIncludeCollectionList;
+    }
+
+    public String getSnapshotLockingMode() {
+        return snapshotLockingMode;
+    }
+
+    public void setSnapshotLockingMode(String snapshotLockingMode) {
+        this.snapshotLockingMode = snapshotLockingMode;
+    }
+
+    public Long getSnapshotLockTimeoutMs() {
+        return snapshotLockTimeoutMs;
+    }
+
+    public void setSnapshotLockTimeoutMs(Long snapshotLockTimeoutMs) {
+        this.snapshotLockTimeoutMs = snapshotLockTimeoutMs;
+    }
+
+    public Integer getSnapshotMaxThreads() {
+        return snapshotMaxThreads;
+    }
+
+    public void setSnapshotMaxThreads(Integer snapshotMaxThreads) {
+        this.snapshotMaxThreads = snapshotMaxThreads;
+    }
+
+    public String getSnapshotMode() {
+        return snapshotMode;
+    }
+
+    public void setSnapshotMode(String snapshotMode) {
+        this.snapshotMode = snapshotMode;
+    }
+
+    public String getSnapshotSelectStatementOverrides() {
+        return snapshotSelectStatementOverrides;
+    }
+
+    public void setSnapshotSelectStatementOverrides(
+            String snapshotSelectStatementOverrides) {
+        this.snapshotSelectStatementOverrides = snapshotSelectStatementOverrides;
+    }
+
+    public String getSourceStructVersion() {
+        return sourceStructVersion;
+    }
+
+    public void setSourceStructVersion(String sourceStructVersion) {
+        this.sourceStructVersion = sourceStructVersion;
+    }
+
+    public String getTableBlacklist() {
+        return tableBlacklist;
+    }
+
+    public void setTableBlacklist(String tableBlacklist) {
+        this.tableBlacklist = tableBlacklist;
+    }
+
+    public String getTableExcludeList() {
+        return tableExcludeList;
+    }
+
+    public void setTableExcludeList(String tableExcludeList) {
+        this.tableExcludeList = tableExcludeList;
+    }
+
+    public String getTableIncludeList() {
+        return tableIncludeList;
+    }
+
+    public void setTableIncludeList(String tableIncludeList) {
+        this.tableIncludeList = tableIncludeList;
+    }
+
+    public String getTableWhitelist() {
+        return tableWhitelist;
+    }
+
+    public void setTableWhitelist(String tableWhitelist) {
+        this.tableWhitelist = tableWhitelist;
+    }
+
+    public String getTimePrecisionMode() {
+        return timePrecisionMode;
+    }
+
+    public void setTimePrecisionMode(String timePrecisionMode) {
+        this.timePrecisionMode = timePrecisionMode;
+    }
+
+    public Boolean getTombstonesOnDelete() {
+        return tombstonesOnDelete;
+    }
+
+    public void setTombstonesOnDelete(Boolean tombstonesOnDelete) {
+        this.tombstonesOnDelete = tombstonesOnDelete;
+    }
+
+    public String getTransactionTopic() {
+        return transactionTopic;
+    }
+
+    public void setTransactionTopic(String transactionTopic) {
+        this.transactionTopic = transactionTopic;
+    }
+
+    public String getUnavailableValuePlaceholder() {
+        return unavailableValuePlaceholder;
+    }
+
+    public void setUnavailableValuePlaceholder(
+            String unavailableValuePlaceholder) {
+        this.unavailableValuePlaceholder = unavailableValuePlaceholder;
+    }
+}
\ No newline at end of file
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConverter.java b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConverter.java
new file mode 100644
index 00000000000..a1180157cf6
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumOracleComponentConverter.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.springboot;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+import javax.annotation.Generated;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.context.properties.ConfigurationPropertiesBinding;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.core.convert.TypeDescriptor;
+import org.springframework.core.convert.converter.GenericConverter;
+import org.springframework.stereotype.Component;
+
+/**
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.springboot.maven.SpringBootAutoConfigurationMojo")
+@Configuration(proxyBeanMethods = false)
+@ConfigurationPropertiesBinding
+@Component
+public class DebeziumOracleComponentConverter implements GenericConverter {
+
+    @Autowired
+    private ApplicationContext applicationContext;
+
+    public Set<ConvertiblePair> getConvertibleTypes() {
+        Set<ConvertiblePair> answer = new LinkedHashSet<>();
+        answer.add(new ConvertiblePair(String.class, org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration.class));
+        return answer;
+    }
+
+    public Object convert(
+            Object source,
+            TypeDescriptor sourceType,
+            TypeDescriptor targetType) {
+        if (source == null) {
+            return null;
+        }
+        String ref = source.toString();
+        if (!ref.startsWith("#")) {
+            return null;
+        }
+        ref = ref.startsWith("#bean:") ? ref.substring(6) : ref.substring(1);
+        switch (targetType.getName()) {
+            case "org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration": return applicationContext.getBean(ref, org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration.class);
+        }
+        return null;
+    }
+}
\ No newline at end of file
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/LICENSE.txt b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 00000000000..6b0b1270ff0
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/NOTICE.txt b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 00000000000..2e215bf2e6b
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,11 @@
+   =========================================================================
+   ==  NOTICE file corresponding to the section 4 d of                    ==
+   ==  the Apache License, Version 2.0,                                   ==
+   ==  in this case for the Apache Camel distribution.                    ==
+   =========================================================================
+
+   This product includes software developed by
+   The Apache Software Foundation (http://www.apache.org/).
+
+   Please read the different LICENSE files present in the licenses directory of
+   this distribution.
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.factories b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.factories
new file mode 100644
index 00000000000..c71264c6d2b
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.factories
@@ -0,0 +1,21 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+
+org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
+org.apache.camel.component.debezium.springboot.DebeziumOracleComponentConverter,\
+org.apache.camel.component.debezium.springboot.DebeziumOracleComponentAutoConfiguration
+
diff --git a/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.provides b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.provides
new file mode 100644
index 00000000000..0e0c095352d
--- /dev/null
+++ b/components-starter/camel-debezium-oracle-starter/src/main/resources/META-INF/spring.provides
@@ -0,0 +1,17 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+provides: camel-debezium-oracle