You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@camel.apache.org by ac...@apache.org on 2019/11/05 16:26:57 UTC

[camel] branch master updated (a47fafe -> 9c6de95)

This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git.


    from a47fafe  Regen docs
     new 27c936a   CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set
     new 918d8d3   CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set
     new bdc2e05   CAMEL-14137: removed qtp variable according to pr comment.
     new 6d64b13   CAMEL-14137: removed qtp variable according to pr comment.
     new 9ad8e96   CAMEL-14137: code changed according to pr change request.
     new 3a45224   CAMEL-14137: removed qtp variable according to pr comment.
     new 98e4fe4   CAMEL-14137: revert doc changes
     new 8ce85d6   CAMEL-14137: code changed according to pr change request.
     new aa12763  CAMEL-14093: Add camel-debezium-sqlserver component
     new d1f2782  CAMEL-14093: Remove junit assert all import and replace it with specific imports
     new 9c6de95  Regen docs

The 11 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 apache-camel/pom.xml                               |  10 +
 apache-camel/src/main/descriptors/common-bin.xml   |   2 +
 bom/camel-bom/pom.xml                              |  10 +
 components/camel-debezium-sqlserver/pom.xml        | 152 ++++
 .../main/docs/debezium-sqlserver-component.adoc    | 254 ++++++
 .../debezium/DebeziumSqlserverComponent.java       |  57 ++
 .../debezium/DebeziumSqlserverEndpoint.java        |  49 +
 .../debezium/DebeziumSqlserverComponentTest.java   | 120 +++
 ...ConnectorEmbeddedDebeziumConfigurationTest.java |  90 ++
 .../src/test/resources/log4j2.properties           |   0
 .../camel/component/jetty/JettyHttpComponent.java  |  29 +-
 .../component/jetty/JettyThreadPoolSizeTest.java   |  82 ++
 components/pom.xml                                 |   1 +
 .../DebeziumSqlserverEndpointBuilderFactory.java   | 985 +++++++++++++++++++++
 docs/components/modules/ROOT/nav.adoc              |   1 +
 .../ROOT/pages/debezium-sqlserver-component.adoc   | 255 ++++++
 docs/components/modules/ROOT/pages/index.adoc      |   4 +-
 parent/pom.xml                                     |  10 +
 .../camel-debezium-sqlserver-starter/pom.xml       |  53 ++
 ...ebeziumSqlserverComponentAutoConfiguration.java | 129 +++
 .../DebeziumSqlserverComponentConfiguration.java   | 663 ++++++++++++++
 .../src/main/resources/META-INF/LICENSE.txt        |   0
 .../src/main/resources/META-INF/NOTICE.txt         |   0
 .../src/main/resources/META-INF/spring.factories   |  19 +
 .../src/main/resources/META-INF/spring.provides    |  17 +
 platforms/spring-boot/components-starter/pom.xml   |   1 +
 .../camel-spring-boot-dependencies/pom.xml         |  10 +
 .../springboot/CamelDebeziumSqlserverTest.java     |  46 +
 28 files changed, 3039 insertions(+), 10 deletions(-)
 create mode 100644 components/camel-debezium-sqlserver/pom.xml
 create mode 100644 components/camel-debezium-sqlserver/src/main/docs/debezium-sqlserver-component.adoc
 create mode 100644 components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverComponent.java
 create mode 100644 components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverEndpoint.java
 create mode 100644 components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/DebeziumSqlserverComponentTest.java
 create mode 100644 components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
 copy components/{camel-debezium-postgres => camel-debezium-sqlserver}/src/test/resources/log4j2.properties (100%)
 create mode 100644 components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
 create mode 100644 core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/DebeziumSqlserverEndpointBuilderFactory.java
 create mode 100644 docs/components/modules/ROOT/pages/debezium-sqlserver-component.adoc
 create mode 100644 platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/pom.xml
 create mode 100644 platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentAutoConfiguration.java
 create mode 100644 platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentConfiguration.java
 copy {tooling/maven/camel-package-maven-plugin => platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter}/src/main/resources/META-INF/LICENSE.txt (100%)
 copy {tooling/maven/camel-package-maven-plugin => platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter}/src/main/resources/META-INF/NOTICE.txt (100%)
 create mode 100644 platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.factories
 create mode 100644 platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.provides
 create mode 100644 tests/camel-itest-spring-boot/src/test/java/org/apache/camel/itest/springboot/CamelDebeziumSqlserverTest.java


[camel] 03/11: CAMEL-14137: removed qtp variable according to pr comment.

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit bdc2e0543a8c2defc4fb000068f25ed92206ec7e
Author: Luigi De Masi <ld...@redhat.com>
AuthorDate: Tue Nov 5 10:52:34 2019 +0100

     CAMEL-14137: removed qtp variable according to pr comment.
---
 .../camel/component/jetty/JettyHttpComponent.java  | 29 +++++++++++-----------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
index cd7645d..d6de2a1 100644
--- a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
+++ b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
@@ -134,7 +134,7 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
     protected boolean useXForwardedForHeader;
     private Integer proxyPort;
     private boolean sendServerVersion = true;
-    private QueuedThreadPool _queuedThreadPool;
+    private QueuedThreadPool defaultThreadPool;
 
     public JettyHttpComponent() {
     }
@@ -481,13 +481,13 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
                         this.removeServerMBean(connectorRef.server);
                         //mbContainer.removeBean(connectorRef.connector);
                     }
-                    if (_queuedThreadPool !=null){
+                    if (defaultThreadPool !=null){
                         try {
-                            _queuedThreadPool.stop();
+                            defaultThreadPool.stop();
                         }catch(Throwable t){
-                            _queuedThreadPool.destroy();
+                            defaultThreadPool.destroy();
                         }finally {
-                            _queuedThreadPool=null;
+                            defaultThreadPool =null;
                         }
                     }
                 }
@@ -1315,21 +1315,20 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
     protected Server createServer() {
         Server s = null;
         ThreadPool tp = threadPool;
-        QueuedThreadPool qtp = null;
+        defaultThreadPool = null;
         // configure thread pool if min/max given
         if (minThreads != null || maxThreads != null) {
             if (getThreadPool() != null) {
                 throw new IllegalArgumentException("You cannot configure both minThreads/maxThreads and a custom threadPool on JettyHttpComponent: " + this);
             }
-            qtp = new QueuedThreadPool();
+            defaultThreadPool = new QueuedThreadPool();
             if (minThreads != null) {
-                qtp.setMinThreads(minThreads.intValue());
+                defaultThreadPool.setMinThreads(minThreads.intValue());
             }
             if (maxThreads != null) {
-                qtp.setMaxThreads(maxThreads.intValue());
+                defaultThreadPool.setMaxThreads(maxThreads.intValue());
             }
-            tp = qtp;
-            _queuedThreadPool=qtp;
+            tp = defaultThreadPool;
 
         }
         if (tp != null) {
@@ -1351,13 +1350,13 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
         if (s == null) {
             s = new Server();
         }
-        if (qtp != null) {
+        if (defaultThreadPool != null) {
             // let the thread names indicate they are from the server
-            qtp.setName("CamelJettyServer(" + ObjectHelper.getIdentityHashCode(s) + ")");
+            defaultThreadPool.setName("CamelJettyServer(" + ObjectHelper.getIdentityHashCode(s) + ")");
             try {
-                qtp.start();
+                defaultThreadPool.start();
             } catch (Exception e) {
-                throw new RuntimeCamelException("Error starting JettyServer thread pool: " + qtp, e);
+                throw new RuntimeCamelException("Error starting JettyServer thread pool: " + defaultThreadPool, e);
             }
         }
         ContextHandlerCollection collection = new ContextHandlerCollection();


[camel] 10/11: CAMEL-14093: Remove junit assert all import and replace it with specific imports

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit d1f27829662b2f0bc599649a17e493bffc0a7372
Author: Omar Al-Safi <om...@gmail.com>
AuthorDate: Tue Nov 5 16:11:45 2019 +0100

    CAMEL-14093: Remove junit assert all import and replace it with specific imports
---
 .../SqlserverConnectorEmbeddedDebeziumConfigurationTest.java          | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
index f209b0f..e18a54f 100644
--- a/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
+++ b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
@@ -23,7 +23,9 @@ import io.debezium.embedded.EmbeddedEngine;
 import org.apache.camel.component.debezium.DebeziumConstants;
 import org.junit.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class SqlserverConnectorEmbeddedDebeziumConfigurationTest {
 


[camel] 01/11: CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 27c936a6ff85e54a7cc7a70ce7c15aba762b1fd8
Author: Luigi De Masi <ld...@redhat.com>
AuthorDate: Mon Nov 4 23:30:23 2019 +0100

     CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set
---
 .../camel/component/jetty/JettyHttpComponent.java  | 12 ++++
 .../component/jetty/JettyThreadPoolSizeTest.java   | 72 ++++++++++++++++++++++
 2 files changed, 84 insertions(+)

diff --git a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
index 22c2c85..cd7645d 100644
--- a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
+++ b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
@@ -134,6 +134,7 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
     protected boolean useXForwardedForHeader;
     private Integer proxyPort;
     private boolean sendServerVersion = true;
+    private QueuedThreadPool _queuedThreadPool;
 
     public JettyHttpComponent() {
     }
@@ -480,6 +481,15 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
                         this.removeServerMBean(connectorRef.server);
                         //mbContainer.removeBean(connectorRef.connector);
                     }
+                    if (_queuedThreadPool !=null){
+                        try {
+                            _queuedThreadPool.stop();
+                        }catch(Throwable t){
+                            _queuedThreadPool.destroy();
+                        }finally {
+                            _queuedThreadPool=null;
+                        }
+                    }
                 }
             }
         }
@@ -1319,6 +1329,8 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
                 qtp.setMaxThreads(maxThreads.intValue());
             }
             tp = qtp;
+            _queuedThreadPool=qtp;
+
         }
         if (tp != null) {
             try {
diff --git a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
new file mode 100644
index 0000000..27aae78
--- /dev/null
+++ b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
@@ -0,0 +1,72 @@
+package org.apache.camel.component.jetty;
+
+import org.apache.camel.builder.RouteBuilder;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Set;
+
+public class JettyThreadPoolSizeTest extends BaseJettyTest {
+
+
+    private static final Logger log =  LoggerFactory.getLogger(JettyThreadPoolSizeTest.class);
+
+
+    private JettyHttpComponent jettyComponent;
+
+    private RouteBuilder builder;
+
+    @Test
+    public void threadPoolTest(){
+
+
+        Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
+        long initialJettyThreadNumber = threadSet.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+
+        log.info("initial Jetty thread number (expected 5): "+ initialJettyThreadNumber);
+
+        context.stop();
+
+        Set<Thread> threadSetAfterStop = Thread.getAllStackTraces().keySet();
+        long jettyThreadNumberAfterStop = threadSetAfterStop.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+
+        log.info("Jetty thread number after stopping Camel Context: (expected 0): "+ jettyThreadNumberAfterStop);
+
+
+        jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
+        jettyComponent.setMinThreads(5);
+        jettyComponent.setMaxThreads(5);
+
+        context.start();
+        Set<Thread> threadSetAfterRestart = Thread.getAllStackTraces().keySet();
+        long jettyThreadNumberAfterRestart = threadSetAfterRestart.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+
+        log.info("Jetty thread number after starting Camel Context: (expected 5): "+ jettyThreadNumberAfterRestart);
+
+
+        assertEquals(5,initialJettyThreadNumber);
+
+        assertEquals(0,jettyThreadNumberAfterStop);
+
+        assertEquals(5,jettyThreadNumberAfterRestart);
+    }
+
+    @Override
+    protected RouteBuilder createRouteBuilder() throws Exception {
+
+        builder = new RouteBuilder() {
+            @Override
+            public void configure() throws Exception {
+                // setup the jetty component with the custom minThreads
+                jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
+                jettyComponent.setMinThreads(5);
+                jettyComponent.setMaxThreads(5);
+
+                from("jetty://http://localhost:{{port}}/myserverWithCustomPoolSize").to("mock:result");
+            }
+        };
+        return builder;
+    }
+
+}


[camel] 08/11: CAMEL-14137: code changed according to pr change request.

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 8ce85d642046b92217eada1c27ab74ff0ea8bd9d
Author: Luigi De Masi <>
AuthorDate: Tue Nov 5 13:53:23 2019 +0100

     CAMEL-14137: code changed according to pr change request.
---
 .../component/jetty/JettyThreadPoolSizeTest.java    | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
index 9e8d9d7..433b486 100644
--- a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
+++ b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
@@ -1,12 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.camel.component.jetty;
 
+import java.util.Set;
 import org.apache.camel.builder.RouteBuilder;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Set;
-
 public class JettyThreadPoolSizeTest extends BaseJettyTest {
 
 
@@ -33,7 +48,7 @@ public class JettyThreadPoolSizeTest extends BaseJettyTest {
 
         long jettyThreadNumberAfterRestart = countJettyThread();
 
-        LOG.info("Jetty thread number after starting Camel Context: (expected 5): "+ jettyThreadNumberAfterRestart);
+        LOG.info("Jetty thread number after starting Camel Context: (expected 5): " + jettyThreadNumberAfterRestart);
 
         assertEquals(5L, initialJettyThreadNumber);
 


[camel] 07/11: CAMEL-14137: revert doc changes

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 98e4fe40db64af577924f4293b9dd1c8fd047a9c
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Tue Nov 5 16:38:49 2019 +0100

     CAMEL-14137: revert doc changes
---
 .../ROOT/pages/debezium-postgres-component.adoc    | 11 +++++
 docs/components/modules/ROOT/pages/index.adoc      | 56 +++++++++++-----------
 2 files changed, 39 insertions(+), 28 deletions(-)

diff --git a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
index 400477a..8df0b5b 100644
--- a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
+++ b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
@@ -122,11 +122,15 @@ with the following path and query parameters:
 | *schemaBlacklist* (postgres) | The schemas for which events must not be captured |  | String
 | *schemaRefreshMode* (postgres) | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless unchanged TOASTable data fully accounts [...]
 | *schemaWhitelist* (postgres) | The schemas for which events should be captured |  | String
+<<<<<<< HEAD
 | *slotDropOnStop* (postgres) | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | boolean
 <<<<<<< HEAD
 | *slotMaxRetries* (postgres) | How many times to retry connecting to a replication slot when an attempt fails. | 6 | int
 =======
 >>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
+=======
+| *slotDrop_on_stop* (postgres) | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | boolean
+>>>>>>> b982055...  CAMEL-14137: revert doc changes
 | *slotName* (postgres) | The name of the Postgres logical decoding slot created for streaming changes from a plugin.Defaults to 'debezium | debezium | String
 | *slotRetryDelayMs* (postgres) | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | long
 | *slotStreamParams* (postgres) | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
@@ -163,10 +167,14 @@ When using Spring Boot make sure to use the following Maven dependency to have s
 
 
 <<<<<<< HEAD
+<<<<<<< HEAD
 The component supports 63 options, which are listed below.
 =======
 The component supports 58 options, which are listed below.
 >>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
+=======
+The component supports 57 options, which are listed below.
+>>>>>>> b982055...  CAMEL-14137: revert doc changes
 
 
 
@@ -216,11 +224,14 @@ The component supports 58 options, which are listed below.
 | *camel.component.debezium-postgres.configuration.schema-blacklist* | The schemas for which events must not be captured |  | String
 | *camel.component.debezium-postgres.configuration.schema-refresh-mode* | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless  [...]
 | *camel.component.debezium-postgres.configuration.schema-whitelist* | The schemas for which events should be captured |  | String
+<<<<<<< HEAD
 | *camel.component.debezium-postgres.configuration.slot-drop-on-stop* | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | Boolean
 <<<<<<< HEAD
 | *camel.component.debezium-postgres.configuration.slot-max-retries* | How many times to retry connecting to a replication slot when an attempt fails. | 6 | Integer
 =======
 >>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
+=======
+>>>>>>> b982055...  CAMEL-14137: revert doc changes
 | *camel.component.debezium-postgres.configuration.slot-name* | The name of the Postgres logical decoding slot created for streaming changes from a plugin.Defaults to 'debezium | debezium | String
 | *camel.component.debezium-postgres.configuration.slot-retry-delay-ms* | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | Long
 | *camel.component.debezium-postgres.configuration.slot-stream-params* | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
diff --git a/docs/components/modules/ROOT/pages/index.adoc b/docs/components/modules/ROOT/pages/index.adoc
index 224bb69..13312cd 100644
--- a/docs/components/modules/ROOT/pages/index.adoc
+++ b/docs/components/modules/ROOT/pages/index.adoc
@@ -6,7 +6,7 @@ The following Apache Camel artifacts are provided:
 == Components
 
 // components: START
-Number of Components: 302 in 217 JAR artifacts (0 deprecated)
+Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 [width="100%",cols="4,1,5",options="header"]
 |===
@@ -90,7 +90,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:azure-queue-component.adoc[Azure Storage Queue Service] (camel-azure) | 2.19 | The azure-queue component is used for storing and retrieving messages from Azure Storage Queue Service.
 
-| xref:bean-component.adoc[Bean] (@@@ARTIFACTID@@@) | 1.0 | The bean component is for invoking Java beans from Camel.
+| xref:bean-component.adoc[Bean] (camel-bean) | 1.0 | The bean component is for invoking Java beans from Camel.
 
 | xref:bean-validator-component.adoc[Bean Validator] (camel-bean-validator) | 2.3 | The Validator component performs bean validation of the message body using the Java Bean Validation API.
 
@@ -102,7 +102,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:braintree-component.adoc[Braintree] (camel-braintree) | 2.17 | The braintree component is used for integrating with the Braintree Payment System.
 
-| xref:browse-component.adoc[Browse] (@@@ARTIFACTID@@@) | 1.3 | The browse component is used for viewing the messages received on endpoints that supports BrowsableEndpoint.
+| xref:browse-component.adoc[Browse] (camel-browse) | 1.3 | The browse component is used for viewing the messages received on endpoints that supports BrowsableEndpoint.
 
 | xref:caffeine-cache-component.adoc[Caffeine Cache] (camel-caffeine) | 2.20 | The caffeine-cache component is used for integration with Caffeine Cache.
 
@@ -114,7 +114,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:chunk-component.adoc[Chunk] (camel-chunk) | 2.15 | Transforms the message using a Chunk template.
 
-| xref:class-component.adoc[Class] (@@@ARTIFACTID@@@) | 2.4 | The class component is for invoking Java classes (Java beans) from Camel.
+| xref:class-component.adoc[Class] (camel-bean) | 2.4 | The class component is for invoking Java classes (Java beans) from Camel.
 
 | xref:cm-sms-component.adoc[CM SMS Gateway] (camel-cm-sms) | 2.18 | The cm-sms component allows to integrate with CM SMS Gateway.
 
@@ -126,7 +126,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:consul-component.adoc[Consul] (camel-consul) | 2.18 | The camel consul component allows you to work with Consul, a distributed, highly available, datacenter-aware, service discovery and configuration system.
 
-| xref:controlbus-component.adoc[Control Bus] (@@@ARTIFACTID@@@) | 2.11 | The controlbus component provides easy management of Camel applications based on the Control Bus EIP pattern.
+| xref:controlbus-component.adoc[Control Bus] (camel-controlbus) | 2.11 | The controlbus component provides easy management of Camel applications based on the Control Bus EIP pattern.
 
 | xref:corda-component.adoc[Corda] (camel-corda) | 2.23 | The corda component uses the corda-rpc to interact with corda nodes.
 
@@ -142,11 +142,11 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:cxfrs-component.adoc[CXF-RS] (camel-cxf) | 2.0 | The cxfrs component is used for JAX-RS REST services using Apache CXF.
 
-| xref:dataformat-component.adoc[Data Format] (@@@ARTIFACTID@@@) | 2.12 | The dataformat component is used for working with Data Formats as if it was a regular Component supporting Endpoints and URIs.
+| xref:dataformat-component.adoc[Data Format] (camel-dataformat) | 2.12 | The dataformat component is used for working with Data Formats as if it was a regular Component supporting Endpoints and URIs.
 
-| xref:dataset-component.adoc[Dataset] (@@@ARTIFACTID@@@) | 1.3 | The dataset component provides a mechanism to easily perform load & soak testing of your system.
+| xref:dataset-component.adoc[Dataset] (camel-dataset) | 1.3 | The dataset component provides a mechanism to easily perform load & soak testing of your system.
 
-| xref:dataset-test-component.adoc[DataSet Test] (@@@ARTIFACTID@@@) | 1.3 | The dataset-test component extends the mock component by on startup to pull messages from another endpoint to set the expected message bodies.
+| xref:dataset-test-component.adoc[DataSet Test] (camel-dataset) | 1.3 | The dataset-test component extends the mock component by on startup to pull messages from another endpoint to set the expected message bodies.
 
 | xref:debezium-mongodb-component.adoc[Debezium MongoDB Connector] (camel-debezium-mongodb) | 3.0 | Represents a Debezium MongoDB endpoint which is used to capture changes in MongoDB database so that that applications can see those changes and respond to them.
 
@@ -156,9 +156,9 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:digitalocean-component.adoc[DigitalOcean] (camel-digitalocean) | 2.19 | The DigitalOcean component allows you to manage Droplets and resources within the DigitalOcean cloud.
 
-| xref:direct-component.adoc[Direct] (@@@ARTIFACTID@@@) | 1.0 | The direct component provides direct, synchronous call to another endpoint from the same CamelContext.
+| xref:direct-component.adoc[Direct] (camel-direct) | 1.0 | The direct component provides direct, synchronous call to another endpoint from the same CamelContext.
 
-| xref:direct-vm-component.adoc[Direct VM] (@@@ARTIFACTID@@@) | 2.10 | The direct-vm component provides direct, synchronous call to another endpoint from any CamelContext in the same JVM.
+| xref:direct-vm-component.adoc[Direct VM] (camel-directvm) | 2.10 | The direct-vm component provides direct, synchronous call to another endpoint from any CamelContext in the same JVM.
 
 | xref:disruptor-component.adoc[Disruptor] (camel-disruptor) | 2.12 | The disruptor component provides asynchronous SEDA behavior using LMAX Disruptor.
 
@@ -186,7 +186,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:fhir-component.adoc[FHIR] (camel-fhir) | 2.23 | The fhir component is used for working with the FHIR protocol (health care).
 
-| xref:file-component.adoc[File] (@@@ARTIFACTID@@@) | 1.0 | The file component is used for reading or writing files.
+| xref:file-component.adoc[File] (camel-file) | 1.0 | The file component is used for reading or writing files.
 
 | xref:file-watch-component.adoc[file-watch] (camel-file-watch) | 3.0 | The file-watch is used to monitor file events in directory using java.nio.file.WatchService
 
@@ -268,7 +268,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:hipchat-component.adoc[Hipchat] (camel-hipchat) | 2.15 | The hipchat component supports producing and consuming messages from/to Hipchat service.
 
-| xref:http-component.adoc[HTTP] (@@@ARTIFACTID@@@) | 2.3 | For calling out to external HTTP servers using Apache HTTP Client 4.x.
+| xref:http-component.adoc[HTTP] (camel-http) | 2.3 | For calling out to external HTTP servers using Apache HTTP Client 4.x.
 
 | xref:iec60870-client-component.adoc[IEC 60870 Client] (camel-iec60870) | 2.20 | IEC 60870 component used for telecontrol (supervisory control and data acquisition) such as controlling electric power transmission grids and other geographically widespread control systems.
 
@@ -312,7 +312,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:jdbc-component.adoc[JDBC] (camel-jdbc) | 1.2 | The jdbc component enables you to access databases through JDBC, where SQL queries are sent in the message body.
 
-| xref:jetty-component.adoc[Jetty] (@@@ARTIFACTID@@@) | 1.2 | To use Jetty as a HTTP server as consumer for Camel routes.
+| xref:jetty-component.adoc[Jetty] (camel-jetty) | 1.2 | To use Jetty as a HTTP server as consumer for Camel routes.
 
 | xref:websocket-component.adoc[Jetty Websocket] (camel-websocket) | 2.10 | The websocket component provides websocket endpoints with Jetty for communicating with clients using websocket.
 
@@ -368,7 +368,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:kubernetes-services-component.adoc[Kubernetes Services] (camel-kubernetes) | 2.17 | The Kubernetes Service Accounts component provides a producer to execute service operations and a consumer to consume service events.
 
-| xref:language-component.adoc[Language] (@@@ARTIFACTID@@@) | 2.5 | The language component allows you to send a message to an endpoint which executes a script by any of the supported Languages in Camel.
+| xref:language-component.adoc[Language] (camel-language) | 2.5 | The language component allows you to send a message to an endpoint which executes a script by any of the supported Languages in Camel.
 
 | xref:ldap-component.adoc[LDAP] (camel-ldap) | 1.5 | The ldap component allows you to perform searches in LDAP servers using filters as the message payload.
 
@@ -376,7 +376,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:linkedin-component.adoc[Linkedin] (camel-linkedin) | 2.14 | The linkedin component is used for retrieving LinkedIn user profiles, connections, companies, groups, posts, etc.
 
-| xref:log-component.adoc[Log] (@@@ARTIFACTID@@@) | 1.1 | The log component logs message exchanges to the underlying logging mechanism.
+| xref:log-component.adoc[Log] (camel-log) | 1.1 | The log component logs message exchanges to the underlying logging mechanism.
 
 | xref:lucene-component.adoc[Lucene] (camel-lucene) | 2.2 | To insert or query from Apache Lucene databases.
 
@@ -396,7 +396,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:mllp-component.adoc[MLLP] (camel-mllp) | 2.17 | Provides functionality required by Healthcare providers to communicate with other systems using the MLLP protocol.
 
-| xref:mock-component.adoc[Mock] (@@@ARTIFACTID@@@) | 1.0 | The mock component is used for testing routes and mediation rules using mocks.
+| xref:mock-component.adoc[Mock] (camel-mock) | 1.0 | The mock component is used for testing routes and mediation rules using mocks.
 
 | xref:mongodb-component.adoc[MongoDB] (camel-mongodb) | 2.19 | Component for working with documents stored in MongoDB database.
 
@@ -476,11 +476,11 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:reactive-streams-component.adoc[Reactive Streams] (camel-reactive-streams) | 2.19 | Reactive Camel using reactive streams
 
-| xref:ref-component.adoc[Ref] (@@@ARTIFACTID@@@) | 1.2 | The ref component is used for lookup of existing endpoints bound in the Registry.
+| xref:ref-component.adoc[Ref] (camel-ref) | 1.2 | The ref component is used for lookup of existing endpoints bound in the Registry.
 
-| xref:rest-component.adoc[REST] (@@@ARTIFACTID@@@) | 2.14 | The rest component is used for either hosting REST services (consumer) or calling external REST services (producer).
+| xref:rest-component.adoc[REST] (camel-rest) | 2.14 | The rest component is used for either hosting REST services (consumer) or calling external REST services (producer).
 
-| xref:rest-api-component.adoc[REST API] (@@@ARTIFACTID@@@) | 2.16 | The rest-api component is used for providing Swagger API of the REST services which has been defined using the rest-dsl in Camel.
+| xref:rest-api-component.adoc[REST API] (camel-rest) | 2.16 | The rest-api component is used for providing Swagger API of the REST services which has been defined using the rest-dsl in Camel.
 
 | xref:rest-swagger-component.adoc[REST Swagger] (camel-rest-swagger) | 2.19 | An awesome REST endpoint backed by Swagger specifications.
 
@@ -488,19 +488,19 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:rss-component.adoc[RSS] (camel-rss) | 2.0 | The rss component is used for consuming RSS feeds.
 
-| xref:saga-component.adoc[Saga] (@@@ARTIFACTID@@@) | 2.21 | The saga component provides access to advanced options for managing the flow in the Saga EIP.
+| xref:saga-component.adoc[Saga] (camel-saga) | 2.21 | The saga component provides access to advanced options for managing the flow in the Saga EIP.
 
 | xref:salesforce-component.adoc[Salesforce] (camel-salesforce) | 2.12 | The salesforce component is used for integrating Camel with the massive Salesforce API.
 
 | xref:sap-netweaver-component.adoc[SAP NetWeaver] (camel-sap-netweaver) | 2.12 | The sap-netweaver component integrates with the SAP NetWeaver Gateway using HTTP transports.
 
-| xref:scheduler-component.adoc[Scheduler] (@@@ARTIFACTID@@@) | 2.15 | The scheduler component is used for generating message exchanges when a scheduler fires.
+| xref:scheduler-component.adoc[Scheduler] (camel-scheduler) | 2.15 | The scheduler component is used for generating message exchanges when a scheduler fires.
 
 | xref:schematron-component.adoc[Schematron] (camel-schematron) | 2.15 | Validates the payload of a message using the Schematron Library.
 
 | xref:scp-component.adoc[SCP] (camel-jsch) | 2.10 | To copy files using the secure copy protocol (SCP).
 
-| xref:seda-component.adoc[SEDA] (@@@ARTIFACTID@@@) | 1.1 | The seda component provides asynchronous call to another endpoint from any CamelContext in the same JVM.
+| xref:seda-component.adoc[SEDA] (camel-seda) | 1.1 | The seda component provides asynchronous call to another endpoint from any CamelContext in the same JVM.
 
 | xref:service-component.adoc[Service] (camel-service) | 2.22 | Represents an endpoint which is registered to a Service Registry such as Consul, Etcd.
 
@@ -536,7 +536,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:spring-batch-component.adoc[Spring Batch] (camel-spring-batch) | 2.10 | The spring-batch component allows to send messages to Spring Batch for further processing.
 
-| xref:spring-event-component.adoc[Spring Event] (@@@ARTIFACTID@@@) | 1.4 | The spring-event component allows to listen for Spring Application Events.
+| xref:spring-event-component.adoc[Spring Event] (camel-spring) | 1.4 | The spring-event component allows to listen for Spring Application Events.
 
 | xref:spring-integration-component.adoc[Spring Integration] (camel-spring-integration) | 1.4 | Bridges Camel with Spring Integration.
 
@@ -560,7 +560,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:string-template-component.adoc[String Template] (camel-stringtemplate) | 1.2 | Transforms the message using a String template.
 
-| xref:stub-component.adoc[Stub] (@@@ARTIFACTID@@@) | 2.10 | The stub component provides a simple way to stub out any physical endpoints while in development or testing.
+| xref:stub-component.adoc[Stub] (camel-stub) | 2.10 | The stub component provides a simple way to stub out any physical endpoints while in development or testing.
 
 | xref:telegram-component.adoc[Telegram] (camel-telegram) | 2.18 | The telegram component provides access to the Telegram Bot API.
 
@@ -568,7 +568,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:tika-component.adoc[Tika] (camel-tika) | 2.19 | This component integrates with Apache Tika to extract content and metadata from thousands of file types.
 
-| xref:timer-component.adoc[Timer] (@@@ARTIFACTID@@@) | 1.0 | The timer component is used for generating message exchanges when a timer fires.
+| xref:timer-component.adoc[Timer] (camel-timer) | 1.0 | The timer component is used for generating message exchanges when a timer fires.
 
 | xref:twilio-component.adoc[Twilio] (camel-twilio) | 2.20 | The Twilio component allows you to interact with the Twilio REST APIs using Twilio Java SDK.
 
@@ -580,13 +580,13 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:undertow-component.adoc[Undertow] (camel-undertow) | 2.16 | The undertow component provides HTTP and WebSocket based endpoints for consuming and producing HTTP/WebSocket requests.
 
-| xref:validator-component.adoc[Validator] (@@@ARTIFACTID@@@) | 1.1 | Validates the payload of a message using XML Schema and JAXP Validation.
+| xref:validator-component.adoc[Validator] (camel-validator) | 1.1 | Validates the payload of a message using XML Schema and JAXP Validation.
 
 | xref:velocity-component.adoc[Velocity] (camel-velocity) | 1.2 | Transforms the message using a Velocity template.
 
 | xref:vertx-component.adoc[Vert.x] (camel-vertx) | 2.12 | The vertx component is used for sending and receive messages from a vertx event bus.
 
-| xref:vm-component.adoc[VM] (@@@ARTIFACTID@@@) | 1.1 | The vm component provides asynchronous call to another endpoint from the same CamelContext.
+| xref:vm-component.adoc[VM] (camel-vm) | 1.1 | The vm component provides asynchronous call to another endpoint from the same CamelContext.
 
 | xref:weather-component.adoc[Weather] (camel-weather) | 2.12 | Polls the weather information from Open Weather Map.
 
@@ -606,7 +606,7 @@ Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 | xref:xquery-component.adoc[XQuery] (camel-saxon) | 1.0 | Transforms the message using a XQuery template using Saxon.
 
-| xref:xslt-component.adoc[XSLT] (@@@ARTIFACTID@@@) | 1.3 | Transforms the message using a XSLT template.
+| xref:xslt-component.adoc[XSLT] (camel-xslt) | 1.3 | Transforms the message using a XSLT template.
 
 | xref:yammer-component.adoc[Yammer] (camel-yammer) | 2.12 | The yammer component allows you to interact with the Yammer enterprise social network.
 


[camel] 06/11: CAMEL-14137: removed qtp variable according to pr comment.

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 3a4522463d877433a8cb137309323208b3540200
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Tue Nov 5 16:38:19 2019 +0100

     CAMEL-14137: removed qtp variable according to pr comment.
---
 .../component/jetty/JettyThreadPoolSizeTest.java   | 21 ++++----
 .../ROOT/pages/debezium-postgres-component.adoc    | 10 ++++
 docs/components/modules/ROOT/pages/index.adoc      | 56 +++++++++++-----------
 3 files changed, 48 insertions(+), 39 deletions(-)

diff --git a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
index 760b285..9e8d9d7 100644
--- a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
+++ b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
@@ -10,20 +10,20 @@ import java.util.Set;
 public class JettyThreadPoolSizeTest extends BaseJettyTest {
 
 
-    private static final Logger log =  LoggerFactory.getLogger(JettyThreadPoolSizeTest.class);
+    private static final Logger LOG =  LoggerFactory.getLogger(JettyThreadPoolSizeTest.class);
 
     @Test
-    public void threadPoolTest(){
+    public void threadPoolTest() {
 
         long initialJettyThreadNumber = countJettyThread();
 
-        log.info("initial Jetty thread number (expected 5): "+ initialJettyThreadNumber);
+        LOG.info("initial Jetty thread number (expected 5): " + initialJettyThreadNumber);
 
         context.stop();
 
         long jettyThreadNumberAfterStop =  countJettyThread();
 
-        log.info("Jetty thread number after stopping Camel Context: (expected 0): "+ jettyThreadNumberAfterStop);
+        LOG.info("Jetty thread number after stopping Camel Context: (expected 0): " + jettyThreadNumberAfterStop);
 
         JettyHttpComponent jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
         jettyComponent.setMinThreads(5);
@@ -33,13 +33,13 @@ public class JettyThreadPoolSizeTest extends BaseJettyTest {
 
         long jettyThreadNumberAfterRestart = countJettyThread();
 
-        log.info("Jetty thread number after starting Camel Context: (expected 5): "+ jettyThreadNumberAfterRestart);
+        LOG.info("Jetty thread number after starting Camel Context: (expected 5): " + jettyThreadNumberAfterRestart);
 
-        assertEquals(5L,initialJettyThreadNumber);
+        assertEquals(5L, initialJettyThreadNumber);
 
-        assertEquals(0L,jettyThreadNumberAfterStop);
+        assertEquals(0L, jettyThreadNumberAfterStop);
 
-        assertEquals(5L,jettyThreadNumberAfterRestart);
+        assertEquals(5L, jettyThreadNumberAfterRestart);
     }
 
     @Override
@@ -49,7 +49,7 @@ public class JettyThreadPoolSizeTest extends BaseJettyTest {
             @Override
             public void configure() throws Exception {
                 // setup the jetty component with the custom minThreads
-               JettyHttpComponent jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
+                JettyHttpComponent jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
                 jettyComponent.setMinThreads(5);
                 jettyComponent.setMaxThreads(5);
 
@@ -58,11 +58,10 @@ public class JettyThreadPoolSizeTest extends BaseJettyTest {
         };
     }
 
-    private long countJettyThread(){
+    private long countJettyThread() {
 
         Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
         return threadSet.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
-
     }
 
 }
diff --git a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
index f9f3a49..400477a 100644
--- a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
+++ b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
@@ -123,7 +123,10 @@ with the following path and query parameters:
 | *schemaRefreshMode* (postgres) | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless unchanged TOASTable data fully accounts [...]
 | *schemaWhitelist* (postgres) | The schemas for which events should be captured |  | String
 | *slotDropOnStop* (postgres) | Whether or not to drop the logical replication slot when the connector finishes orderly. By default the replication is kept so that on restart progress can resume from the last recorded location | false | boolean
 | *slotMaxRetries* (postgres) | How many times to retry connecting to a replication slot when an attempt fails. | 6 | int
 | *slotName* (postgres) | The name of the Postgres logical decoding slot created for streaming changes from a plugin. Defaults to 'debezium' | debezium | String
 | *slotRetryDelayMs* (postgres) | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | long
 | *slotStreamParams* (postgres) | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
@@ -159,7 +162,11 @@ When using Spring Boot make sure to use the following Maven dependency to have s
 ----
 
 
 The component supports 63 options, which are listed below.
 
 
 
@@ -210,7 +217,10 @@ The component supports 63 options, which are listed below.
 | *camel.component.debezium-postgres.configuration.schema-refresh-mode* | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless  [...]
 | *camel.component.debezium-postgres.configuration.schema-whitelist* | The schemas for which events should be captured |  | String
 | *camel.component.debezium-postgres.configuration.slot-drop-on-stop* | Whether or not to drop the logical replication slot when the connector finishes orderly. By default the replication is kept so that on restart progress can resume from the last recorded location | false | Boolean
 | *camel.component.debezium-postgres.configuration.slot-max-retries* | How many times to retry connecting to a replication slot when an attempt fails. | 6 | Integer
 | *camel.component.debezium-postgres.configuration.slot-name* | The name of the Postgres logical decoding slot created for streaming changes from a plugin. Defaults to 'debezium' | debezium | String
 | *camel.component.debezium-postgres.configuration.slot-retry-delay-ms* | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | Long
 | *camel.component.debezium-postgres.configuration.slot-stream-params* | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
diff --git a/docs/components/modules/ROOT/pages/index.adoc b/docs/components/modules/ROOT/pages/index.adoc
index 13312cd..224bb69 100644
--- a/docs/components/modules/ROOT/pages/index.adoc
+++ b/docs/components/modules/ROOT/pages/index.adoc
@@ -6,7 +6,7 @@ The following Apache Camel artifacts are provided:
 == Components
 
 // components: START
-Number of Components: 302 in 240 JAR artifacts (0 deprecated)
+Number of Components: 302 in 217 JAR artifacts (0 deprecated)
 
 [width="100%",cols="4,1,5",options="header"]
 |===
@@ -90,7 +90,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:azure-queue-component.adoc[Azure Storage Queue Service] (camel-azure) | 2.19 | The azure-queue component is used for storing and retrieving messages from Azure Storage Queue Service.
 
-| xref:bean-component.adoc[Bean] (camel-bean) | 1.0 | The bean component is for invoking Java beans from Camel.
+| xref:bean-component.adoc[Bean] (@@@ARTIFACTID@@@) | 1.0 | The bean component is for invoking Java beans from Camel.
 
 | xref:bean-validator-component.adoc[Bean Validator] (camel-bean-validator) | 2.3 | The Validator component performs bean validation of the message body using the Java Bean Validation API.
 
@@ -102,7 +102,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:braintree-component.adoc[Braintree] (camel-braintree) | 2.17 | The braintree component is used for integrating with the Braintree Payment System.
 
-| xref:browse-component.adoc[Browse] (camel-browse) | 1.3 | The browse component is used for viewing the messages received on endpoints that supports BrowsableEndpoint.
+| xref:browse-component.adoc[Browse] (@@@ARTIFACTID@@@) | 1.3 | The browse component is used for viewing the messages received on endpoints that supports BrowsableEndpoint.
 
 | xref:caffeine-cache-component.adoc[Caffeine Cache] (camel-caffeine) | 2.20 | The caffeine-cache component is used for integration with Caffeine Cache.
 
@@ -114,7 +114,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:chunk-component.adoc[Chunk] (camel-chunk) | 2.15 | Transforms the message using a Chunk template.
 
-| xref:class-component.adoc[Class] (camel-bean) | 2.4 | The class component is for invoking Java classes (Java beans) from Camel.
+| xref:class-component.adoc[Class] (@@@ARTIFACTID@@@) | 2.4 | The class component is for invoking Java classes (Java beans) from Camel.
 
 | xref:cm-sms-component.adoc[CM SMS Gateway] (camel-cm-sms) | 2.18 | The cm-sms component allows to integrate with CM SMS Gateway.
 
@@ -126,7 +126,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:consul-component.adoc[Consul] (camel-consul) | 2.18 | The camel consul component allows you to work with Consul, a distributed, highly available, datacenter-aware, service discovery and configuration system.
 
-| xref:controlbus-component.adoc[Control Bus] (camel-controlbus) | 2.11 | The controlbus component provides easy management of Camel applications based on the Control Bus EIP pattern.
+| xref:controlbus-component.adoc[Control Bus] (@@@ARTIFACTID@@@) | 2.11 | The controlbus component provides easy management of Camel applications based on the Control Bus EIP pattern.
 
 | xref:corda-component.adoc[Corda] (camel-corda) | 2.23 | The corda component uses the corda-rpc to interact with corda nodes.
 
@@ -142,11 +142,11 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:cxfrs-component.adoc[CXF-RS] (camel-cxf) | 2.0 | The cxfrs component is used for JAX-RS REST services using Apache CXF.
 
-| xref:dataformat-component.adoc[Data Format] (camel-dataformat) | 2.12 | The dataformat component is used for working with Data Formats as if it was a regular Component supporting Endpoints and URIs.
+| xref:dataformat-component.adoc[Data Format] (@@@ARTIFACTID@@@) | 2.12 | The dataformat component is used for working with Data Formats as if it was a regular Component supporting Endpoints and URIs.
 
-| xref:dataset-component.adoc[Dataset] (camel-dataset) | 1.3 | The dataset component provides a mechanism to easily perform load & soak testing of your system.
+| xref:dataset-component.adoc[Dataset] (@@@ARTIFACTID@@@) | 1.3 | The dataset component provides a mechanism to easily perform load & soak testing of your system.
 
-| xref:dataset-test-component.adoc[DataSet Test] (camel-dataset) | 1.3 | The dataset-test component extends the mock component by on startup to pull messages from another endpoint to set the expected message bodies.
+| xref:dataset-test-component.adoc[DataSet Test] (@@@ARTIFACTID@@@) | 1.3 | The dataset-test component extends the mock component by on startup to pull messages from another endpoint to set the expected message bodies.
 
 | xref:debezium-mongodb-component.adoc[Debezium MongoDB Connector] (camel-debezium-mongodb) | 3.0 | Represents a Debezium MongoDB endpoint which is used to capture changes in MongoDB database so that applications can see those changes and respond to them.
 
@@ -156,9 +156,9 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:digitalocean-component.adoc[DigitalOcean] (camel-digitalocean) | 2.19 | The DigitalOcean component allows you to manage Droplets and resources within the DigitalOcean cloud.
 
-| xref:direct-component.adoc[Direct] (camel-direct) | 1.0 | The direct component provides direct, synchronous call to another endpoint from the same CamelContext.
+| xref:direct-component.adoc[Direct] (@@@ARTIFACTID@@@) | 1.0 | The direct component provides direct, synchronous call to another endpoint from the same CamelContext.
 
-| xref:direct-vm-component.adoc[Direct VM] (camel-directvm) | 2.10 | The direct-vm component provides direct, synchronous call to another endpoint from any CamelContext in the same JVM.
+| xref:direct-vm-component.adoc[Direct VM] (@@@ARTIFACTID@@@) | 2.10 | The direct-vm component provides direct, synchronous call to another endpoint from any CamelContext in the same JVM.
 
 | xref:disruptor-component.adoc[Disruptor] (camel-disruptor) | 2.12 | The disruptor component provides asynchronous SEDA behavior using LMAX Disruptor.
 
@@ -186,7 +186,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:fhir-component.adoc[FHIR] (camel-fhir) | 2.23 | The fhir component is used for working with the FHIR protocol (health care).
 
-| xref:file-component.adoc[File] (camel-file) | 1.0 | The file component is used for reading or writing files.
+| xref:file-component.adoc[File] (@@@ARTIFACTID@@@) | 1.0 | The file component is used for reading or writing files.
 
 | xref:file-watch-component.adoc[file-watch] (camel-file-watch) | 3.0 | The file-watch is used to monitor file events in directory using java.nio.file.WatchService
 
@@ -268,7 +268,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:hipchat-component.adoc[Hipchat] (camel-hipchat) | 2.15 | The hipchat component supports producing and consuming messages from/to Hipchat service.
 
-| xref:http-component.adoc[HTTP] (camel-http) | 2.3 | For calling out to external HTTP servers using Apache HTTP Client 4.x.
+| xref:http-component.adoc[HTTP] (@@@ARTIFACTID@@@) | 2.3 | For calling out to external HTTP servers using Apache HTTP Client 4.x.
 
 | xref:iec60870-client-component.adoc[IEC 60870 Client] (camel-iec60870) | 2.20 | IEC 60870 component used for telecontrol (supervisory control and data acquisition) such as controlling electric power transmission grids and other geographically widespread control systems.
 
@@ -312,7 +312,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:jdbc-component.adoc[JDBC] (camel-jdbc) | 1.2 | The jdbc component enables you to access databases through JDBC, where SQL queries are sent in the message body.
 
-| xref:jetty-component.adoc[Jetty] (camel-jetty) | 1.2 | To use Jetty as a HTTP server as consumer for Camel routes.
+| xref:jetty-component.adoc[Jetty] (@@@ARTIFACTID@@@) | 1.2 | To use Jetty as a HTTP server as consumer for Camel routes.
 
 | xref:websocket-component.adoc[Jetty Websocket] (camel-websocket) | 2.10 | The websocket component provides websocket endpoints with Jetty for communicating with clients using websocket.
 
@@ -368,7 +368,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:kubernetes-services-component.adoc[Kubernetes Services] (camel-kubernetes) | 2.17 | The Kubernetes Service Accounts component provides a producer to execute service operations and a consumer to consume service events.
 
-| xref:language-component.adoc[Language] (camel-language) | 2.5 | The language component allows you to send a message to an endpoint which executes a script by any of the supported Languages in Camel.
+| xref:language-component.adoc[Language] (@@@ARTIFACTID@@@) | 2.5 | The language component allows you to send a message to an endpoint which executes a script by any of the supported Languages in Camel.
 
 | xref:ldap-component.adoc[LDAP] (camel-ldap) | 1.5 | The ldap component allows you to perform searches in LDAP servers using filters as the message payload.
 
@@ -376,7 +376,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:linkedin-component.adoc[Linkedin] (camel-linkedin) | 2.14 | The linkedin component is used for retrieving LinkedIn user profiles, connections, companies, groups, posts, etc.
 
-| xref:log-component.adoc[Log] (camel-log) | 1.1 | The log component logs message exchanges to the underlying logging mechanism.
+| xref:log-component.adoc[Log] (@@@ARTIFACTID@@@) | 1.1 | The log component logs message exchanges to the underlying logging mechanism.
 
 | xref:lucene-component.adoc[Lucene] (camel-lucene) | 2.2 | To insert or query from Apache Lucene databases.
 
@@ -396,7 +396,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:mllp-component.adoc[MLLP] (camel-mllp) | 2.17 | Provides functionality required by Healthcare providers to communicate with other systems using the MLLP protocol.
 
-| xref:mock-component.adoc[Mock] (camel-mock) | 1.0 | The mock component is used for testing routes and mediation rules using mocks.
+| xref:mock-component.adoc[Mock] (@@@ARTIFACTID@@@) | 1.0 | The mock component is used for testing routes and mediation rules using mocks.
 
 | xref:mongodb-component.adoc[MongoDB] (camel-mongodb) | 2.19 | Component for working with documents stored in MongoDB database.
 
@@ -476,11 +476,11 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:reactive-streams-component.adoc[Reactive Streams] (camel-reactive-streams) | 2.19 | Reactive Camel using reactive streams
 
-| xref:ref-component.adoc[Ref] (camel-ref) | 1.2 | The ref component is used for lookup of existing endpoints bound in the Registry.
+| xref:ref-component.adoc[Ref] (@@@ARTIFACTID@@@) | 1.2 | The ref component is used for lookup of existing endpoints bound in the Registry.
 
-| xref:rest-component.adoc[REST] (camel-rest) | 2.14 | The rest component is used for either hosting REST services (consumer) or calling external REST services (producer).
+| xref:rest-component.adoc[REST] (@@@ARTIFACTID@@@) | 2.14 | The rest component is used for either hosting REST services (consumer) or calling external REST services (producer).
 
-| xref:rest-api-component.adoc[REST API] (camel-rest) | 2.16 | The rest-api component is used for providing Swagger API of the REST services which has been defined using the rest-dsl in Camel.
+| xref:rest-api-component.adoc[REST API] (@@@ARTIFACTID@@@) | 2.16 | The rest-api component is used for providing Swagger API of the REST services which has been defined using the rest-dsl in Camel.
 
 | xref:rest-swagger-component.adoc[REST Swagger] (camel-rest-swagger) | 2.19 | An awesome REST endpoint backed by Swagger specifications.
 
@@ -488,19 +488,19 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:rss-component.adoc[RSS] (camel-rss) | 2.0 | The rss component is used for consuming RSS feeds.
 
-| xref:saga-component.adoc[Saga] (camel-saga) | 2.21 | The saga component provides access to advanced options for managing the flow in the Saga EIP.
+| xref:saga-component.adoc[Saga] (@@@ARTIFACTID@@@) | 2.21 | The saga component provides access to advanced options for managing the flow in the Saga EIP.
 
 | xref:salesforce-component.adoc[Salesforce] (camel-salesforce) | 2.12 | The salesforce component is used for integrating Camel with the massive Salesforce API.
 
 | xref:sap-netweaver-component.adoc[SAP NetWeaver] (camel-sap-netweaver) | 2.12 | The sap-netweaver component integrates with the SAP NetWeaver Gateway using HTTP transports.
 
-| xref:scheduler-component.adoc[Scheduler] (camel-scheduler) | 2.15 | The scheduler component is used for generating message exchanges when a scheduler fires.
+| xref:scheduler-component.adoc[Scheduler] (@@@ARTIFACTID@@@) | 2.15 | The scheduler component is used for generating message exchanges when a scheduler fires.
 
 | xref:schematron-component.adoc[Schematron] (camel-schematron) | 2.15 | Validates the payload of a message using the Schematron Library.
 
 | xref:scp-component.adoc[SCP] (camel-jsch) | 2.10 | To copy files using the secure copy protocol (SCP).
 
-| xref:seda-component.adoc[SEDA] (camel-seda) | 1.1 | The seda component provides asynchronous call to another endpoint from any CamelContext in the same JVM.
+| xref:seda-component.adoc[SEDA] (@@@ARTIFACTID@@@) | 1.1 | The seda component provides asynchronous call to another endpoint from any CamelContext in the same JVM.
 
 | xref:service-component.adoc[Service] (camel-service) | 2.22 | Represents an endpoint which is registered to a Service Registry such as Consul, Etcd.
 
@@ -536,7 +536,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:spring-batch-component.adoc[Spring Batch] (camel-spring-batch) | 2.10 | The spring-batch component allows to send messages to Spring Batch for further processing.
 
-| xref:spring-event-component.adoc[Spring Event] (camel-spring) | 1.4 | The spring-event component allows to listen for Spring Application Events.
+| xref:spring-event-component.adoc[Spring Event] (@@@ARTIFACTID@@@) | 1.4 | The spring-event component allows to listen for Spring Application Events.
 
 | xref:spring-integration-component.adoc[Spring Integration] (camel-spring-integration) | 1.4 | Bridges Camel with Spring Integration.
 
@@ -560,7 +560,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:string-template-component.adoc[String Template] (camel-stringtemplate) | 1.2 | Transforms the message using a String template.
 
-| xref:stub-component.adoc[Stub] (camel-stub) | 2.10 | The stub component provides a simple way to stub out any physical endpoints while in development or testing.
+| xref:stub-component.adoc[Stub] (@@@ARTIFACTID@@@) | 2.10 | The stub component provides a simple way to stub out any physical endpoints while in development or testing.
 
 | xref:telegram-component.adoc[Telegram] (camel-telegram) | 2.18 | The telegram component provides access to the Telegram Bot API.
 
@@ -568,7 +568,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:tika-component.adoc[Tika] (camel-tika) | 2.19 | This component integrates with Apache Tika to extract content and metadata from thousands of file types.
 
-| xref:timer-component.adoc[Timer] (camel-timer) | 1.0 | The timer component is used for generating message exchanges when a timer fires.
+| xref:timer-component.adoc[Timer] (@@@ARTIFACTID@@@) | 1.0 | The timer component is used for generating message exchanges when a timer fires.
 
 | xref:twilio-component.adoc[Twilio] (camel-twilio) | 2.20 | The Twilio component allows you to interact with the Twilio REST APIs using Twilio Java SDK.
 
@@ -580,13 +580,13 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:undertow-component.adoc[Undertow] (camel-undertow) | 2.16 | The undertow component provides HTTP and WebSocket based endpoints for consuming and producing HTTP/WebSocket requests.
 
-| xref:validator-component.adoc[Validator] (camel-validator) | 1.1 | Validates the payload of a message using XML Schema and JAXP Validation.
+| xref:validator-component.adoc[Validator] (@@@ARTIFACTID@@@) | 1.1 | Validates the payload of a message using XML Schema and JAXP Validation.
 
 | xref:velocity-component.adoc[Velocity] (camel-velocity) | 1.2 | Transforms the message using a Velocity template.
 
 | xref:vertx-component.adoc[Vert.x] (camel-vertx) | 2.12 | The vertx component is used for sending and receive messages from a vertx event bus.
 
-| xref:vm-component.adoc[VM] (camel-vm) | 1.1 | The vm component provides asynchronous call to another endpoint from the same CamelContext.
+| xref:vm-component.adoc[VM] (@@@ARTIFACTID@@@) | 1.1 | The vm component provides asynchronous call to another endpoint from the same CamelContext.
 
 | xref:weather-component.adoc[Weather] (camel-weather) | 2.12 | Polls the weather information from Open Weather Map.
 
@@ -606,7 +606,7 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:xquery-component.adoc[XQuery] (camel-saxon) | 1.0 | Transforms the message using a XQuery template using Saxon.
 
-| xref:xslt-component.adoc[XSLT] (camel-xslt) | 1.3 | Transforms the message using a XSLT template.
+| xref:xslt-component.adoc[XSLT] (@@@ARTIFACTID@@@) | 1.3 | Transforms the message using a XSLT template.
 
 | xref:yammer-component.adoc[Yammer] (camel-yammer) | 2.12 | The yammer component allows you to interact with the Yammer enterprise social network.
 


[camel] 11/11: Regen docs

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 9c6de951a539a5589b6a30243f252e7f1536d7b0
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Tue Nov 5 17:24:30 2019 +0100

    Regen docs
---
 docs/components/modules/ROOT/nav.adoc              |   1 +
 .../ROOT/pages/debezium-postgres-component.adoc    |  21 --
 .../ROOT/pages/debezium-sqlserver-component.adoc   | 255 +++++++++++++++++++++
 docs/components/modules/ROOT/pages/index.adoc      |   4 +-
 4 files changed, 259 insertions(+), 22 deletions(-)

diff --git a/docs/components/modules/ROOT/nav.adoc b/docs/components/modules/ROOT/nav.adoc
index 69b7ce8..bbfd108 100644
--- a/docs/components/modules/ROOT/nav.adoc
+++ b/docs/components/modules/ROOT/nav.adoc
@@ -90,6 +90,7 @@
 * xref:debezium-mongodb-component.adoc[Debezium MongoDB Connector Component]
 * xref:debezium-mysql-component.adoc[Debezium MySQL Connector Component]
 * xref:debezium-postgres-component.adoc[Debezium PostgresSQL Connector Component]
+* xref:debezium-sqlserver-component.adoc[Debezium SQL Server Connector Component]
 * xref:digitalocean-component.adoc[DigitalOcean Component]
 * xref:direct-component.adoc[Direct Component]
 * xref:direct-vm-component.adoc[Direct VM Component]
diff --git a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
index 8df0b5b..f9f3a49 100644
--- a/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
+++ b/docs/components/modules/ROOT/pages/debezium-postgres-component.adoc
@@ -122,15 +122,8 @@ with the following path and query parameters:
 | *schemaBlacklist* (postgres) | The schemas for which events must not be captured |  | String
 | *schemaRefreshMode* (postgres) | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless unchanged TOASTable data fully accounts [...]
 | *schemaWhitelist* (postgres) | The schemas for which events should be captured |  | String
-<<<<<<< HEAD
 | *slotDropOnStop* (postgres) | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | boolean
-<<<<<<< HEAD
 | *slotMaxRetries* (postgres) | How many times to retry connecting to a replication slot when an attempt fails. | 6 | int
-=======
->>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
-=======
-| *slotDrop_on_stop* (postgres) | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | boolean
->>>>>>> b982055...  CAMEL-14137: revert doc changes
 | *slotName* (postgres) | The name of the Postgres logical decoding slot created for streaming changes from a plugin.Defaults to 'debezium | debezium | String
 | *slotRetryDelayMs* (postgres) | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | long
 | *slotStreamParams* (postgres) | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
@@ -166,15 +159,7 @@ When using Spring Boot make sure to use the following Maven dependency to have s
 ----
 
 
-<<<<<<< HEAD
-<<<<<<< HEAD
 The component supports 63 options, which are listed below.
-=======
-The component supports 58 options, which are listed below.
->>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
-=======
-The component supports 57 options, which are listed below.
->>>>>>> b982055...  CAMEL-14137: revert doc changes
 
 
 
@@ -224,14 +209,8 @@ The component supports 57 options, which are listed below.
 | *camel.component.debezium-postgres.configuration.schema-blacklist* | The schemas for which events must not be captured |  | String
 | *camel.component.debezium-postgres.configuration.schema-refresh-mode* | Specify the conditions that trigger a refresh of the in-memory schema for a table. 'columns_diff' (the default) is the safest mode, ensuring the in-memory schema stays in-sync with the database table's schema at all times. 'columns_diff_exclude_unchanged_toast' instructs the connector to refresh the in-memory schema cache if there is a discrepancy between it and the schema derived from the incoming message, unless  [...]
 | *camel.component.debezium-postgres.configuration.schema-whitelist* | The schemas for which events should be captured |  | String
-<<<<<<< HEAD
 | *camel.component.debezium-postgres.configuration.slot-drop-on-stop* | Whether or not to drop the logical replication slot when the connector finishes orderlyBy default the replication is kept so that on restart progress can resume from the last recorded location | false | Boolean
-<<<<<<< HEAD
 | *camel.component.debezium-postgres.configuration.slot-max-retries* | How many times to retry connecting to a replication slot when an attempt fails. | 6 | Integer
-=======
->>>>>>> 2f2d808...  CAMEL-14137: removed qtp variable according to pr comment.
-=======
->>>>>>> b982055...  CAMEL-14137: revert doc changes
 | *camel.component.debezium-postgres.configuration.slot-name* | The name of the Postgres logical decoding slot created for streaming changes from a plugin.Defaults to 'debezium | debezium | String
 | *camel.component.debezium-postgres.configuration.slot-retry-delay-ms* | The number of milli-seconds to wait between retry attempts when the connector fails to connect to a replication slot. | 10000 | Long
 | *camel.component.debezium-postgres.configuration.slot-stream-params* | Any optional parameters used by logical decoding plugin. Semi-colon separated. E.g. 'add-tables=public.table,public.table2;include-lsn=true' |  | String
diff --git a/docs/components/modules/ROOT/pages/debezium-sqlserver-component.adoc b/docs/components/modules/ROOT/pages/debezium-sqlserver-component.adoc
new file mode 100644
index 0000000..9ef3da4
--- /dev/null
+++ b/docs/components/modules/ROOT/pages/debezium-sqlserver-component.adoc
@@ -0,0 +1,255 @@
+[[debezium-sqlserver-component]]
+= Debezium SQL Server Connector Component
+:page-source: components/camel-debezium-sqlserver/src/main/docs/debezium-sqlserver-component.adoc
+
+*Since Camel 3.0*
+
+The Debezium SQL Server component is wrapper around https://debezium.io/[Debezium] using https://debezium.io/documentation/reference/0.10/operations/embedded.html[Debezium Embedded], which enables Change Data Capture from SQL Server database using Debezium without the need for Kafka or Kafka Connect.
+
+*Note on handling failures:* Per https://debezium.io/documentation/reference/0.10/operations/embedded.html#_handling_failures[Debezium Embedded Engine] documentation, the engine actively records source offsets and periodically flushes these offsets to a persistent storage, so when the application is restarted or crashed, the engine will resume from the last recorded offset.
+Thus, at normal operation, your downstream routes will receive each event exactly once, however in case of an application crash (not having a graceful shutdown), the application will resume from the last recorded offset,
+which may result in receiving duplicate events immediately after the restart. Therefore, your downstream routes should be tolerant enough of such case and deduplicate events if needed.
+
+*Note:* The Debezium SQL Server component is currently not supported in OSGi
+
+Maven users will need to add the following dependency to their `pom.xml`
+for this component.
+
+[source,xml]
+----
+<dependency>
+    <groupId>org.apache.camel</groupId>
+    <artifactId>camel-debezium-sqlserver</artifactId>
+    <version>x.x.x</version>
+    <!-- use the same version as your Camel core version -->
+</dependency>
+----
+
+== URI format
+
+[source,text]
+---------------------------
+debezium-sqlserver:name[?options]
+---------------------------
+
+== Options
+
+
+// component options: START
+The Debezium SQL Server Connector component supports 2 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *configuration* (consumer) | Allow pre-configured Configurations to be set. |  | SqlServerConnectorEmbeddedDebeziumConfiguration
+| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean
+|===
+// component options: END
+
+
+// endpoint options: START
+The Debezium SQL Server Connector endpoint is configured using URI syntax:
+
+----
+debezium-sqlserver:name
+----
+
+with the following path and query parameters:
+
+=== Path Parameters (1 parameters):
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *name* | *Required* Unique name for the connector. Attempting to register again with the same name will fail. |  | String
+|===
+
+
+=== Query Parameters (45 parameters):
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean
+| *internalKeyConverter* (consumer) | The Converter class that should be used to serialize and deserialize key data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *internalValueConverter* (consumer) | The Converter class that should be used to serialize and deserialize value data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *offsetCommitPolicy* (consumer) | The name of the Java class of the commit policy. It defines when offsets commit has to be triggered based on the number of events processed and the time elapsed since the last commit. This class must implement the interface 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals. | io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy | String
+| *offsetCommitTimeoutMs* (consumer) | Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. The default is 5 seconds. | 5000 | long
+| *offsetFlushIntervalMs* (consumer) | Interval at which to try committing offsets. The default is 1 minute. | 60000 | long
+| *offsetStorage* (consumer) | The name of the Java class that is responsible for persistence of connector offsets. | org.apache.kafka.connect.storage.FileOffsetBackingStore | String
+| *offsetStorageFileName* (consumer) | Path to file where offsets are to be stored. Required when offset.storage is set to the FileOffsetBackingStore |  | String
+| *offsetStoragePartitions* (consumer) | The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |  | int
+| *offsetStorageReplication Factor* (consumer) | Replication factor used when creating the offset storage topic. Required when offset.storage is set to the KafkaOffsetBackingStore |  | int
+| *offsetStorageTopic* (consumer) | The name of the Kafka topic where offsets are to be stored. Required when offset.storage is set to the KafkaOffsetBackingStore. |  | String
+| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. |  | ExceptionHandler
+| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. |  | ExchangePattern
+| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean
+| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean
+| *columnBlacklist* (sqlserver) | Description is not available here, please check Debezium website for corresponding key 'column.blacklist' description. |  | String
+| *databaseDbname* (sqlserver) | The name of the database the connector should be monitoring. When working with a multi-tenant set-up, must be set to the CDB name. |  | String
+| *databaseHistory* (sqlserver) | The name of the DatabaseHistory class that should be used to store and recover database schema changes. The configuration properties for the history are prefixed with the 'database.history.' string. | io.debezium.relational.history.FileDatabaseHistory | String
+| *databaseHistoryFileFilename* (sqlserver) | The path to the file that will be used to record the database history |  | String
+| *databaseHistoryKafka BootstrapServers* (sqlserver) | A list of host/port pairs that the connector will use for establishing the initial connection to the Kafka cluster for retrieving database schema history previously stored by the connector. This should point to the same Kafka cluster used by the Kafka Connect process. |  | String
+| *databaseHistoryKafka RecoveryAttempts* (sqlserver) | The number of attempts in a row that no data are returned from Kafka before recover completes. The maximum amount of time to wait after receiving no data is (recovery.attempts) x (recovery.poll.interval.ms). | 100 | int
+| *databaseHistoryKafka RecoveryPollIntervalMs* (sqlserver) | The number of milliseconds to wait while polling for persisted data during recovery. | 100 | int
+| *databaseHistoryKafkaTopic* (sqlserver) | The name of the topic for the database schema history |  | String
+| *databaseHostname* (sqlserver) | Resolvable hostname or IP address of the SQL Server database server. |  | String
+| *databasePassword* (sqlserver) | *Required* Password of the SQL Server database user to be used when connecting to the database. |  | String
+| *databasePort* (sqlserver) | Port of the SQL Server database server. | 1433 | int
+| *databaseServerName* (sqlserver) | *Required* Unique name that identifies the database server and all recorded offsets, and that is used as a prefix for all schemas and topics. Each distinct installation should have a separate namespace and be monitored by at most one Debezium connector. |  | String
+| *databaseUser* (sqlserver) | Name of the SQL Server database user to be used when connecting to the database. |  | String
+| *decimalHandlingMode* (sqlserver) | Specify how DECIMAL and NUMERIC columns should be represented in change events, including:'precise' (the default) uses java.math.BigDecimal to represent values, which are encoded in the change events using a binary representation and Kafka Connect's 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to represent values; 'double' represents values using Java's 'double', which may not offer the precision but will be far easier to use in [...]
+| *heartbeatIntervalMs* (sqlserver) | Length of an interval in milliseconds in which the connector periodically sends heartbeat messages to a heartbeat topic. Use 0 to disable heartbeat messages. Disabled by default. | 0 | int
+| *heartbeatTopicsPrefix* (sqlserver) | The prefix that is used to name heartbeat topics. Defaults to __debezium-heartbeat. | __debezium-heartbeat | String
+| *maxBatchSize* (sqlserver) | Maximum size of each batch of source records. Defaults to 2048. | 2048 | int
+| *maxQueueSize* (sqlserver) | Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Defaults to 8192, and should always be larger than the maximum batch size. | 8192 | int
+| *messageKeyColumns* (sqlserver) | A semicolon-separated list of expressions that match fully-qualified tables and column(s) to be used as message key. Each expression must match the pattern ':',where the table names could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connector,and the key columns are a comma-separated list of columns representing the custom key. For any table without an explicit key configuration the table's primary key colum [...]
+| *pollIntervalMs* (sqlserver) | Frequency in milliseconds to wait for new change events to appear after receiving no events. Defaults to 500ms. | 500 | long
+| *snapshotDelayMs* (sqlserver) | The number of milliseconds to delay before a snapshot will begin. | 0 | long
+| *snapshotFetchSize* (sqlserver) | The maximum number of records that should be loaded into memory while performing a snapshot |  | int
+| *snapshotLockTimeoutMs* (sqlserver) | The maximum number of millis to wait for table locks at the beginning of a snapshot. If locks cannot be acquired in this time frame, the snapshot will be aborted. Defaults to 10 seconds | 10000 | long
+| *snapshotMode* (sqlserver) | The criteria for running a snapshot upon startup of the connector. Options include: 'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; 'initial_schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name. | initial | String
+| *snapshotSelectStatement Overrides* (sqlserver) | This property contains a comma-separated list of fully-qualified tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connectors. Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME', respectively. The valu [...]
+| *sourceStructVersion* (sqlserver) | A version of the format of the publicly visible source part in the message | v2 | String
+| *tableBlacklist* (sqlserver) | Description is not available here, please check Debezium website for corresponding key 'table.blacklist' description. |  | String
+| *tableIgnoreBuiltin* (sqlserver) | Flag specifying whether built-in tables should be ignored. | true | boolean
+| *tableWhitelist* (sqlserver) | The tables for which changes are to be captured |  | String
+| *timePrecisionMode* (sqlserver) | Time, date, and timestamps can be represented with different kinds of precisions, including:'adaptive' (the default) bases the precision of time, date, and timestamp values on the database column's precision; 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always use microseconds precision;'connect' always represents time, date, and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which u [...]
+|===
+// endpoint options: END
+// spring-boot-auto-configure options: START
+== Spring Boot Auto-Configuration
+
+When using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel</groupId>
+  <artifactId>camel-debezium-sqlserver-starter</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel core version -->
+</dependency>
+----
+
+
+The component supports 44 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *camel.component.debezium-sqlserver.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean
+| *camel.component.debezium-sqlserver.configuration.column-blacklist* | Description is not available here, please check Debezium website for corresponding key 'column.blacklist' description. |  | String
+| *camel.component.debezium-sqlserver.configuration.connector-class* | The name of the Java class for the connector |  | Class
+| *camel.component.debezium-sqlserver.configuration.database-dbname* | The name of the database the connector should be monitoring. When working with a multi-tenant set-up, must be set to the CDB name. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history* | The name of the DatabaseHistory class that should be used to store and recover database schema changes. The configuration properties for the history are prefixed with the 'database.history.' string. | io.debezium.relational.history.FileDatabaseHistory | String
+| *camel.component.debezium-sqlserver.configuration.database-history-file-filename* | The path to the file that will be used to record the database history |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-bootstrap-servers* | A list of host/port pairs that the connector will use for establishing the initial connection to the Kafka cluster for retrieving database schema history previously stored by the connector. This should point to the same Kafka cluster used by the Kafka Connect process. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-recovery-attempts* | The number of attempts in a row that no data are returned from Kafka before recover completes. The maximum amount of time to wait after receiving no data is (recovery.attempts) x (recovery.poll.interval.ms). | 100 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-recovery-poll-interval-ms* | The number of milliseconds to wait while polling for persisted data during recovery. | 100 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-topic* | The name of the topic for the database schema history |  | String
+| *camel.component.debezium-sqlserver.configuration.database-hostname* | Resolvable hostname or IP address of the SQL Server database server. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-password* | Password of the SQL Server database user to be used when connecting to the database. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-port* | Port of the SQL Server database server. | 1433 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-server-name* | Unique name that identifies the database server and all recorded offsets, and that is used as a prefix for all schemas and topics. Each distinct installation should have a separate namespace and be monitored by at most one Debezium connector. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-user* | Name of the SQL Server database user to be used when connecting to the database. |  | String
+| *camel.component.debezium-sqlserver.configuration.decimal-handling-mode* | Specify how DECIMAL and NUMERIC columns should be represented in change events, including:'precise' (the default) uses java.math.BigDecimal to represent values, which are encoded in the change events using a binary representation and Kafka Connect's 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to represent values; 'double' represents values using Java's 'double', which may not offer the pre [...]
+| *camel.component.debezium-sqlserver.configuration.heartbeat-interval-ms* | Length of an interval in milliseconds in which the connector periodically sends heartbeat messages to a heartbeat topic. Use 0 to disable heartbeat messages. Disabled by default. | 0 | Integer
+| *camel.component.debezium-sqlserver.configuration.heartbeat-topics-prefix* | The prefix that is used to name heartbeat topics. Defaults to __debezium-heartbeat. | __debezium-heartbeat | String
+| *camel.component.debezium-sqlserver.configuration.internal-key-converter* | The Converter class that should be used to serialize and deserialize key data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *camel.component.debezium-sqlserver.configuration.internal-value-converter* | The Converter class that should be used to serialize and deserialize value data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *camel.component.debezium-sqlserver.configuration.max-batch-size* | Maximum size of each batch of source records. Defaults to 2048. | 2048 | Integer
+| *camel.component.debezium-sqlserver.configuration.max-queue-size* | Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Defaults to 8192, and should always be larger than the maximum batch size. | 8192 | Integer
+| *camel.component.debezium-sqlserver.configuration.message-key-columns* | A semicolon-separated list of expressions that match fully-qualified tables and column(s) to be used as message key. Each expression must match the pattern '<fully-qualified table name>:<key columns>',where the table names could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connector,and the key columns are a comma-separated list of columns representing the custom key. F [...]
+| *camel.component.debezium-sqlserver.configuration.name* | Unique name for the connector. Attempting to register again with the same name will fail. |  | String
+| *camel.component.debezium-sqlserver.configuration.offset-commit-policy* | The name of the Java class of the commit policy. It defines when offsets commit has to be triggered based on the number of events processed and the time elapsed since the last commit. This class must implement the interface 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals. | io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy | String
+| *camel.component.debezium-sqlserver.configuration.offset-commit-timeout-ms* | Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. The default is 5 seconds. | 5000 | Long
+| *camel.component.debezium-sqlserver.configuration.offset-flush-interval-ms* | Interval at which to try committing offsets. The default is 1 minute. | 60000 | Long
+| *camel.component.debezium-sqlserver.configuration.offset-storage* | The name of the Java class that is responsible for persistence of connector offsets. | org.apache.kafka.connect.storage.FileOffsetBackingStore | String
+| *camel.component.debezium-sqlserver.configuration.offset-storage-file-name* | Path to file where offsets are to be stored. Required when offset.storage is set to the FileOffsetBackingStore |  | String
+| *camel.component.debezium-sqlserver.configuration.offset-storage-partitions* | The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |  | Integer
+| *camel.component.debezium-sqlserver.configuration.offset-storage-replication-factor* | Replication factor used when creating the offset storage topic. Required when offset.storage is set to the KafkaOffsetBackingStore |  | Integer
+| *camel.component.debezium-sqlserver.configuration.offset-storage-topic* | The name of the Kafka topic where offsets are to be stored. Required when offset.storage is set to the KafkaOffsetBackingStore. |  | String
+| *camel.component.debezium-sqlserver.configuration.poll-interval-ms* | Frequency in milliseconds to wait for new change events to appear after receiving no events. Defaults to 500ms. | 500 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-delay-ms* | The number of milliseconds to delay before a snapshot will begin. | 0 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-fetch-size* | The maximum number of records that should be loaded into memory while performing a snapshot |  | Integer
+| *camel.component.debezium-sqlserver.configuration.snapshot-lock-timeout-ms* | The maximum number of millis to wait for table locks at the beginning of a snapshot. If locks cannot be acquired in this time frame, the snapshot will be aborted. Defaults to 10 seconds | 10000 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-mode* | The criteria for running a snapshot upon startup of the connector. Options include: 'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; 'initial_schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name. | initial | String
+| *camel.component.debezium-sqlserver.configuration.snapshot-select-statement-overrides* | This property contains a comma-separated list of fully-qualified tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on thespecific connectors . Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id 'snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]' or 'snapshot.select.statement.overrides.[SCH [...]
+| *camel.component.debezium-sqlserver.configuration.source-struct-version* | A version of the format of the publicly visible source part in the message | v2 | String
+| *camel.component.debezium-sqlserver.configuration.table-blacklist* | Description is not available here, please check Debezium website for corresponding key 'table.blacklist' description. |  | String
+| *camel.component.debezium-sqlserver.configuration.table-ignore-builtin* | Flag specifying whether built-in tables should be ignored. | true | Boolean
+| *camel.component.debezium-sqlserver.configuration.table-whitelist* | The tables for which changes are to be captured |  | String
+| *camel.component.debezium-sqlserver.configuration.time-precision-mode* | Time, date, and timestamps can be represented with different kinds of precisions, including:'adaptive' (the default) bases the precision of time, date, and timestamp values on the database column's precision; 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always use microseconds precision;'connect' always represents time, date, and timestamp values using Kafka Connect's built-in representations [...]
+| *camel.component.debezium-sqlserver.enabled* | Whether to enable auto configuration of the debezium-sqlserver component. This is enabled by default. |  | Boolean
+|===
+// spring-boot-auto-configure options: END
+
+For more information about configuration:
+https://debezium.io/documentation/reference/0.10/operations/embedded.html#engine-properties[https://debezium.io/documentation/reference/0.10/operations/embedded.html#engine-properties]
+https://debezium.io/documentation/reference/0.10/connectors/sqlserver.html#connector-properties[https://debezium.io/documentation/reference/0.10/connectors/sqlserver.html#connector-properties]
+
+== Message headers
+
+=== Consumer headers
+
+The following headers are available when consuming change events from Debezium.
+[width="100%",cols="2m,2m,1m,5",options="header"]
+|===
+| Header constant                           | Header value                                   | Type        | Description
+| DebeziumConstants.HEADER_IDENTIFIER       | "CamelDebeziumIdentifier"                      | String      | The identifier of the connector, normally is this format "{server-name}.{database-name}.{table-name}".
+| DebeziumConstants.HEADER_KEY              | "CamelDebeziumKey"                             | Struct      | The key of the event, normally is the table Primary Key.
+| DebeziumConstants.HEADER_SOURCE_METADATA  | "CamelDebeziumSourceMetadata"                  | Map         | The metadata about the source event, for example `table` name, database `name`, log position, etc, please refer to the Debezium documentation for more info.
+| DebeziumConstants.HEADER_OPERATION        | "CamelDebeziumOperation"                       | String      | If present, the type of event operation. Values for the connector are `c` for create (or insert), `u` for update, `d` for delete or `r` in case of a snapshot event.
+| DebeziumConstants.HEADER_TIMESTAMP        | "CamelDebeziumTimestamp"                       | Long        | If present, the time (using the system clock in the JVM) at which the connector processed the event.
+| DebeziumConstants.HEADER_BEFORE           | "CamelDebeziumBefore"                          | Struct     | If present, contains the state of the row before the event occurred.
+|===
+
+== Message body
+If the message body is not `null` (it will be `null` in case of tombstones), it contains the state of the row after the event occurred as `Struct` format or `Map` format if you use the included Type Converter from `Struct` to `Map` (please look below for more explanation).
+
+== Samples
+
+=== Consuming events
+
+Here is a very simple route that you can use in order to listen to Debezium events from SQL Server connector.
+[source,java]
+----
+from("debezium-sqlserver:dbz-test-1?offsetStorageFileName=/usr/offset-file-1.dat&databaseHostName=localhost&databaseUser=debezium&databasePassword=dbz&databaseServerName=my-app-connector&databaseHistoryFileName=/usr/history-file-1.dat")
+    .log("Event received from Debezium : ${body}")
+    .log("    with this identifier ${headers.CamelDebeziumIdentifier}")
+    .log("    with these source metadata ${headers.CamelDebeziumSourceMetadata}")
+    .log("    the event occurred upon this operation '${headers.CamelDebeziumOperation}'")
+    .log("    on this database '${headers.CamelDebeziumSourceMetadata[db]}' and this table '${headers.CamelDebeziumSourceMetadata[table]}'")
+    .log("    with the key ${headers.CamelDebeziumKey}")
+    .log("    the previous value is ${headers.CamelDebeziumBefore}")
+----
+
+By default, the component will emit the events in the body and `CamelDebeziumBefore` header as https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Struct.html[`Struct`] data type, the reasoning behind this is to preserve the schema information in case it is needed.
+However, the component as well contains a xref:manual::type-converter.adoc[Type Converter] that converts
+from default output type of https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Struct.html[`Struct`] to `Map` in order to leverage Camel's rich xref:manual::data-format.adoc[Data Format] types which many of them work out of box with `Map` data type.
+To use it, you can either add `Map.class` type when you access the message e.g: `exchange.getIn().getBody(Map.class)`, or you can convert the body always to `Map` from the route builder by adding `.convertBodyTo(Map.class)` to your Camel Route DSL after `from` statement.
+
+We mentioned above about the schema, which can be used in case you need to perform advance data transformation and the schema is needed for that. If you choose not to convert your body to `Map`,
+you can obtain the schema information as https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Schema.html[`Schema`] type from `Struct` like this:
+[source,java]
+----
+from("debezium-sqlserver:[name]?[options]")
+    .process(exchange -> {
+        final Struct bodyValue = exchange.getIn().getBody(Struct.class);
+        final Schema schemaValue = bodyValue.schema();
+
+        log.info("Body value is :" + bodyValue);
+        log.info("With Schema : " + schemaValue);
+        log.info("And fields of :" + schemaValue.fields());
+        log.info("Field name has `" + schemaValue.field("name").schema() + "` type");
+    });
+----
+
+*Important Note:* This component is a thin wrapper around Debezium Engine as mentioned, therefore before using this component in production, you need to understand how Debezium works and how configurations can reflect the expected behavior, especially in regards to https://debezium.io/documentation/reference/0.9/operations/embedded.html#_handling_failures[handling failures].
diff --git a/docs/components/modules/ROOT/pages/index.adoc b/docs/components/modules/ROOT/pages/index.adoc
index 13312cd..2a48ba0 100644
--- a/docs/components/modules/ROOT/pages/index.adoc
+++ b/docs/components/modules/ROOT/pages/index.adoc
@@ -6,7 +6,7 @@ The following Apache Camel artifacts are provided:
 == Components
 
 // components: START
-Number of Components: 302 in 240 JAR artifacts (0 deprecated)
+Number of Components: 303 in 241 JAR artifacts (0 deprecated)
 
 [width="100%",cols="4,1,5",options="header"]
 |===
@@ -154,6 +154,8 @@ Number of Components: 302 in 240 JAR artifacts (0 deprecated)
 
 | xref:debezium-postgres-component.adoc[Debezium PostgresSQL Connector] (camel-debezium-postgres) | 3.0 | Represents a Debezium PostgresSQL endpoint which is used to capture changes in PostgresSQL database so that that applications can see those changes and respond to them.
 
+| xref:debezium-sqlserver-component.adoc[Debezium SQL Server Connector] (camel-debezium-sqlserver) | 3.0 | Represents a Debezium SQL Server endpoint which is used to capture changes in SQL Server database so that applications can see those changes and respond to them.
+
 | xref:digitalocean-component.adoc[DigitalOcean] (camel-digitalocean) | 2.19 | The DigitalOcean component allows you to manage Droplets and resources within the DigitalOcean cloud.
 
 | xref:direct-component.adoc[Direct] (camel-direct) | 1.0 | The direct component provides direct, synchronous call to another endpoint from the same CamelContext.


[camel] 05/11: CAMEL-14137: code changed according to pr change request.

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 9ad8e96fb48434573b678906fd2fabe0064a5af8
Author: Luigi De Masi <ld...@redhat.com>
AuthorDate: Tue Nov 5 11:18:22 2019 +0100

     CAMEL-14137: code changed according to pr change request.
---
 .../java/org/apache/camel/component/jetty/JettyHttpComponent.java | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
index c01f769..3689017 100644
--- a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
+++ b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
@@ -481,13 +481,13 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
                         this.removeServerMBean(connectorRef.server);
                         //mbContainer.removeBean(connectorRef.connector);
                     }
-                    if (defaultQueuedThreadPool !=null){
+                    if (defaultQueuedThreadPool != null) {
                         try {
                             defaultQueuedThreadPool.stop();
-                        }catch(Throwable t){
+                        } catch (Throwable t) {
                             defaultQueuedThreadPool.destroy();
-                        }finally {
-                            defaultQueuedThreadPool =null;
+                        } finally {
+                            defaultQueuedThreadPool = null;
                         }
                     }
                 }


[camel] 09/11: CAMEL-14093: Add camel-debezium-sqlserver component

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit aa1276338a260765a3002d5904ae59d50096c747
Author: Omar Al-Safi <om...@gmail.com>
AuthorDate: Tue Nov 5 15:36:26 2019 +0100

    CAMEL-14093: Add camel-debezium-sqlserver component
---
 apache-camel/pom.xml                               |  10 +
 apache-camel/src/main/descriptors/common-bin.xml   |   2 +
 bom/camel-bom/pom.xml                              |  10 +
 components/camel-debezium-sqlserver/pom.xml        | 152 ++++
 .../main/docs/debezium-sqlserver-component.adoc    | 254 ++++++
 .../debezium/DebeziumSqlserverComponent.java       |  57 ++
 .../debezium/DebeziumSqlserverEndpoint.java        |  49 +
 .../debezium/DebeziumSqlserverComponentTest.java   | 120 +++
 ...ConnectorEmbeddedDebeziumConfigurationTest.java |  88 ++
 .../src/test/resources/log4j2.properties           |  30 +
 components/pom.xml                                 |   1 +
 .../DebeziumSqlserverEndpointBuilderFactory.java   | 985 +++++++++++++++++++++
 parent/pom.xml                                     |  10 +
 .../camel-debezium-sqlserver-starter/pom.xml       |  53 ++
 ...ebeziumSqlserverComponentAutoConfiguration.java | 129 +++
 .../DebeziumSqlserverComponentConfiguration.java   | 663 ++++++++++++++
 .../src/main/resources/META-INF/LICENSE.txt        | 203 +++++
 .../src/main/resources/META-INF/NOTICE.txt         |  11 +
 .../src/main/resources/META-INF/spring.factories   |  19 +
 .../src/main/resources/META-INF/spring.provides    |  17 +
 platforms/spring-boot/components-starter/pom.xml   |   1 +
 .../camel-spring-boot-dependencies/pom.xml         |  10 +
 .../springboot/CamelDebeziumSqlserverTest.java     |  46 +
 23 files changed, 2920 insertions(+)

diff --git a/apache-camel/pom.xml b/apache-camel/pom.xml
index 663fb53..82343aa 100644
--- a/apache-camel/pom.xml
+++ b/apache-camel/pom.xml
@@ -488,6 +488,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.camel</groupId>
+      <artifactId>camel-debezium-sqlserver</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.camel</groupId>
       <artifactId>camel-digitalocean</artifactId>
       <version>${project.version}</version>
     </dependency>
@@ -2072,6 +2077,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.camel</groupId>
+      <artifactId>camel-debezium-sqlserver-starter</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.camel</groupId>
       <artifactId>camel-digitalocean-starter</artifactId>
       <version>${project.version}</version>
     </dependency>
diff --git a/apache-camel/src/main/descriptors/common-bin.xml b/apache-camel/src/main/descriptors/common-bin.xml
index ee12fc4..4348697 100644
--- a/apache-camel/src/main/descriptors/common-bin.xml
+++ b/apache-camel/src/main/descriptors/common-bin.xml
@@ -118,6 +118,7 @@
         <include>org.apache.camel:camel-debezium-mongodb</include>
         <include>org.apache.camel:camel-debezium-mysql</include>
         <include>org.apache.camel:camel-debezium-postgres</include>
+        <include>org.apache.camel:camel-debezium-sqlserver</include>
         <include>org.apache.camel:camel-digitalocean</include>
         <include>org.apache.camel:camel-direct</include>
         <include>org.apache.camel:camel-directvm</include>
@@ -474,6 +475,7 @@
         <include>org.apache.camel:camel-debezium-mongodb-starter</include>
         <include>org.apache.camel:camel-debezium-mysql-starter</include>
         <include>org.apache.camel:camel-debezium-postgres-starter</include>
+        <include>org.apache.camel:camel-debezium-sqlserver-starter</include>
         <include>org.apache.camel:camel-digitalocean-starter</include>
         <include>org.apache.camel:camel-direct-starter</include>
         <include>org.apache.camel:camel-directvm-starter</include>
diff --git a/bom/camel-bom/pom.xml b/bom/camel-bom/pom.xml
index 246ed7e..f51ef5b 100644
--- a/bom/camel-bom/pom.xml
+++ b/bom/camel-bom/pom.xml
@@ -864,6 +864,16 @@
       </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver-starter</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
         <artifactId>camel-digitalocean</artifactId>
         <version>${project.version}</version>
       </dependency>
diff --git a/components/camel-debezium-sqlserver/pom.xml b/components/camel-debezium-sqlserver/pom.xml
new file mode 100644
index 0000000..c23e2b5
--- /dev/null
+++ b/components/camel-debezium-sqlserver/pom.xml
@@ -0,0 +1,152 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.camel</groupId>
+        <artifactId>components</artifactId>
+        <version>3.0.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>camel-debezium-sqlserver</artifactId>
+    <packaging>jar</packaging>
+    <name>Camel :: Debezium :: SQL Server</name>
+    <description>Camel Debezium SQL Server support</description>
+
+    <dependencies>
+        <!-- camel -->
+        <dependency>
+            <groupId>org.apache.camel</groupId>
+            <artifactId>camel-debezium-common</artifactId>
+        </dependency>
+
+        <!-- debezium SQL Server Connector -->
+        <dependency>
+            <groupId>io.debezium</groupId>
+            <artifactId>debezium-connector-sqlserver</artifactId>
+            <version>${debezium-version}</version>
+        </dependency>
+
+        <!-- test -->
+        <dependency>
+            <groupId>org.apache.camel</groupId>
+            <artifactId>camel-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-core</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-1.2-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.camel.maven</groupId>
+                <artifactId>camel-debezium-maven-plugin</artifactId>
+                <version>${project.version}</version>
+                <executions>
+                    <execution>
+                        <id>generate</id>
+                        <goals>
+                            <goal>generate-connector-config</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <dependencies>
+                    <dependency>
+                        <groupId>io.debezium</groupId>
+                        <artifactId>debezium-connector-sqlserver</artifactId>
+                        <!-- Only for the maven plugin, we will use 1.0.0.Beta2 to generate the config due to missing configs on 0.10 -->
+                        <!-- Once we have 1.0.0.Final, we will change this back to ${debezium-version} -->
+                        <version>1.0.0.Beta2</version>
+                        <scope>runtime</scope>
+                    </dependency>
+                </dependencies>
+                <configuration>
+                    <connectorClassName>io.debezium.connector.sqlserver.SqlServerConnector</connectorClassName>
+                    <connectorConfigClassName>io.debezium.connector.sqlserver.SqlServerConnectorConfig</connectorConfigClassName>
+                    <fields>
+                        <database.history>io.debezium.relational.history.FileDatabaseHistory</database.history>
+                        <tombstones.on.delete>false</tombstones.on.delete>
+                    </fields>
+                    <requiredFields>
+                        <field>database.password</field>
+                        <field>database.server.name</field>
+                    </requiredFields>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>add-generated-configurations-sources</id>
+                        <goals>
+                            <goal>add-source</goal>
+                        </goals>
+                        <configuration>
+                            <sources>
+                                <source>${project.build.directory}/generated-sources/connector-configurations</source>
+                            </sources>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <!-- This is used to copy the EmbeddedDebeziumConfiguration source in order to generate the metadata via the APT plugin -->
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>src-dependencies</id>
+                        <goals>
+                            <goal>unpack-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <classifier>sources</classifier>
+                            <includeArtifactIds>camel-debezium-common</includeArtifactIds>
+                            <includes>**/*EmbeddedDebeziumConfiguration.java</includes>
+                            <outputDirectory>${project.build.directory}/generated-sources/connector-configurations</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/components/camel-debezium-sqlserver/src/main/docs/debezium-sqlserver-component.adoc b/components/camel-debezium-sqlserver/src/main/docs/debezium-sqlserver-component.adoc
new file mode 100644
index 0000000..783dcff
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/main/docs/debezium-sqlserver-component.adoc
@@ -0,0 +1,254 @@
+[[debezium-sqlserver-component]]
+= Debezium SQL Server Connector Component
+
+*Since Camel 3.0*
+
+The Debezium SQL Server component is a wrapper around https://debezium.io/[Debezium] using https://debezium.io/documentation/reference/0.10/operations/embedded.html[Debezium Embedded], which enables Change Data Capture from SQL Server database using Debezium without the need for Kafka or Kafka Connect.
+
+*Note on handling failures:* Per https://debezium.io/documentation/reference/0.10/operations/embedded.html#_handling_failures[Debezium Embedded Engine] documentation, the engine actively records source offsets and periodically flushes these offsets to persistent storage, so when the application is restarted or crashes, the engine will resume from the last recorded offset.
+Thus, at normal operation, your downstream routes will receive each event exactly once. However, in case of an application crash (not having a graceful shutdown), the application will resume from the last recorded offset,
+which may result in receiving duplicate events immediately after the restart. Therefore, your downstream routes should be tolerant to such a case and deduplicate events if needed.
+
+*Note:* The Debezium SQL Server component is currently not supported in OSGi
+
+Maven users will need to add the following dependency to their `pom.xml`
+for this component.
+
+[source,xml]
+----
+<dependency>
+    <groupId>org.apache.camel</groupId>
+    <artifactId>camel-debezium-sqlserver</artifactId>
+    <version>x.x.x</version>
+    <!-- use the same version as your Camel core version -->
+</dependency>
+----
+
+== URI format
+
+[source,text]
+---------------------------
+debezium-sqlserver:name[?options]
+---------------------------
+
+== Options
+
+
+// component options: START
+The Debezium SQL Server Connector component supports 2 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *configuration* (consumer) | Allow pre-configured Configurations to be set. |  | SqlServerConnectorEmbeddedDebeziumConfiguration
+| *basicPropertyBinding* (advanced) | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean
+|===
+// component options: END
+
+
+// endpoint options: START
+The Debezium SQL Server Connector endpoint is configured using URI syntax:
+
+----
+debezium-sqlserver:name
+----
+
+with the following path and query parameters:
+
+=== Path Parameters (1 parameters):
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *name* | *Required* Unique name for the connector. Attempting to register again with the same name will fail. |  | String
+|===
+
+
+=== Query Parameters (45 parameters):
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. | false | boolean
+| *internalKeyConverter* (consumer) | The Converter class that should be used to serialize and deserialize key data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *internalValueConverter* (consumer) | The Converter class that should be used to serialize and deserialize value data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *offsetCommitPolicy* (consumer) | The name of the Java class of the commit policy. It defines when offsets commit has to be triggered based on the number of events processed and the time elapsed since the last commit. This class must implement the interface 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals. | io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy | String
+| *offsetCommitTimeoutMs* (consumer) | Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. The default is 5 seconds. | 5000 | long
+| *offsetFlushIntervalMs* (consumer) | Interval at which to try committing offsets. The default is 1 minute. | 60000 | long
+| *offsetStorage* (consumer) | The name of the Java class that is responsible for persistence of connector offsets. | org.apache.kafka.connect.storage.FileOffsetBackingStore | String
+| *offsetStorageFileName* (consumer) | Path to file where offsets are to be stored. Required when offset.storage is set to the FileOffsetBackingStore |  | String
+| *offsetStoragePartitions* (consumer) | The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |  | int
+| *offsetStorageReplication Factor* (consumer) | Replication factor used when creating the offset storage topic. Required when offset.storage is set to the KafkaOffsetBackingStore |  | int
+| *offsetStorageTopic* (consumer) | The name of the Kafka topic where offsets are to be stored. Required when offset.storage is set to the KafkaOffsetBackingStore. |  | String
+| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. |  | ExceptionHandler
+| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. |  | ExchangePattern
+| *basicPropertyBinding* (advanced) | Whether the endpoint should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | boolean
+| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). | false | boolean
+| *columnBlacklist* (sqlserver) | Description is not available here, please check Debezium website for corresponding key 'column.blacklist' description. |  | String
+| *databaseDbname* (sqlserver) | The name of the database the connector should be monitoring. When working with a multi-tenant set-up, must be set to the CDB name. |  | String
+| *databaseHistory* (sqlserver) | The name of the DatabaseHistory class that should be used to store and recover database schema changes. The configuration properties for the history are prefixed with the 'database.history.' string. | io.debezium.relational.history.FileDatabaseHistory | String
+| *databaseHistoryFileFilename* (sqlserver) | The path to the file that will be used to record the database history |  | String
+| *databaseHistoryKafka BootstrapServers* (sqlserver) | A list of host/port pairs that the connector will use for establishing the initial connection to the Kafka cluster for retrieving database schema history previously stored by the connector. This should point to the same Kafka cluster used by the Kafka Connect process. |  | String
+| *databaseHistoryKafka RecoveryAttempts* (sqlserver) | The number of attempts in a row that no data are returned from Kafka before recover completes. The maximum amount of time to wait after receiving no data is (recovery.attempts) x (recovery.poll.interval.ms). | 100 | int
+| *databaseHistoryKafka RecoveryPollIntervalMs* (sqlserver) | The number of milliseconds to wait while polling for persisted data during recovery. | 100 | int
+| *databaseHistoryKafkaTopic* (sqlserver) | The name of the topic for the database schema history |  | String
+| *databaseHostname* (sqlserver) | Resolvable hostname or IP address of the SQL Server database server. |  | String
+| *databasePassword* (sqlserver) | *Required* Password of the SQL Server database user to be used when connecting to the database. |  | String
+| *databasePort* (sqlserver) | Port of the SQL Server database server. | 1433 | int
+| *databaseServerName* (sqlserver) | *Required* Unique name that identifies the database server and all recorded offsets, and that is used as a prefix for all schemas and topics. Each distinct installation should have a separate namespace and be monitored by at most one Debezium connector. |  | String
+| *databaseUser* (sqlserver) | Name of the SQL Server database user to be used when connecting to the database. |  | String
+| *decimalHandlingMode* (sqlserver) | Specify how DECIMAL and NUMERIC columns should be represented in change events, including:'precise' (the default) uses java.math.BigDecimal to represent values, which are encoded in the change events using a binary representation and Kafka Connect's 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to represent values; 'double' represents values using Java's 'double', which may not offer the precision but will be far easier to use in [...]
+| *heartbeatIntervalMs* (sqlserver) | Length of an interval in milli-seconds in in which the connector periodically sends heartbeat messages to a heartbeat topic. Use 0 to disable heartbeat messages. Disabled by default. | 0 | int
+| *heartbeatTopicsPrefix* (sqlserver) | The prefix that is used to name heartbeat topics.Defaults to __debezium-heartbeat. | __debezium-heartbeat | String
+| *maxBatchSize* (sqlserver) | Maximum size of each batch of source records. Defaults to 2048. | 2048 | int
+| *maxQueueSize* (sqlserver) | Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Defaults to 8192, and should always be larger than the maximum batch size. | 8192 | int
+| *messageKeyColumns* (sqlserver) | A semicolon-separated list of expressions that match fully-qualified tables and column(s) to be used as message key. Each expression must match the pattern ':',where the table names could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connector,and the key columns are a comma-separated list of columns representing the custom key. For any table without an explicit key configuration the table's primary key colum [...]
+| *pollIntervalMs* (sqlserver) | Frequency in milliseconds to wait for new change events to appear after receiving no events. Defaults to 500ms. | 500 | long
+| *snapshotDelayMs* (sqlserver) | The number of milliseconds to delay before a snapshot will begin. | 0 | long
+| *snapshotFetchSize* (sqlserver) | The maximum number of records that should be loaded into memory while performing a snapshot |  | int
+| *snapshotLockTimeoutMs* (sqlserver) | The maximum number of millis to wait for table locks at the beginning of a snapshot. If locks cannot be acquired in this time frame, the snapshot will be aborted. Defaults to 10 seconds | 10000 | long
+| *snapshotMode* (sqlserver) | The criteria for running a snapshot upon startup of the connector. Options include: 'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; 'initial_schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name. | initial | String
+| *snapshotSelectStatement Overrides* (sqlserver) | This property contains a comma-separated list of fully-qualified tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on thespecific connectors . Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME', respectively. The valu [...]
+| *sourceStructVersion* (sqlserver) | A version of the format of the publicly visible source part in the message | v2 | String
+| *tableBlacklist* (sqlserver) | Description is not available here, please check Debezium website for corresponding key 'table.blacklist' description. |  | String
+| *tableIgnoreBuiltin* (sqlserver) | Flag specifying whether built-in tables should be ignored. | true | boolean
+| *tableWhitelist* (sqlserver) | The tables for which changes are to be captured |  | String
+| *timePrecisionMode* (sqlserver) | Time, date, and timestamps can be represented with different kinds of precisions, including:'adaptive' (the default) bases the precision of time, date, and timestamp values on the database column's precision; 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always use microseconds precision;'connect' always represents time, date, and timestamp values using Kafka Connect's built-in representations for Time, Date, and Timestamp, which u [...]
+|===
+// endpoint options: END
+// spring-boot-auto-configure options: START
+== Spring Boot Auto-Configuration
+
+When using Spring Boot make sure to use the following Maven dependency to have support for auto configuration:
+
+[source,xml]
+----
+<dependency>
+  <groupId>org.apache.camel</groupId>
+  <artifactId>camel-debezium-sqlserver-starter</artifactId>
+  <version>x.x.x</version>
+  <!-- use the same version as your Camel core version -->
+</dependency>
+----
+
+
+The component supports 44 options, which are listed below.
+
+
+
+[width="100%",cols="2,5,^1,2",options="header"]
+|===
+| Name | Description | Default | Type
+| *camel.component.debezium-sqlserver.basic-property-binding* | Whether the component should use basic property binding (Camel 2.x) or the newer property binding with additional capabilities | false | Boolean
+| *camel.component.debezium-sqlserver.configuration.column-blacklist* | Description is not available here, please check Debezium website for corresponding key 'column.blacklist' description. |  | String
+| *camel.component.debezium-sqlserver.configuration.connector-class* | The name of the Java class for the connector |  | Class
+| *camel.component.debezium-sqlserver.configuration.database-dbname* | The name of the database the connector should be monitoring. When working with a multi-tenant set-up, must be set to the CDB name. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history* | The name of the DatabaseHistory class that should be used to store and recover database schema changes. The configuration properties for the history are prefixed with the 'database.history.' string. | io.debezium.relational.history.FileDatabaseHistory | String
+| *camel.component.debezium-sqlserver.configuration.database-history-file-filename* | The path to the file that will be used to record the database history |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-bootstrap-servers* | A list of host/port pairs that the connector will use for establishing the initial connection to the Kafka cluster for retrieving database schema history previously stored by the connector. This should point to the same Kafka cluster used by the Kafka Connect process. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-recovery-attempts* | The number of attempts in a row that no data are returned from Kafka before recover completes. The maximum amount of time to wait after receiving no data is (recovery.attempts) x (recovery.poll.interval.ms). | 100 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-recovery-poll-interval-ms* | The number of milliseconds to wait while polling for persisted data during recovery. | 100 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-history-kafka-topic* | The name of the topic for the database schema history |  | String
+| *camel.component.debezium-sqlserver.configuration.database-hostname* | Resolvable hostname or IP address of the SQL Server database server. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-password* | Password of the SQL Server database user to be used when connecting to the database. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-port* | Port of the SQL Server database server. | 1433 | Integer
+| *camel.component.debezium-sqlserver.configuration.database-server-name* | Unique name that identifies the database server and all recorded offsets, and that is used as a prefix for all schemas and topics. Each distinct installation should have a separate namespace and be monitored by at most one Debezium connector. |  | String
+| *camel.component.debezium-sqlserver.configuration.database-user* | Name of the SQL Server database user to be used when connecting to the database. |  | String
+| *camel.component.debezium-sqlserver.configuration.decimal-handling-mode* | Specify how DECIMAL and NUMERIC columns should be represented in change events, including:'precise' (the default) uses java.math.BigDecimal to represent values, which are encoded in the change events using a binary representation and Kafka Connect's 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to represent values; 'double' represents values using Java's 'double', which may not offer the pre [...]
+| *camel.component.debezium-sqlserver.configuration.heartbeat-interval-ms* | Length of an interval in milli-seconds in in which the connector periodically sends heartbeat messages to a heartbeat topic. Use 0 to disable heartbeat messages. Disabled by default. | 0 | Integer
+| *camel.component.debezium-sqlserver.configuration.heartbeat-topics-prefix* | The prefix that is used to name heartbeat topics.Defaults to __debezium-heartbeat. | __debezium-heartbeat | String
+| *camel.component.debezium-sqlserver.configuration.internal-key-converter* | The Converter class that should be used to serialize and deserialize key data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *camel.component.debezium-sqlserver.configuration.internal-value-converter* | The Converter class that should be used to serialize and deserialize value data for offsets. The default is JSON converter. | org.apache.kafka.connect.json.JsonConverter | String
+| *camel.component.debezium-sqlserver.configuration.max-batch-size* | Maximum size of each batch of source records. Defaults to 2048. | 2048 | Integer
+| *camel.component.debezium-sqlserver.configuration.max-queue-size* | Maximum size of the queue for change events read from the database log but not yet recorded or forwarded. Defaults to 8192, and should always be larger than the maximum batch size. | 8192 | Integer
+| *camel.component.debezium-sqlserver.configuration.message-key-columns* | A semicolon-separated list of expressions that match fully-qualified tables and column(s) to be used as message key. Each expression must match the pattern '<fully-qualified table name>:<key columns>',where the table names could be defined as (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the specific connector,and the key columns are a comma-separated list of columns representing the custom key. F [...]
+| *camel.component.debezium-sqlserver.configuration.name* | Unique name for the connector. Attempting to register again with the same name will fail. |  | String
+| *camel.component.debezium-sqlserver.configuration.offset-commit-policy* | The name of the Java class of the commit policy. It defines when offsets commit has to be triggered based on the number of events processed and the time elapsed since the last commit. This class must implement the interface 'OffsetCommitPolicy'. The default is a periodic commit policy based upon time intervals. | io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy | String
+| *camel.component.debezium-sqlserver.configuration.offset-commit-timeout-ms* | Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt. The default is 5 seconds. | 5000 | Long
+| *camel.component.debezium-sqlserver.configuration.offset-flush-interval-ms* | Interval at which to try committing offsets. The default is 1 minute. | 60000 | Long
+| *camel.component.debezium-sqlserver.configuration.offset-storage* | The name of the Java class that is responsible for persistence of connector offsets. | org.apache.kafka.connect.storage.FileOffsetBackingStore | String
+| *camel.component.debezium-sqlserver.configuration.offset-storage-file-name* | Path to file where offsets are to be stored. Required when offset.storage is set to the FileOffsetBackingStore |  | String
+| *camel.component.debezium-sqlserver.configuration.offset-storage-partitions* | The number of partitions used when creating the offset storage topic. Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |  | Integer
+| *camel.component.debezium-sqlserver.configuration.offset-storage-replication-factor* | Replication factor used when creating the offset storage topic. Required when offset.storage is set to the KafkaOffsetBackingStore |  | Integer
+| *camel.component.debezium-sqlserver.configuration.offset-storage-topic* | The name of the Kafka topic where offsets are to be stored. Required when offset.storage is set to the KafkaOffsetBackingStore. |  | String
+| *camel.component.debezium-sqlserver.configuration.poll-interval-ms* | Frequency in milliseconds to wait for new change events to appear after receiving no events. Defaults to 500ms. | 500 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-delay-ms* | The number of milliseconds to delay before a snapshot will begin. | 0 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-fetch-size* | The maximum number of records that should be loaded into memory while performing a snapshot |  | Integer
+| *camel.component.debezium-sqlserver.configuration.snapshot-lock-timeout-ms* | The maximum number of millis to wait for table locks at the beginning of a snapshot. If locks cannot be acquired in this time frame, the snapshot will be aborted. Defaults to 10 seconds | 10000 | Long
+| *camel.component.debezium-sqlserver.configuration.snapshot-mode* | The criteria for running a snapshot upon startup of the connector. Options include: 'initial' (the default) to specify the connector should run a snapshot only when no offsets are available for the logical server name; 'initial_schema_only' to specify the connector should run a snapshot of the schema when no offsets are available for the logical server name. | initial | String
+| *camel.component.debezium-sqlserver.configuration.snapshot-select-statement-overrides* | This property contains a comma-separated list of fully-qualified tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on thespecific connectors . Select statements for the individual tables are specified in further configuration properties, one for each table, identified by the id 'snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]' or 'snapshot.select.statement.overrides.[SCH [...]
+| *camel.component.debezium-sqlserver.configuration.source-struct-version* | A version of the format of the publicly visible source part in the message | v2 | String
+| *camel.component.debezium-sqlserver.configuration.table-blacklist* | Description is not available here, please check Debezium website for corresponding key 'table.blacklist' description. |  | String
+| *camel.component.debezium-sqlserver.configuration.table-ignore-builtin* | Flag specifying whether built-in tables should be ignored. | true | Boolean
+| *camel.component.debezium-sqlserver.configuration.table-whitelist* | The tables for which changes are to be captured |  | String
+| *camel.component.debezium-sqlserver.configuration.time-precision-mode* | Time, date, and timestamps can be represented with different kinds of precisions, including:'adaptive' (the default) bases the precision of time, date, and timestamp values on the database column's precision; 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always use microseconds precision;'connect' always represents time, date, and timestamp values using Kafka Connect's built-in representations [...]
+| *camel.component.debezium-sqlserver.enabled* | Whether to enable auto configuration of the debezium-sqlserver component. This is enabled by default. |  | Boolean
+|===
+// spring-boot-auto-configure options: END
+
+For more information about configuration:
+https://debezium.io/documentation/reference/0.10/operations/embedded.html#engine-properties[https://debezium.io/documentation/reference/0.10/operations/embedded.html#engine-properties]
+https://debezium.io/documentation/reference/0.10/connectors/sqlserver.html#connector-properties[https://debezium.io/documentation/reference/0.10/connectors/sqlserver.html#connector-properties]
+
+== Message headers
+
+=== Consumer headers
+
+The following headers are available when consuming change events from Debezium.
+[width="100%",cols="2m,2m,1m,5",options="header"]
+|===
+| Header constant                           | Header value                                   | Type        | Description
+| DebeziumConstants.HEADER_IDENTIFIER       | "CamelDebeziumIdentifier"                      | String      | The identifier of the connector, normally is this format "{server-name}.{database-name}.{table-name}".
+| DebeziumConstants.HEADER_KEY              | "CamelDebeziumKey"                             | Struct      | The key of the event, normally is the table Primary Key.
+| DebeziumConstants.HEADER_SOURCE_METADATA  | "CamelDebeziumSourceMetadata"                  | Map         | The metadata about the source event, for example `table` name, database `name`, log position, etc, please refer to the Debezium documentation for more info.
+| DebeziumConstants.HEADER_OPERATION        | "CamelDebeziumOperation"                       | String      | If presents, the type of event operation. Values for the connector are `c` for create (or insert), `u` for update, `d` for delete or `r` in case of a snapshot event.
+| DebeziumConstants.HEADER_TIMESTAMP        | "CamelDebeziumTimestamp"                       | Long        | If presents, the time (using the system clock in the JVM) at which the connector processed the event.
+| DebeziumConstants.HEADER_BEFORE           | "CamelDebeziumBefore"                          | Struct     | If presents, contains the state of the row before the event occurred.
+|===
+
+== Message body
+If the message body is not `null` (it is `null` in case of tombstone events), it contains the state of the row after the event occurred, either in `Struct` format or in `Map` format if you use the included Type Converter from `Struct` to `Map` (see below for more explanation).
+
+== Samples
+
+=== Consuming events
+
+Here is a very simple route that you can use in order to listen to Debezium events from SQL Server connector.
+[source,java]
+----
+from("debezium-sqlserver:dbz-test-1?offsetStorageFileName=/usr/offset-file-1.dat&databaseHostname=localhost&databaseUser=debezium&databasePassword=dbz&databaseServerName=my-app-connector&databaseHistoryFileFilename=/usr/history-file-1.dat")
+    .log("Event received from Debezium : ${body}")
+    .log("    with this identifier ${headers.CamelDebeziumIdentifier}")
+    .log("    with these source metadata ${headers.CamelDebeziumSourceMetadata}")
+    .log("    the event occurred upon this operation '${headers.CamelDebeziumSourceOperation}'")
+    .log("    on this database '${headers.CamelDebeziumSourceMetadata[db]}' and this table '${headers.CamelDebeziumSourceMetadata[table]}'")
+    .log("    with the key ${headers.CamelDebeziumKey}")
+    .log("    the previous value is ${headers.CamelDebeziumBefore}")
+----
+
+By default, the component will emit the events in the body and `CamelDebeziumBefore` header as https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Struct.html[`Struct`] data type; the reasoning behind this is to preserve the schema information in case it is needed.
+However, the component as well contains a xref:manual::type-converter.adoc[Type Converter] that converts
+from default output type of https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Struct.html[`Struct`] to `Map` in order to leverage Camel's rich xref:manual::data-format.adoc[Data Format] types which many of them work out of box with `Map` data type.
+To use it, you can either add `Map.class` type when you access the message e.g: `exchange.getIn().getBody(Map.class)`, or you can convert the body always to `Map` from the route builder by adding `.convertBodyTo(Map.class)` to your Camel Route DSL after `from` statement.
+
+We mentioned above about the schema, which can be used in case you need to perform advance data transformation and the schema is needed for that. If you choose not to convert your body to `Map`,
+you can obtain the schema information as https://kafka.apache.org/22/javadoc/org/apache/kafka/connect/data/Schema.html[`Schema`] type from `Struct` like this:
+[source,java]
+----
+from("debezium-sqlserver:[name]?[options]")
+    .process(exchange -> {
+        final Struct bodyValue = exchange.getIn().getBody(Struct.class);
+        final Schema schemaValue = bodyValue.schema();
+
+        log.info("Body value is :" + bodyValue);
+        log.info("With Schema : " + schemaValue);
+        log.info("And fields of :" + schemaValue.fields());
+        log.info("Field name has `" + schemaValue.field("name").schema() + "` type");
+    });
+----
+
+*Important Note:* This component is a thin wrapper around Debezium Engine as mentioned, therefore before using this component in production, you need to understand how Debezium works and how configurations can reflect the expected behavior, especially in regards to https://debezium.io/documentation/reference/0.10/operations/embedded.html#_handling_failures[handling failures].
diff --git a/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverComponent.java b/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverComponent.java
new file mode 100644
index 0000000..dfb5f8e
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverComponent.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium;
+
+import org.apache.camel.CamelContext;
+import org.apache.camel.component.debezium.configuration.SqlServerConnectorEmbeddedDebeziumConfiguration;
+import org.apache.camel.spi.annotations.Component;
+
+@Component("debezium-sqlserver")
+public final class DebeziumSqlserverComponent extends DebeziumComponent<SqlServerConnectorEmbeddedDebeziumConfiguration> {
+
+    private SqlServerConnectorEmbeddedDebeziumConfiguration configuration;
+
+    public DebeziumSqlserverComponent() {
+    }
+
+    public DebeziumSqlserverComponent(final CamelContext context) {
+        super(context);
+    }
+
+    /**
+     * Allow pre-configured Configurations to be set.
+     *
+     * @return {@link SqlServerConnectorEmbeddedDebeziumConfiguration}
+     */
+    @Override
+    public SqlServerConnectorEmbeddedDebeziumConfiguration getConfiguration() {
+        if (configuration == null) {
+            return new SqlServerConnectorEmbeddedDebeziumConfiguration();
+        }
+        return configuration;
+    }
+
+    @Override
+    public void setConfiguration(SqlServerConnectorEmbeddedDebeziumConfiguration configuration) {
+        this.configuration = configuration;
+    }
+
+    @Override
+    protected DebeziumEndpoint initializeDebeziumEndpoint(String uri, SqlServerConnectorEmbeddedDebeziumConfiguration configuration) {
+        return new DebeziumSqlserverEndpoint(uri, this,  configuration);
+    }
+}
diff --git a/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverEndpoint.java b/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverEndpoint.java
new file mode 100644
index 0000000..c8d42cb
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/main/java/org/apache/camel/component/debezium/DebeziumSqlserverEndpoint.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium;
+
+import org.apache.camel.component.debezium.configuration.SqlServerConnectorEmbeddedDebeziumConfiguration;
+import org.apache.camel.spi.UriEndpoint;
+import org.apache.camel.spi.UriParam;
+
+/**
+ * Represents a Debezium SQL Server endpoint which is used to capture changes in a SQL Server database so that applications can see those changes and respond to them.
+ */
+@UriEndpoint(firstVersion = "3.0.0", scheme = "debezium-sqlserver", title = "Debezium SQL Server Connector", syntax = "debezium-sqlserver:name", label = "database,sql,sqlserver", consumerOnly = true)
+public final class DebeziumSqlserverEndpoint extends DebeziumEndpoint<SqlServerConnectorEmbeddedDebeziumConfiguration> {
+
+    @UriParam
+    private SqlServerConnectorEmbeddedDebeziumConfiguration configuration;
+
+    public DebeziumSqlserverEndpoint(final String uri, final DebeziumSqlserverComponent component, final SqlServerConnectorEmbeddedDebeziumConfiguration configuration) {
+        super(uri, component);
+        this.configuration = configuration;
+    }
+
+    public DebeziumSqlserverEndpoint() {
+    }
+
+    @Override
+    public SqlServerConnectorEmbeddedDebeziumConfiguration getConfiguration() {
+        return configuration;
+    }
+
+    @Override
+    public void setConfiguration(final SqlServerConnectorEmbeddedDebeziumConfiguration configuration) {
+        this.configuration = configuration;
+    }
+}
diff --git a/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/DebeziumSqlserverComponentTest.java b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/DebeziumSqlserverComponentTest.java
new file mode 100644
index 0000000..5d2c869
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/DebeziumSqlserverComponentTest.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.camel.component.debezium.configuration.SqlServerConnectorEmbeddedDebeziumConfiguration;
+import org.apache.camel.impl.DefaultCamelContext;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class DebeziumSqlserverComponentTest {
+
+    @Test
+    public void testIfConnectorEndpointCreatedWithConfig() throws Exception {
+        final Map<String, Object> params = new HashMap<>();
+        params.put("offsetStorageFileName", "/offset_test_file");
+        params.put("databaseHostname", "localhost");
+        params.put("databaseUser", "dbz");
+        params.put("databasePassword", "pwd");
+        params.put("databaseServerName", "test");
+        params.put("databaseServerId", 1234);
+        params.put("databaseHistoryFileFilename", "/db_history_file_test");
+
+        final String remaining = "test_name";
+        final String uri = "debezium?name=test_name&offsetStorageFileName=/test&"
+                + "databaseHostName=localhost&databaseServerId=1234&databaseUser=dbz&databasePassword=pwd&"
+                + "databaseServerName=test&databaseHistoryFileName=/test";
+
+        final DebeziumComponent debeziumComponent = new DebeziumSqlserverComponent(new DefaultCamelContext());
+        final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining, params);
+
+        assertNotNull(debeziumEndpoint);
+
+        // test for config
+        final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = (SqlServerConnectorEmbeddedDebeziumConfiguration)debeziumEndpoint
+                .getConfiguration();
+        assertEquals("test_name", configuration.getName());
+        assertEquals("/offset_test_file", configuration.getOffsetStorageFileName());
+        assertEquals("localhost", configuration.getDatabaseHostname());
+        assertEquals("dbz", configuration.getDatabaseUser());
+        assertEquals("pwd", configuration.getDatabasePassword());
+        assertEquals("test", configuration.getDatabaseServerName());
+        assertEquals("/db_history_file_test", configuration.getDatabaseHistoryFileFilename());
+    }
+
+    @Test
+    public void testIfCreatesComponentWithExternalConfiguration() throws Exception {
+        final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = new SqlServerConnectorEmbeddedDebeziumConfiguration();
+        configuration.setName("test_config");
+        configuration.setDatabaseUser("test_db");
+        configuration.setDatabasePassword("pwd");
+        configuration.setOffsetStorageFileName("/offset/file");
+        configuration.setDatabaseServerName("test");
+
+        final String uri = "debezium:dummy";
+        final DebeziumComponent debeziumComponent = new DebeziumSqlserverComponent(new DefaultCamelContext());
+
+        // set configurations
+        debeziumComponent.setConfiguration(configuration);
+
+        final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, null,
+                Collections.emptyMap());
+
+        assertNotNull(debeziumEndpoint);
+
+        // assert configurations
+        final SqlServerConnectorEmbeddedDebeziumConfiguration actualConfigurations = (SqlServerConnectorEmbeddedDebeziumConfiguration)debeziumEndpoint
+                .getConfiguration();
+        assertNotNull(actualConfigurations);
+        assertEquals(configuration.getName(), actualConfigurations.getName());
+        assertEquals(configuration.getDatabaseUser(),
+                actualConfigurations.getDatabaseUser());
+        assertEquals(configuration.getConnectorClass(), actualConfigurations.getConnectorClass());
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIfItHandlesNullExternalConfigurations() throws Exception {
+        final String remaining = "";
+        final String uri = "debezium:";
+        final DebeziumComponent debeziumComponent = new DebeziumSqlserverComponent(new DefaultCamelContext());
+
+        // set configurations
+        debeziumComponent.setConfiguration(null);
+
+        final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining,
+                Collections.emptyMap());
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIfItHandlesNullExternalConfigurationsWithValidUri() throws Exception {
+        final String remaining = "dummy";
+        final String uri = "debezium:dummy";
+        final DebeziumComponent debeziumComponent = new DebeziumSqlserverComponent(new DefaultCamelContext());
+
+        // set configurations
+        debeziumComponent.setConfiguration(null);
+
+        final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining,
+                Collections.emptyMap());
+    }
+}
\ No newline at end of file
diff --git a/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
new file mode 100644
index 0000000..f209b0f
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/test/java/org/apache/camel/component/debezium/configuration/SqlserverConnectorEmbeddedDebeziumConfigurationTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.configuration;
+
+import io.debezium.config.CommonConnectorConfig;
+import io.debezium.config.Configuration;
+import io.debezium.connector.sqlserver.SqlServerConnector;
+import io.debezium.embedded.EmbeddedEngine;
+import org.apache.camel.component.debezium.DebeziumConstants;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class SqlserverConnectorEmbeddedDebeziumConfigurationTest {
+
+    @Test
+    public void testIfCreatesConfig() {
+        final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = new SqlServerConnectorEmbeddedDebeziumConfiguration();
+        configuration.setName("test_config");
+        configuration.setDatabaseUser("test_user");
+        configuration.setMaxQueueSize(1212);
+
+        final Configuration dbzConfigurations = configuration.createDebeziumConfiguration();
+
+        assertEquals("test_config", dbzConfigurations.getString(EmbeddedEngine.ENGINE_NAME));
+        assertEquals("test_user", dbzConfigurations.getString("database.user"));
+        assertEquals(1212, dbzConfigurations.getInteger(CommonConnectorConfig.MAX_QUEUE_SIZE));
+        assertEquals(SqlServerConnector.class.getName(), dbzConfigurations.getString(EmbeddedEngine.CONNECTOR_CLASS));
+        assertEquals(DebeziumConstants.DEFAULT_OFFSET_STORAGE,
+                dbzConfigurations.getString(EmbeddedEngine.OFFSET_STORAGE));
+    }
+
+    @Test
+    public void testIfValidatesConfigurationCorrectly() {
+        final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = new SqlServerConnectorEmbeddedDebeziumConfiguration();
+
+        configuration.setName("test_config");
+        configuration.setDatabaseUser("test_db");
+        configuration.setDatabaseServerName("test_server");
+        configuration.setOffsetStorageFileName("/offset/file");
+        configuration.setDatabaseHistoryFileFilename("/database_history/file");
+
+        assertFalse(configuration.validateConfiguration().isValid());
+
+        configuration.setDatabaseHostname("localhost");
+        configuration.setDatabasePassword("test_pwd");
+
+        assertTrue(configuration.validateConfiguration().isValid());
+    }
+
+    @Test
+    public void testValidateConfigurationsForAllRequiredFields() {
+        final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = new SqlServerConnectorEmbeddedDebeziumConfiguration();
+        configuration.setName("test_config");
+        configuration.setDatabaseUser("test_db");
+        configuration.setDatabaseHostname("localhost");
+        configuration.setDatabasePassword("test_pwd");
+        configuration.setDatabaseServerName("test_server");
+        configuration.setOffsetStorageFileName("/offset/file");
+        configuration.setDatabaseHistoryFileFilename("/database_history/file");
+
+        final ConfigurationValidation validation = configuration.validateConfiguration();
+        assertTrue(validation.isValid());
+
+        assertEquals("test_config", configuration.getName());
+        assertEquals("test_db", configuration.getDatabaseUser());
+        assertEquals("localhost", configuration.getDatabaseHostname());
+        assertEquals("test_pwd", configuration.getDatabasePassword());
+        assertEquals("test_server", configuration.getDatabaseServerName());
+        assertEquals("/offset/file", configuration.getOffsetStorageFileName());
+        assertEquals("/database_history/file", configuration.getDatabaseHistoryFileFilename());
+    }
+
+}
\ No newline at end of file
diff --git a/components/camel-debezium-sqlserver/src/test/resources/log4j2.properties b/components/camel-debezium-sqlserver/src/test/resources/log4j2.properties
new file mode 100644
index 0000000..a3dd382
--- /dev/null
+++ b/components/camel-debezium-sqlserver/src/test/resources/log4j2.properties
@@ -0,0 +1,30 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = target/camel-debezium-test.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = %d [%-15.15t] %-5p %-30.30c{1} - %m%n
+
+appender.out.type = Console
+appender.out.name = out
+appender.out.layout.type = PatternLayout
+appender.out.layout.pattern = %d [%-15.15t] %-5p %-30.30c{1} - %m%n
+
+rootLogger.level = INFO
+rootLogger.appenderRef.file.ref = file
\ No newline at end of file
diff --git a/components/pom.xml b/components/pom.xml
index 7ac64b3..4e27792 100644
--- a/components/pom.xml
+++ b/components/pom.xml
@@ -161,6 +161,7 @@
         <module>camel-debezium-mysql</module>
         <module>camel-debezium-postgres</module>
         <module>camel-debezium-mongodb</module>
+        <module>camel-debezium-sqlserver</module>
         <module>camel-ehcache</module>
         <module>camel-elasticsearch-rest</module>
         <module>camel-elsql</module>
diff --git a/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/DebeziumSqlserverEndpointBuilderFactory.java b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/DebeziumSqlserverEndpointBuilderFactory.java
new file mode 100644
index 0000000..2d7963d
--- /dev/null
+++ b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/DebeziumSqlserverEndpointBuilderFactory.java
@@ -0,0 +1,985 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.builder.endpoint.dsl;
+
+import javax.annotation.Generated;
+import org.apache.camel.ExchangePattern;
+import org.apache.camel.builder.EndpointConsumerBuilder;
+import org.apache.camel.builder.EndpointProducerBuilder;
+import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
+import org.apache.camel.spi.ExceptionHandler;
+
+/**
+ * Represents a Debezium SQL Server endpoint which is used to capture changes in
+ * a SQL Server database so that applications can see those changes and
+ * respond to them.
+ * 
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
+public interface DebeziumSqlserverEndpointBuilderFactory {
+
+
+    /**
+     * Builder for endpoint for the Debezium SQL Server Connector component.
+     */
+    public interface DebeziumSqlserverEndpointBuilder
+            extends
+                EndpointConsumerBuilder {
+        default AdvancedDebeziumSqlserverEndpointBuilder advanced() {
+            return (AdvancedDebeziumSqlserverEndpointBuilder) this;
+        }
+        /**
+         * Allows for bridging the consumer to the Camel routing Error Handler,
+         * which mean any exceptions occurred while the consumer is trying to
+         * pickup incoming messages, or the likes, will now be processed as a
+         * message and handled by the routing Error Handler. By default the
+         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+         * with exceptions, that will be logged at WARN or ERROR level and
+         * ignored.
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder bridgeErrorHandler(
+                boolean bridgeErrorHandler) {
+            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
+            return this;
+        }
+        /**
+         * Allows for bridging the consumer to the Camel routing Error Handler,
+         * which mean any exceptions occurred while the consumer is trying to
+         * pickup incoming messages, or the likes, will now be processed as a
+         * message and handled by the routing Error Handler. By default the
+         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+         * with exceptions, that will be logged at WARN or ERROR level and
+         * ignored.
+         * 
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder bridgeErrorHandler(
+                String bridgeErrorHandler) {
+            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
+            return this;
+        }
+        /**
+         * The Converter class that should be used to serialize and deserialize
+         * key data for offsets. The default is JSON converter.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder internalKeyConverter(
+                String internalKeyConverter) {
+            doSetProperty("internalKeyConverter", internalKeyConverter);
+            return this;
+        }
+        /**
+         * The Converter class that should be used to serialize and deserialize
+         * value data for offsets. The default is JSON converter.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder internalValueConverter(
+                String internalValueConverter) {
+            doSetProperty("internalValueConverter", internalValueConverter);
+            return this;
+        }
+        /**
+         * The name of the Java class of the commit policy. It defines when
+         * offsets commit has to be triggered based on the number of events
+         * processed and the time elapsed since the last commit. This class must
+         * implement the interface 'OffsetCommitPolicy'. The default is a
+         * periodic commit policy based upon time intervals.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetCommitPolicy(
+                String offsetCommitPolicy) {
+            doSetProperty("offsetCommitPolicy", offsetCommitPolicy);
+            return this;
+        }
+        /**
+         * Maximum number of milliseconds to wait for records to flush and
+         * partition offset data to be committed to offset storage before
+         * cancelling the process and restoring the offset data to be committed
+         * in a future attempt. The default is 5 seconds.
+         * 
+         * The option is a: <code>long</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetCommitTimeoutMs(
+                long offsetCommitTimeoutMs) {
+            doSetProperty("offsetCommitTimeoutMs", offsetCommitTimeoutMs);
+            return this;
+        }
+        /**
+         * Maximum number of milliseconds to wait for records to flush and
+         * partition offset data to be committed to offset storage before
+         * cancelling the process and restoring the offset data to be committed
+         * in a future attempt. The default is 5 seconds.
+         * 
+         * The option will be converted to a <code>long</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetCommitTimeoutMs(
+                String offsetCommitTimeoutMs) {
+            doSetProperty("offsetCommitTimeoutMs", offsetCommitTimeoutMs);
+            return this;
+        }
+        /**
+         * Interval at which to try committing offsets. The default is 1 minute.
+         * 
+         * The option is a: <code>long</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetFlushIntervalMs(
+                long offsetFlushIntervalMs) {
+            doSetProperty("offsetFlushIntervalMs", offsetFlushIntervalMs);
+            return this;
+        }
+        /**
+         * Interval at which to try committing offsets. The default is 1 minute.
+         * 
+         * The option will be converted to a <code>long</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetFlushIntervalMs(
+                String offsetFlushIntervalMs) {
+            doSetProperty("offsetFlushIntervalMs", offsetFlushIntervalMs);
+            return this;
+        }
+        /**
+         * The name of the Java class that is responsible for persistence of
+         * connector offsets.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStorage(
+                String offsetStorage) {
+            doSetProperty("offsetStorage", offsetStorage);
+            return this;
+        }
+        /**
+         * Path to file where offsets are to be stored. Required when
+         * offset.storage is set to the FileOffsetBackingStore.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStorageFileName(
+                String offsetStorageFileName) {
+            doSetProperty("offsetStorageFileName", offsetStorageFileName);
+            return this;
+        }
+        /**
+         * The number of partitions used when creating the offset storage topic.
+         * Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStoragePartitions(
+                int offsetStoragePartitions) {
+            doSetProperty("offsetStoragePartitions", offsetStoragePartitions);
+            return this;
+        }
+        /**
+         * The number of partitions used when creating the offset storage topic.
+         * Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStoragePartitions(
+                String offsetStoragePartitions) {
+            doSetProperty("offsetStoragePartitions", offsetStoragePartitions);
+            return this;
+        }
+        /**
+         * Replication factor used when creating the offset storage topic.
+         * Required when offset.storage is set to the KafkaOffsetBackingStore.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStorageReplicationFactor(
+                int offsetStorageReplicationFactor) {
+            doSetProperty("offsetStorageReplicationFactor", offsetStorageReplicationFactor);
+            return this;
+        }
+        /**
+         * Replication factor used when creating the offset storage topic.
+         * Required when offset.storage is set to the KafkaOffsetBackingStore.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStorageReplicationFactor(
+                String offsetStorageReplicationFactor) {
+            doSetProperty("offsetStorageReplicationFactor", offsetStorageReplicationFactor);
+            return this;
+        }
+        /**
+         * The name of the Kafka topic where offsets are to be stored. Required
+         * when offset.storage is set to the KafkaOffsetBackingStore.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default DebeziumSqlserverEndpointBuilder offsetStorageTopic(
+                String offsetStorageTopic) {
+            doSetProperty("offsetStorageTopic", offsetStorageTopic);
+            return this;
+        }
+        /**
+         * Description is not available here, please check Debezium website for
+         * corresponding key 'column.blacklist' description.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder columnBlacklist(
+                String columnBlacklist) {
+            doSetProperty("columnBlacklist", columnBlacklist);
+            return this;
+        }
+        /**
+         * The name of the database the connector should be monitoring. When
+         * working with a multi-tenant set-up, must be set to the CDB name.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseDbname(
+                String databaseDbname) {
+            doSetProperty("databaseDbname", databaseDbname);
+            return this;
+        }
+        /**
+         * The name of the DatabaseHistory class that should be used to store
+         * and recover database schema changes. The configuration properties for
+         * the history are prefixed with the 'database.history.' string.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistory(
+                String databaseHistory) {
+            doSetProperty("databaseHistory", databaseHistory);
+            return this;
+        }
+        /**
+         * The path to the file that will be used to record the database
+         * history.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryFileFilename(
+                String databaseHistoryFileFilename) {
+            doSetProperty("databaseHistoryFileFilename", databaseHistoryFileFilename);
+            return this;
+        }
+        /**
+         * A list of host/port pairs that the connector will use for
+         * establishing the initial connection to the Kafka cluster for
+         * retrieving database schema history previously stored by the
+         * connector. This should point to the same Kafka cluster used by the
+         * Kafka Connect process.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaBootstrapServers(
+                String databaseHistoryKafkaBootstrapServers) {
+            doSetProperty("databaseHistoryKafkaBootstrapServers", databaseHistoryKafkaBootstrapServers);
+            return this;
+        }
+        /**
+         * The number of attempts in a row that no data are returned from Kafka
+         * before recover completes. The maximum amount of time to wait after
+         * receiving no data is (recovery.attempts) x
+         * (recovery.poll.interval.ms).
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaRecoveryAttempts(
+                int databaseHistoryKafkaRecoveryAttempts) {
+            doSetProperty("databaseHistoryKafkaRecoveryAttempts", databaseHistoryKafkaRecoveryAttempts);
+            return this;
+        }
+        /**
+         * The number of attempts in a row that no data are returned from Kafka
+         * before recover completes. The maximum amount of time to wait after
+         * receiving no data is (recovery.attempts) x
+         * (recovery.poll.interval.ms).
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaRecoveryAttempts(
+                String databaseHistoryKafkaRecoveryAttempts) {
+            doSetProperty("databaseHistoryKafkaRecoveryAttempts", databaseHistoryKafkaRecoveryAttempts);
+            return this;
+        }
+        /**
+         * The number of milliseconds to wait while polling for persisted data
+         * during recovery.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaRecoveryPollIntervalMs(
+                int databaseHistoryKafkaRecoveryPollIntervalMs) {
+            doSetProperty("databaseHistoryKafkaRecoveryPollIntervalMs", databaseHistoryKafkaRecoveryPollIntervalMs);
+            return this;
+        }
+        /**
+         * The number of milliseconds to wait while polling for persisted data
+         * during recovery.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaRecoveryPollIntervalMs(
+                String databaseHistoryKafkaRecoveryPollIntervalMs) {
+            doSetProperty("databaseHistoryKafkaRecoveryPollIntervalMs", databaseHistoryKafkaRecoveryPollIntervalMs);
+            return this;
+        }
+        /**
+         * The name of the topic for the database schema history.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHistoryKafkaTopic(
+                String databaseHistoryKafkaTopic) {
+            doSetProperty("databaseHistoryKafkaTopic", databaseHistoryKafkaTopic);
+            return this;
+        }
+        /**
+         * Resolvable hostname or IP address of the SQL Server database server.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseHostname(
+                String databaseHostname) {
+            doSetProperty("databaseHostname", databaseHostname);
+            return this;
+        }
+        /**
+         * Password of the SQL Server database user to be used when connecting
+         * to the database.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Required: true
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databasePassword(
+                String databasePassword) {
+            doSetProperty("databasePassword", databasePassword);
+            return this;
+        }
+        /**
+         * Port of the SQL Server database server.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databasePort(int databasePort) {
+            doSetProperty("databasePort", databasePort);
+            return this;
+        }
+        /**
+         * Port of the SQL Server database server.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databasePort(
+                String databasePort) {
+            doSetProperty("databasePort", databasePort);
+            return this;
+        }
+        /**
+         * Unique name that identifies the database server and all recorded
+         * offsets, and that is used as a prefix for all schemas and topics.
+         * Each distinct installation should have a separate namespace and be
+         * monitored by at most one Debezium connector.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Required: true
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseServerName(
+                String databaseServerName) {
+            doSetProperty("databaseServerName", databaseServerName);
+            return this;
+        }
+        /**
+         * Name of the SQL Server database user to be used when connecting to
+         * the database.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder databaseUser(
+                String databaseUser) {
+            doSetProperty("databaseUser", databaseUser);
+            return this;
+        }
+        /**
+         * Specify how DECIMAL and NUMERIC columns should be represented in
+         * change events, including:'precise' (the default) uses
+         * java.math.BigDecimal to represent values, which are encoded in the
+         * change events using a binary representation and Kafka Connect's
+         * 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to
+         * represent values; 'double' represents values using Java's 'double',
+         * which may not offer the precision but will be far easier to use in
+         * consumers.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder decimalHandlingMode(
+                String decimalHandlingMode) {
+            doSetProperty("decimalHandlingMode", decimalHandlingMode);
+            return this;
+        }
+        /**
+         * Length of an interval in milliseconds in which the connector
+         * periodically sends heartbeat messages to a heartbeat topic. Use 0 to
+         * disable heartbeat messages. Disabled by default.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder heartbeatIntervalMs(
+                int heartbeatIntervalMs) {
+            doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
+            return this;
+        }
+        /**
+         * Length of an interval in milliseconds in which the connector
+         * periodically sends heartbeat messages to a heartbeat topic. Use 0 to
+         * disable heartbeat messages. Disabled by default.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder heartbeatIntervalMs(
+                String heartbeatIntervalMs) {
+            doSetProperty("heartbeatIntervalMs", heartbeatIntervalMs);
+            return this;
+        }
+        /**
+         * The prefix that is used to name heartbeat topics.Defaults to
+         * __debezium-heartbeat.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder heartbeatTopicsPrefix(
+                String heartbeatTopicsPrefix) {
+            doSetProperty("heartbeatTopicsPrefix", heartbeatTopicsPrefix);
+            return this;
+        }
+        /**
+         * Maximum size of each batch of source records. Defaults to 2048.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder maxBatchSize(int maxBatchSize) {
+            doSetProperty("maxBatchSize", maxBatchSize);
+            return this;
+        }
+        /**
+         * Maximum size of each batch of source records. Defaults to 2048.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder maxBatchSize(
+                String maxBatchSize) {
+            doSetProperty("maxBatchSize", maxBatchSize);
+            return this;
+        }
+        /**
+         * Maximum size of the queue for change events read from the database
+         * log but not yet recorded or forwarded. Defaults to 8192, and should
+         * always be larger than the maximum batch size.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder maxQueueSize(int maxQueueSize) {
+            doSetProperty("maxQueueSize", maxQueueSize);
+            return this;
+        }
+        /**
+         * Maximum size of the queue for change events read from the database
+         * log but not yet recorded or forwarded. Defaults to 8192, and should
+         * always be larger than the maximum batch size.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder maxQueueSize(
+                String maxQueueSize) {
+            doSetProperty("maxQueueSize", maxQueueSize);
+            return this;
+        }
+        /**
+         * A semicolon-separated list of expressions that match fully-qualified
+         * tables and column(s) to be used as message key. Each expression must
+         * match the pattern ':', where the table names could be defined as
+         * (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the
+         * specific connector,and the key columns are a comma-separated list of
+         * columns representing the custom key. For any table without an
+         * explicit key configuration the table's primary key column(s) will be
+         * used as message key.Example:
+         * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder messageKeyColumns(
+                String messageKeyColumns) {
+            doSetProperty("messageKeyColumns", messageKeyColumns);
+            return this;
+        }
+        /**
+         * Frequency in milliseconds to wait for new change events to appear
+         * after receiving no events. Defaults to 500ms.
+         * 
+         * The option is a: <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder pollIntervalMs(
+                long pollIntervalMs) {
+            doSetProperty("pollIntervalMs", pollIntervalMs);
+            return this;
+        }
+        /**
+         * Frequency in milliseconds to wait for new change events to appear
+         * after receiving no events. Defaults to 500ms.
+         * 
+         * The option will be converted to a <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder pollIntervalMs(
+                String pollIntervalMs) {
+            doSetProperty("pollIntervalMs", pollIntervalMs);
+            return this;
+        }
+        /**
+         * The number of milliseconds to delay before a snapshot will begin.
+         * 
+         * The option is a: <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotDelayMs(
+                long snapshotDelayMs) {
+            doSetProperty("snapshotDelayMs", snapshotDelayMs);
+            return this;
+        }
+        /**
+         * The number of milliseconds to delay before a snapshot will begin.
+         * 
+         * The option will be converted to a <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotDelayMs(
+                String snapshotDelayMs) {
+            doSetProperty("snapshotDelayMs", snapshotDelayMs);
+            return this;
+        }
+        /**
+         * The maximum number of records that should be loaded into memory while
+         * performing a snapshot.
+         * 
+         * The option is a: <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotFetchSize(
+                int snapshotFetchSize) {
+            doSetProperty("snapshotFetchSize", snapshotFetchSize);
+            return this;
+        }
+        /**
+         * The maximum number of records that should be loaded into memory while
+         * performing a snapshot.
+         * 
+         * The option will be converted to a <code>int</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotFetchSize(
+                String snapshotFetchSize) {
+            doSetProperty("snapshotFetchSize", snapshotFetchSize);
+            return this;
+        }
+        /**
+         * The maximum number of millis to wait for table locks at the beginning
+         * of a snapshot. If locks cannot be acquired in this time frame, the
+         * snapshot will be aborted. Defaults to 10 seconds.
+         * 
+         * The option is a: <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotLockTimeoutMs(
+                long snapshotLockTimeoutMs) {
+            doSetProperty("snapshotLockTimeoutMs", snapshotLockTimeoutMs);
+            return this;
+        }
+        /**
+         * The maximum number of millis to wait for table locks at the beginning
+         * of a snapshot. If locks cannot be acquired in this time frame, the
+         * snapshot will be aborted. Defaults to 10 seconds.
+         * 
+         * The option will be converted to a <code>long</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotLockTimeoutMs(
+                String snapshotLockTimeoutMs) {
+            doSetProperty("snapshotLockTimeoutMs", snapshotLockTimeoutMs);
+            return this;
+        }
+        /**
+         * The criteria for running a snapshot upon startup of the connector.
+         * Options include: 'initial' (the default) to specify the connector
+         * should run a snapshot only when no offsets are available for the
+         * logical server name; 'initial_schema_only' to specify the connector
+         * should run a snapshot of the schema when no offsets are available for
+         * the logical server name.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotMode(
+                String snapshotMode) {
+            doSetProperty("snapshotMode", snapshotMode);
+            return this;
+        }
+        /**
+         * This property contains a comma-separated list of fully-qualified
+         * tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on
+         * the specific connectors. Select statements for the individual tables
+         * are specified in further configuration properties, one for each
+         * table, identified by the id
+         * 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or
+         * 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME',
+         * respectively. The value of those properties is the select statement
+         * to use when retrieving data from the specific table during
+         * snapshotting. A possible use case for large append-only tables is
+         * setting a specific point where to start (resume) snapshotting, in
+         * case a previous snapshotting was interrupted.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder snapshotSelectStatementOverrides(
+                String snapshotSelectStatementOverrides) {
+            doSetProperty("snapshotSelectStatementOverrides", snapshotSelectStatementOverrides);
+            return this;
+        }
+        /**
+         * A version of the format of the publicly visible source part in the
+         * message.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder sourceStructVersion(
+                String sourceStructVersion) {
+            doSetProperty("sourceStructVersion", sourceStructVersion);
+            return this;
+        }
+        /**
+         * Description is not available here, please check Debezium website for
+         * corresponding key 'table.blacklist' description.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder tableBlacklist(
+                String tableBlacklist) {
+            doSetProperty("tableBlacklist", tableBlacklist);
+            return this;
+        }
+        /**
+         * Flag specifying whether built-in tables should be ignored.
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder tableIgnoreBuiltin(
+                boolean tableIgnoreBuiltin) {
+            doSetProperty("tableIgnoreBuiltin", tableIgnoreBuiltin);
+            return this;
+        }
+        /**
+         * Flag specifying whether built-in tables should be ignored.
+         * 
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder tableIgnoreBuiltin(
+                String tableIgnoreBuiltin) {
+            doSetProperty("tableIgnoreBuiltin", tableIgnoreBuiltin);
+            return this;
+        }
+        /**
+         * The tables for which changes are to be captured.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder tableWhitelist(
+                String tableWhitelist) {
+            doSetProperty("tableWhitelist", tableWhitelist);
+            return this;
+        }
+        /**
+         * Time, date, and timestamps can be represented with different kinds of
+         * precisions, including:'adaptive' (the default) bases the precision of
+         * time, date, and timestamp values on the database column's precision;
+         * 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields
+         * always use microseconds precision;'connect' always represents time,
+         * date, and timestamp values using Kafka Connect's built-in
+         * representations for Time, Date, and Timestamp, which uses millisecond
+         * precision regardless of the database columns' precision .
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: sqlserver
+         */
+        default DebeziumSqlserverEndpointBuilder timePrecisionMode(
+                String timePrecisionMode) {
+            doSetProperty("timePrecisionMode", timePrecisionMode);
+            return this;
+        }
+    }
+
+    /**
+     * Advanced builder for endpoint for the Debezium SQL Server Connector
+     * component.
+     */
+    public interface AdvancedDebeziumSqlserverEndpointBuilder
+            extends
+                EndpointConsumerBuilder {
+        default DebeziumSqlserverEndpointBuilder basic() {
+            return (DebeziumSqlserverEndpointBuilder) this;
+        }
+        /**
+         * To let the consumer use a custom ExceptionHandler. Notice if the
+         * option bridgeErrorHandler is enabled then this option is not in use.
+         * By default the consumer will deal with exceptions, that will be
+         * logged at WARN or ERROR level and ignored.
+         * 
+         * The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
+         * type.
+         * 
+         * Group: consumer (advanced)
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder exceptionHandler(
+                ExceptionHandler exceptionHandler) {
+            doSetProperty("exceptionHandler", exceptionHandler);
+            return this;
+        }
+        /**
+         * To let the consumer use a custom ExceptionHandler. Notice if the
+         * option bridgeErrorHandler is enabled then this option is not in use.
+         * By default the consumer will deal with exceptions, that will be
+         * logged at WARN or ERROR level and ignored.
+         * 
+         * The option will be converted to a
+         * <code>org.apache.camel.spi.ExceptionHandler</code> type.
+         * 
+         * Group: consumer (advanced)
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder exceptionHandler(
+                String exceptionHandler) {
+            doSetProperty("exceptionHandler", exceptionHandler);
+            return this;
+        }
+        /**
+         * Sets the exchange pattern when the consumer creates an exchange.
+         * 
+         * The option is a: <code>org.apache.camel.ExchangePattern</code> type.
+         * 
+         * Group: consumer (advanced)
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder exchangePattern(
+                ExchangePattern exchangePattern) {
+            doSetProperty("exchangePattern", exchangePattern);
+            return this;
+        }
+        /**
+         * Sets the exchange pattern when the consumer creates an exchange.
+         * 
+         * The option will be converted to a
+         * <code>org.apache.camel.ExchangePattern</code> type.
+         * 
+         * Group: consumer (advanced)
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder exchangePattern(
+                String exchangePattern) {
+            doSetProperty("exchangePattern", exchangePattern);
+            return this;
+        }
+        /**
+         * Whether the endpoint should use basic property binding (Camel 2.x) or
+         * the newer property binding with additional capabilities.
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder basicPropertyBinding(
+                boolean basicPropertyBinding) {
+            doSetProperty("basicPropertyBinding", basicPropertyBinding);
+            return this;
+        }
+        /**
+         * Whether the endpoint should use basic property binding (Camel 2.x) or
+         * the newer property binding with additional capabilities.
+         * 
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder basicPropertyBinding(
+                String basicPropertyBinding) {
+            doSetProperty("basicPropertyBinding", basicPropertyBinding);
+            return this;
+        }
+        /**
+         * Sets whether synchronous processing should be strictly used, or Camel
+         * is allowed to use asynchronous processing (if supported).
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder synchronous(
+                boolean synchronous) {
+            doSetProperty("synchronous", synchronous);
+            return this;
+        }
+        /**
+         * Sets whether synchronous processing should be strictly used, or Camel
+         * is allowed to use asynchronous processing (if supported).
+         * 
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedDebeziumSqlserverEndpointBuilder synchronous(
+                String synchronous) {
+            doSetProperty("synchronous", synchronous);
+            return this;
+        }
+    }
+    /**
+     * Debezium SQL Server Connector (camel-debezium-sqlserver)
+     * Represents a Debezium SQL Server endpoint which is used to capture
+     * changes in a SQL Server database so that applications can see those
+     * changes and respond to them.
+     * 
+     * Category: database,sql,sqlserver
+     * Available as of version: 3.0
+     * Maven coordinates: org.apache.camel:camel-debezium-sqlserver
+     * 
+     * Syntax: <code>debezium-sqlserver:name</code>
+     * 
+     * Path parameter: name (required)
+     * Unique name for the connector. Attempting to register again with the same
+     * name will fail.
+     */
+    default DebeziumSqlserverEndpointBuilder debeziumSqlserver(String path) {
+        class DebeziumSqlserverEndpointBuilderImpl extends AbstractEndpointBuilder implements DebeziumSqlserverEndpointBuilder, AdvancedDebeziumSqlserverEndpointBuilder {
+            public DebeziumSqlserverEndpointBuilderImpl(String path) {
+                super("debezium-sqlserver", path);
+            }
+        }
+        return new DebeziumSqlserverEndpointBuilderImpl(path);
+    }
+}
\ No newline at end of file
diff --git a/parent/pom.xml b/parent/pom.xml
index 7e91c02..fb17ae4 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -1210,6 +1210,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
         <artifactId>camel-digitalocean</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -2834,6 +2839,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver-starter</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
         <artifactId>camel-digitalocean-starter</artifactId>
         <version>${project.version}</version>
       </dependency>
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/pom.xml b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/pom.xml
new file mode 100644
index 0000000..b6e5522
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/pom.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.camel</groupId>
+    <artifactId>components-starter</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>camel-debezium-sqlserver-starter</artifactId>
+  <packaging>jar</packaging>
+  <name>Spring-Boot Starter :: Camel :: Debezium :: SQL Server</name>
+  <description>Spring-Boot Starter for Camel Debezium SQL Server support</description>
+  <dependencies>
+    <dependency>
+      <groupId>org.springframework.boot</groupId>
+      <artifactId>spring-boot-starter</artifactId>
+      <version>${spring-boot-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.camel</groupId>
+      <artifactId>camel-debezium-sqlserver</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <!--START OF GENERATED CODE-->
+    <dependency>
+      <groupId>org.apache.camel</groupId>
+      <artifactId>camel-core-starter</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.camel</groupId>
+      <artifactId>camel-spring-boot-starter</artifactId>
+    </dependency>
+    <!--END OF GENERATED CODE-->
+  </dependencies>
+</project>
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentAutoConfiguration.java b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentAutoConfiguration.java
new file mode 100644
index 0000000..2eb1b65
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentAutoConfiguration.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.springboot;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Generated;
+import org.apache.camel.CamelContext;
+import org.apache.camel.component.debezium.DebeziumSqlserverComponent;
+import org.apache.camel.spi.ComponentCustomizer;
+import org.apache.camel.spi.HasId;
+import org.apache.camel.spring.boot.CamelAutoConfiguration;
+import org.apache.camel.spring.boot.ComponentConfigurationProperties;
+import org.apache.camel.spring.boot.util.CamelPropertiesHelper;
+import org.apache.camel.spring.boot.util.ConditionalOnCamelContextAndAutoConfigurationBeans;
+import org.apache.camel.spring.boot.util.GroupCondition;
+import org.apache.camel.spring.boot.util.HierarchicalPropertiesEvaluator;
+import org.apache.camel.support.IntrospectionSupport;
+import org.apache.camel.util.ObjectHelper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.AutoConfigureAfter;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Conditional;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Lazy;
+
+/**
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo")
+@Configuration
+@Conditional({ConditionalOnCamelContextAndAutoConfigurationBeans.class,
+        DebeziumSqlserverComponentAutoConfiguration.GroupConditions.class})
+@AutoConfigureAfter(CamelAutoConfiguration.class)
+@EnableConfigurationProperties({ComponentConfigurationProperties.class,
+        DebeziumSqlserverComponentConfiguration.class})
+public class DebeziumSqlserverComponentAutoConfiguration {
+
+    private static final Logger LOGGER = LoggerFactory
+            .getLogger(DebeziumSqlserverComponentAutoConfiguration.class);
+    @Autowired
+    private ApplicationContext applicationContext;
+    @Autowired
+    private CamelContext camelContext;
+    @Autowired
+    private DebeziumSqlserverComponentConfiguration configuration;
+    @Autowired(required = false)
+    private List<ComponentCustomizer<DebeziumSqlserverComponent>> customizers;
+
+    static class GroupConditions extends GroupCondition {
+        public GroupConditions() {
+            super("camel.component", "camel.component.debezium-sqlserver");
+        }
+    }
+
+    @Lazy
+    @Bean(name = "debezium-sqlserver-component")
+    @ConditionalOnMissingBean(DebeziumSqlserverComponent.class)
+    public DebeziumSqlserverComponent configureDebeziumSqlserverComponent()
+            throws Exception {
+        DebeziumSqlserverComponent component = new DebeziumSqlserverComponent();
+        component.setCamelContext(camelContext);
+        Map<String, Object> parameters = new HashMap<>();
+        IntrospectionSupport.getProperties(configuration, parameters, null,
+                false);
+        for (Map.Entry<String, Object> entry : parameters.entrySet()) {
+            Object value = entry.getValue();
+            Class<?> paramClass = value.getClass();
+            if (paramClass.getName().endsWith("NestedConfiguration")) {
+                Class nestedClass = null;
+                try {
+                    nestedClass = (Class) paramClass.getDeclaredField(
+                            "CAMEL_NESTED_CLASS").get(null);
+                    HashMap<String, Object> nestedParameters = new HashMap<>();
+                    IntrospectionSupport.getProperties(value, nestedParameters,
+                            null, false);
+                    Object nestedProperty = nestedClass.newInstance();
+                    CamelPropertiesHelper.setCamelProperties(camelContext,
+                            nestedProperty, nestedParameters, false);
+                    entry.setValue(nestedProperty);
+                } catch (NoSuchFieldException e) {
+                }
+            }
+        }
+        CamelPropertiesHelper.setCamelProperties(camelContext, component,
+                parameters, false);
+        if (ObjectHelper.isNotEmpty(customizers)) {
+            for (ComponentCustomizer<DebeziumSqlserverComponent> customizer : customizers) {
+                boolean useCustomizer = (customizer instanceof HasId)
+                        ? HierarchicalPropertiesEvaluator.evaluate(
+                                applicationContext.getEnvironment(),
+                                "camel.component.customizer",
+                                "camel.component.debezium-sqlserver.customizer",
+                                ((HasId) customizer).getId())
+                        : HierarchicalPropertiesEvaluator.evaluate(
+                                applicationContext.getEnvironment(),
+                                "camel.component.customizer",
+                                "camel.component.debezium-sqlserver.customizer");
+                if (useCustomizer) {
+                    LOGGER.debug("Configure component {}, with customizer {}",
+                            component, customizer);
+                    customizer.customize(component);
+                }
+            }
+        }
+        return component;
+    }
+}
\ No newline at end of file
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentConfiguration.java b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentConfiguration.java
new file mode 100644
index 0000000..61e8f39
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/java/org/apache/camel/component/debezium/springboot/DebeziumSqlserverComponentConfiguration.java
@@ -0,0 +1,663 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.debezium.springboot;
+
+import javax.annotation.Generated;
+import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+/**
+ * Represents a Debezium SQL Server endpoint which is used to capture changes in
+ * SQL Server database so that applications can see those changes and
+ * respond to them.
+ * 
+ * Generated by camel-package-maven-plugin - do not edit this file!
+ */
+@Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo")
+@ConfigurationProperties(prefix = "camel.component.debezium-sqlserver")
+public class DebeziumSqlserverComponentConfiguration
+        extends
+            ComponentConfigurationPropertiesCommon {
+
+    /**
+     * Whether to enable auto configuration of the debezium-sqlserver component.
+     * This is enabled by default.
+     */
+    private Boolean enabled;
+    /**
+     * Allow pre-configured Configurations to be set.
+     */
+    private SqlServerConnectorEmbeddedDebeziumConfigurationNestedConfiguration configuration;
+    /**
+     * Whether the component should use basic property binding (Camel 2.x) or
+     * the newer property binding with additional capabilities
+     */
+    private Boolean basicPropertyBinding = false;
+
+    public SqlServerConnectorEmbeddedDebeziumConfigurationNestedConfiguration getConfiguration() {
+        return configuration;
+    }
+
+    public void setConfiguration(
+            SqlServerConnectorEmbeddedDebeziumConfigurationNestedConfiguration configuration) {
+        this.configuration = configuration;
+    }
+
+    public Boolean getBasicPropertyBinding() {
+        return basicPropertyBinding;
+    }
+
+    public void setBasicPropertyBinding(Boolean basicPropertyBinding) {
+        this.basicPropertyBinding = basicPropertyBinding;
+    }
+
+    public static class SqlServerConnectorEmbeddedDebeziumConfigurationNestedConfiguration {
+        public static final Class CAMEL_NESTED_CLASS = org.apache.camel.component.debezium.configuration.SqlServerConnectorEmbeddedDebeziumConfiguration.class;
+        /**
+         * A semicolon-separated list of expressions that match fully-qualified
+         * tables and column(s) to be used as message key. Each expression must
+         * match the pattern '<fully-qualified table name>:<key columns>', where
+         * the table names could be defined as (DB_NAME.TABLE_NAME) or
+         * (SCHEMA_NAME.TABLE_NAME), depending on the specific connector, and the
+         * key columns are a comma-separated list of columns representing the
+         * custom key. For any table without an explicit key configuration the
+         * table's primary key column(s) will be used as message key.Example:
+         * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id
+         */
+        private String messageKeyColumns;
+        /**
+         * Maximum size of the queue for change events read from the database
+         * log but not yet recorded or forwarded. Defaults to 8192, and should
+         * always be larger than the maximum batch size.
+         */
+        private Integer maxQueueSize = 8192;
+        /**
+         * The name of the topic for the database schema history
+         */
+        private String databaseHistoryKafkaTopic;
+        /**
+         * Description is not available here, please check Debezium website for
+         * corresponding key 'column.blacklist' description.
+         */
+        private String columnBlacklist;
+        /**
+         * The number of milliseconds to delay before a snapshot will begin.
+         */
+        private Long snapshotDelayMs = 0L;
+        /**
+         * The number of attempts in a row that no data are returned from Kafka
+         * before recover completes. The maximum amount of time to wait after
+         * receiving no data is (recovery.attempts) x
+         * (recovery.poll.interval.ms).
+         */
+        private Integer databaseHistoryKafkaRecoveryAttempts = 100;
+        /**
+         * Description is not available here, please check Debezium website for
+         * corresponding key 'table.blacklist' description.
+         */
+        private String tableBlacklist;
+        /**
+         * The tables for which changes are to be captured
+         */
+        private String tableWhitelist;
+        /**
+         * Specify how DECIMAL and NUMERIC columns should be represented in
+         * change events, including:'precise' (the default) uses
+         * java.math.BigDecimal to represent values, which are encoded in the
+         * change events using a binary representation and Kafka Connect's
+         * 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to
+         * represent values; 'double' represents values using Java's 'double',
+         * which may not offer the precision but will be far easier to use in
+         * consumers.
+         */
+        private String decimalHandlingMode = "precise";
+        /**
+         * The number of milliseconds to wait while polling for persisted data
+         * during recovery.
+         */
+        private Integer databaseHistoryKafkaRecoveryPollIntervalMs = 100;
+        /**
+         * Frequency in milliseconds to wait for new change events to appear
+         * after receiving no events. Defaults to 500ms.
+         */
+        private Long pollIntervalMs = 500L;
+        /**
+         * The prefix that is used to name heartbeat topics. Defaults to
+         * __debezium-heartbeat.
+         */
+        private String heartbeatTopicsPrefix = "__debezium-heartbeat";
+        /**
+         * Flag specifying whether built-in tables should be ignored.
+         */
+        private Boolean tableIgnoreBuiltin = true;
+        /**
+         * The maximum number of records that should be loaded into memory while
+         * performing a snapshot
+         */
+        private Integer snapshotFetchSize;
+        /**
+         * The maximum number of millis to wait for table locks at the beginning
+         * of a snapshot. If locks cannot be acquired in this time frame, the
+         * snapshot will be aborted. Defaults to 10 seconds
+         */
+        private Long snapshotLockTimeoutMs = 10000L;
+        /**
+         * The path to the file that will be used to record the database history
+         */
+        private String databaseHistoryFileFilename;
+        /**
+         * The name of the database the connector should be monitoring. When
+         * working with a multi-tenant set-up, must be set to the CDB name.
+         */
+        private String databaseDbname;
+        /**
+         * Name of the SQL Server database user to be used when connecting to
+         * the database.
+         */
+        private String databaseUser;
+        /**
+         * This property contains a comma-separated list of fully-qualified
+         * tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on
+         * the specific connectors. Select statements for the individual tables
+         * are specified in further configuration properties, one for each
+         * table, identified by the id
+         * 'snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]' or
+         * 'snapshot.select.statement.overrides.[SCHEMA_NAME].[TABLE_NAME]',
+         * respectively. The value of those properties is the select statement
+         * to use when retrieving data from the specific table during
+         * snapshotting. A possible use case for large append-only tables is
+         * setting a specific point where to start (resume) snapshotting, in
+         * case a previous snapshotting was interrupted.
+         */
+        private String snapshotSelectStatementOverrides;
+        /**
+         * A list of host/port pairs that the connector will use for
+         * establishing the initial connection to the Kafka cluster for
+         * retrieving database schema history previously stored by the
+         * connector. This should point to the same Kafka cluster used by the
+         * Kafka Connect process.
+         */
+        private String databaseHistoryKafkaBootstrapServers;
+        /**
+         * Time, date, and timestamps can be represented with different kinds of
+         * precisions, including:'adaptive' (the default) bases the precision of
+         * time, date, and timestamp values on the database column's precision;
+         * 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields
+         * always use microseconds precision;'connect' always represents time,
+         * date, and timestamp values using Kafka Connect's built-in
+         * representations for Time, Date, and Timestamp, which uses millisecond
+         * precision regardless of the database columns' precision.
+         */
+        private String timePrecisionMode = "adaptive";
+        /**
+         * Unique name that identifies the database server and all recorded
+         * offsets, and that is used as a prefix for all schemas and topics.
+         * Each distinct installation should have a separate namespace and be
+         * monitored by at most one Debezium connector.
+         */
+        private String databaseServerName;
+        /**
+         * Length of an interval in milliseconds in which the connector
+         * periodically sends heartbeat messages to a heartbeat topic. Use 0 to
+         * disable heartbeat messages. Disabled by default.
+         */
+        private Integer heartbeatIntervalMs = 0;
+        /**
+         * A version of the format of the publicly visible source part in the
+         * message
+         */
+        private String sourceStructVersion = "v2";
+        /**
+         * Port of the SQL Server database server.
+         */
+        private Integer databasePort = 1433;
+        /**
+         * Resolvable hostname or IP address of the SQL Server database server.
+         */
+        private String databaseHostname;
+        /**
+         * Password of the SQL Server database user to be used when connecting
+         * to the database.
+         */
+        private String databasePassword;
+        /**
+         * Maximum size of each batch of source records. Defaults to 2048.
+         */
+        private Integer maxBatchSize = 2048;
+        /**
+         * The criteria for running a snapshot upon startup of the connector.
+         * Options include: 'initial' (the default) to specify the connector
+         * should run a snapshot only when no offsets are available for the
+         * logical server name; 'initial_schema_only' to specify the connector
+         * should run a snapshot of the schema when no offsets are available for
+         * the logical server name.
+         */
+        private String snapshotMode = "initial";
+        /**
+         * The name of the DatabaseHistory class that should be used to store
+         * and recover database schema changes. The configuration properties for
+         * the history are prefixed with the 'database.history.' string.
+         */
+        private String databaseHistory = "io.debezium.relational.history.FileDatabaseHistory";
+        /**
+         * The name of the Java class for the connector
+         */
+        private Class connectorClass;
+        /**
+         * Unique name for the connector. Attempting to register again with the
+         * same name will fail.
+         */
+        private String name;
+        /**
+         * The name of the Java class that is responsible for persistence of
+         * connector offsets.
+         */
+        private String offsetStorage = "org.apache.kafka.connect.storage.FileOffsetBackingStore";
+        /**
+         * Path to file where offsets are to be stored. Required when
+         * offset.storage is set to the FileOffsetBackingStore
+         */
+        private String offsetStorageFileName;
+        /**
+         * The name of the Kafka topic where offsets are to be stored. Required
+         * when offset.storage is set to the KafkaOffsetBackingStore.
+         */
+        private String offsetStorageTopic;
+        /**
+         * Replication factor used when creating the offset storage topic.
+         * Required when offset.storage is set to the KafkaOffsetBackingStore
+         */
+        private Integer offsetStorageReplicationFactor;
+        /**
+         * The name of the Java class of the commit policy. It defines when
+         * offsets commit has to be triggered based on the number of events
+         * processed and the time elapsed since the last commit. This class must
+         * implement the interface 'OffsetCommitPolicy'. The default is a
+         * periodic commit policy based upon time intervals.
+         */
+        private String offsetCommitPolicy = "io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy";
+        /**
+         * Interval at which to try committing offsets. The default is 1 minute.
+         */
+        private Long offsetFlushIntervalMs = 60000L;
+        /**
+         * Maximum number of milliseconds to wait for records to flush and
+         * partition offset data to be committed to offset storage before
+         * cancelling the process and restoring the offset data to be committed
+         * in a future attempt. The default is 5 seconds.
+         */
+        private Long offsetCommitTimeoutMs = 5000L;
+        /**
+         * The number of partitions used when creating the offset storage topic.
+         * Required when offset.storage is set to the 'KafkaOffsetBackingStore'.
+         */
+        private Integer offsetStoragePartitions;
+        /**
+         * The Converter class that should be used to serialize and deserialize
+         * key data for offsets. The default is JSON converter.
+         */
+        private String internalKeyConverter = "org.apache.kafka.connect.json.JsonConverter";
+        /**
+         * The Converter class that should be used to serialize and deserialize
+         * value data for offsets. The default is JSON converter.
+         */
+        private String internalValueConverter = "org.apache.kafka.connect.json.JsonConverter";
+
+        public String getMessageKeyColumns() {
+            return messageKeyColumns;
+        }
+
+        public void setMessageKeyColumns(String messageKeyColumns) {
+            this.messageKeyColumns = messageKeyColumns;
+        }
+
+        public Integer getMaxQueueSize() {
+            return maxQueueSize;
+        }
+
+        public void setMaxQueueSize(Integer maxQueueSize) {
+            this.maxQueueSize = maxQueueSize;
+        }
+
+        public String getDatabaseHistoryKafkaTopic() {
+            return databaseHistoryKafkaTopic;
+        }
+
+        public void setDatabaseHistoryKafkaTopic(
+                String databaseHistoryKafkaTopic) {
+            this.databaseHistoryKafkaTopic = databaseHistoryKafkaTopic;
+        }
+
+        public String getColumnBlacklist() {
+            return columnBlacklist;
+        }
+
+        public void setColumnBlacklist(String columnBlacklist) {
+            this.columnBlacklist = columnBlacklist;
+        }
+
+        public Long getSnapshotDelayMs() {
+            return snapshotDelayMs;
+        }
+
+        public void setSnapshotDelayMs(Long snapshotDelayMs) {
+            this.snapshotDelayMs = snapshotDelayMs;
+        }
+
+        public Integer getDatabaseHistoryKafkaRecoveryAttempts() {
+            return databaseHistoryKafkaRecoveryAttempts;
+        }
+
+        public void setDatabaseHistoryKafkaRecoveryAttempts(
+                Integer databaseHistoryKafkaRecoveryAttempts) {
+            this.databaseHistoryKafkaRecoveryAttempts = databaseHistoryKafkaRecoveryAttempts;
+        }
+
+        public String getTableBlacklist() {
+            return tableBlacklist;
+        }
+
+        public void setTableBlacklist(String tableBlacklist) {
+            this.tableBlacklist = tableBlacklist;
+        }
+
+        public String getTableWhitelist() {
+            return tableWhitelist;
+        }
+
+        public void setTableWhitelist(String tableWhitelist) {
+            this.tableWhitelist = tableWhitelist;
+        }
+
+        public String getDecimalHandlingMode() {
+            return decimalHandlingMode;
+        }
+
+        public void setDecimalHandlingMode(String decimalHandlingMode) {
+            this.decimalHandlingMode = decimalHandlingMode;
+        }
+
+        public Integer getDatabaseHistoryKafkaRecoveryPollIntervalMs() {
+            return databaseHistoryKafkaRecoveryPollIntervalMs;
+        }
+
+        public void setDatabaseHistoryKafkaRecoveryPollIntervalMs(
+                Integer databaseHistoryKafkaRecoveryPollIntervalMs) {
+            this.databaseHistoryKafkaRecoveryPollIntervalMs = databaseHistoryKafkaRecoveryPollIntervalMs;
+        }
+
+        public Long getPollIntervalMs() {
+            return pollIntervalMs;
+        }
+
+        public void setPollIntervalMs(Long pollIntervalMs) {
+            this.pollIntervalMs = pollIntervalMs;
+        }
+
+        public String getHeartbeatTopicsPrefix() {
+            return heartbeatTopicsPrefix;
+        }
+
+        public void setHeartbeatTopicsPrefix(String heartbeatTopicsPrefix) {
+            this.heartbeatTopicsPrefix = heartbeatTopicsPrefix;
+        }
+
+        public Boolean getTableIgnoreBuiltin() {
+            return tableIgnoreBuiltin;
+        }
+
+        public void setTableIgnoreBuiltin(Boolean tableIgnoreBuiltin) {
+            this.tableIgnoreBuiltin = tableIgnoreBuiltin;
+        }
+
+        public Integer getSnapshotFetchSize() {
+            return snapshotFetchSize;
+        }
+
+        public void setSnapshotFetchSize(Integer snapshotFetchSize) {
+            this.snapshotFetchSize = snapshotFetchSize;
+        }
+
+        public Long getSnapshotLockTimeoutMs() {
+            return snapshotLockTimeoutMs;
+        }
+
+        public void setSnapshotLockTimeoutMs(Long snapshotLockTimeoutMs) {
+            this.snapshotLockTimeoutMs = snapshotLockTimeoutMs;
+        }
+
+        public String getDatabaseHistoryFileFilename() {
+            return databaseHistoryFileFilename;
+        }
+
+        public void setDatabaseHistoryFileFilename(
+                String databaseHistoryFileFilename) {
+            this.databaseHistoryFileFilename = databaseHistoryFileFilename;
+        }
+
+        public String getDatabaseDbname() {
+            return databaseDbname;
+        }
+
+        public void setDatabaseDbname(String databaseDbname) {
+            this.databaseDbname = databaseDbname;
+        }
+
+        public String getDatabaseUser() {
+            return databaseUser;
+        }
+
+        public void setDatabaseUser(String databaseUser) {
+            this.databaseUser = databaseUser;
+        }
+
+        public String getSnapshotSelectStatementOverrides() {
+            return snapshotSelectStatementOverrides;
+        }
+
+        public void setSnapshotSelectStatementOverrides(
+                String snapshotSelectStatementOverrides) {
+            this.snapshotSelectStatementOverrides = snapshotSelectStatementOverrides;
+        }
+
+        public String getDatabaseHistoryKafkaBootstrapServers() {
+            return databaseHistoryKafkaBootstrapServers;
+        }
+
+        public void setDatabaseHistoryKafkaBootstrapServers(
+                String databaseHistoryKafkaBootstrapServers) {
+            this.databaseHistoryKafkaBootstrapServers = databaseHistoryKafkaBootstrapServers;
+        }
+
+        public String getTimePrecisionMode() {
+            return timePrecisionMode;
+        }
+
+        public void setTimePrecisionMode(String timePrecisionMode) {
+            this.timePrecisionMode = timePrecisionMode;
+        }
+
+        public String getDatabaseServerName() {
+            return databaseServerName;
+        }
+
+        public void setDatabaseServerName(String databaseServerName) {
+            this.databaseServerName = databaseServerName;
+        }
+
+        public Integer getHeartbeatIntervalMs() {
+            return heartbeatIntervalMs;
+        }
+
+        public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) {
+            this.heartbeatIntervalMs = heartbeatIntervalMs;
+        }
+
+        public String getSourceStructVersion() {
+            return sourceStructVersion;
+        }
+
+        public void setSourceStructVersion(String sourceStructVersion) {
+            this.sourceStructVersion = sourceStructVersion;
+        }
+
+        public Integer getDatabasePort() {
+            return databasePort;
+        }
+
+        public void setDatabasePort(Integer databasePort) {
+            this.databasePort = databasePort;
+        }
+
+        public String getDatabaseHostname() {
+            return databaseHostname;
+        }
+
+        public void setDatabaseHostname(String databaseHostname) {
+            this.databaseHostname = databaseHostname;
+        }
+
+        public String getDatabasePassword() {
+            return databasePassword;
+        }
+
+        public void setDatabasePassword(String databasePassword) {
+            this.databasePassword = databasePassword;
+        }
+
+        public Integer getMaxBatchSize() {
+            return maxBatchSize;
+        }
+
+        public void setMaxBatchSize(Integer maxBatchSize) {
+            this.maxBatchSize = maxBatchSize;
+        }
+
+        public String getSnapshotMode() {
+            return snapshotMode;
+        }
+
+        public void setSnapshotMode(String snapshotMode) {
+            this.snapshotMode = snapshotMode;
+        }
+
+        public String getDatabaseHistory() {
+            return databaseHistory;
+        }
+
+        public void setDatabaseHistory(String databaseHistory) {
+            this.databaseHistory = databaseHistory;
+        }
+
+        public Class getConnectorClass() {
+            return connectorClass;
+        }
+
+        public void setConnectorClass(Class connectorClass) {
+            this.connectorClass = connectorClass;
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        public void setName(String name) {
+            this.name = name;
+        }
+
+        public String getOffsetStorage() {
+            return offsetStorage;
+        }
+
+        public void setOffsetStorage(String offsetStorage) {
+            this.offsetStorage = offsetStorage;
+        }
+
+        public String getOffsetStorageFileName() {
+            return offsetStorageFileName;
+        }
+
+        public void setOffsetStorageFileName(String offsetStorageFileName) {
+            this.offsetStorageFileName = offsetStorageFileName;
+        }
+
+        public String getOffsetStorageTopic() {
+            return offsetStorageTopic;
+        }
+
+        public void setOffsetStorageTopic(String offsetStorageTopic) {
+            this.offsetStorageTopic = offsetStorageTopic;
+        }
+
+        public Integer getOffsetStorageReplicationFactor() {
+            return offsetStorageReplicationFactor;
+        }
+
+        public void setOffsetStorageReplicationFactor(
+                Integer offsetStorageReplicationFactor) {
+            this.offsetStorageReplicationFactor = offsetStorageReplicationFactor;
+        }
+
+        public String getOffsetCommitPolicy() {
+            return offsetCommitPolicy;
+        }
+
+        public void setOffsetCommitPolicy(String offsetCommitPolicy) {
+            this.offsetCommitPolicy = offsetCommitPolicy;
+        }
+
+        public Long getOffsetFlushIntervalMs() {
+            return offsetFlushIntervalMs;
+        }
+
+        public void setOffsetFlushIntervalMs(Long offsetFlushIntervalMs) {
+            this.offsetFlushIntervalMs = offsetFlushIntervalMs;
+        }
+
+        public Long getOffsetCommitTimeoutMs() {
+            return offsetCommitTimeoutMs;
+        }
+
+        public void setOffsetCommitTimeoutMs(Long offsetCommitTimeoutMs) {
+            this.offsetCommitTimeoutMs = offsetCommitTimeoutMs;
+        }
+
+        public Integer getOffsetStoragePartitions() {
+            return offsetStoragePartitions;
+        }
+
+        public void setOffsetStoragePartitions(Integer offsetStoragePartitions) {
+            this.offsetStoragePartitions = offsetStoragePartitions;
+        }
+
+        public String getInternalKeyConverter() {
+            return internalKeyConverter;
+        }
+
+        public void setInternalKeyConverter(String internalKeyConverter) {
+            this.internalKeyConverter = internalKeyConverter;
+        }
+
+        public String getInternalValueConverter() {
+            return internalValueConverter;
+        }
+
+        public void setInternalValueConverter(String internalValueConverter) {
+            this.internalValueConverter = internalValueConverter;
+        }
+    }
+}
\ No newline at end of file
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/LICENSE.txt b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 0000000..6b0b127
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/NOTICE.txt b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 0000000..2e215bf
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,11 @@
+   =========================================================================
+   ==  NOTICE file corresponding to the section 4 d of                    ==
+   ==  the Apache License, Version 2.0,                                   ==
+   ==  in this case for the Apache Camel distribution.                    ==
+   =========================================================================
+
+   This product includes software developed by
+   The Apache Software Foundation (http://www.apache.org/).
+
+   Please read the different LICENSE files present in the licenses directory of
+   this distribution.
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.factories b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.factories
new file mode 100644
index 0000000..36c72e8
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.factories
@@ -0,0 +1,19 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+
+org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
+org.apache.camel.component.debezium.springboot.DebeziumSqlserverComponentAutoConfiguration
diff --git a/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.provides b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.provides
new file mode 100644
index 0000000..98f13f7
--- /dev/null
+++ b/platforms/spring-boot/components-starter/camel-debezium-sqlserver-starter/src/main/resources/META-INF/spring.provides
@@ -0,0 +1,17 @@
+## ---------------------------------------------------------------------------
+## Licensed to the Apache Software Foundation (ASF) under one or more
+## contributor license agreements.  See the NOTICE file distributed with
+## this work for additional information regarding copyright ownership.
+## The ASF licenses this file to You under the Apache License, Version 2.0
+## (the "License"); you may not use this file except in compliance with
+## the License.  You may obtain a copy of the License at
+##
+##      http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## ---------------------------------------------------------------------------
+provides: camel-debezium-sqlserver
diff --git a/platforms/spring-boot/components-starter/pom.xml b/platforms/spring-boot/components-starter/pom.xml
index bf29ca5..944d86f 100644
--- a/platforms/spring-boot/components-starter/pom.xml
+++ b/platforms/spring-boot/components-starter/pom.xml
@@ -172,6 +172,7 @@
     <module>camel-debezium-mongodb-starter</module>
     <module>camel-debezium-mysql-starter</module>
     <module>camel-debezium-postgres-starter</module>
+    <module>camel-debezium-sqlserver-starter</module>
     <module>camel-digitalocean-starter</module>
     <module>camel-direct-starter</module>
     <module>camel-directvm-starter</module>
diff --git a/platforms/spring-boot/spring-boot-dm/camel-spring-boot-dependencies/pom.xml b/platforms/spring-boot/spring-boot-dm/camel-spring-boot-dependencies/pom.xml
index 8d83873..2c4089a 100644
--- a/platforms/spring-boot/spring-boot-dm/camel-spring-boot-dependencies/pom.xml
+++ b/platforms/spring-boot/spring-boot-dm/camel-spring-boot-dependencies/pom.xml
@@ -1094,6 +1094,16 @@
       </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
+        <artifactId>camel-debezium-sqlserver-starter</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
         <artifactId>camel-digitalocean</artifactId>
         <version>${project.version}</version>
       </dependency>
diff --git a/tests/camel-itest-spring-boot/src/test/java/org/apache/camel/itest/springboot/CamelDebeziumSqlserverTest.java b/tests/camel-itest-spring-boot/src/test/java/org/apache/camel/itest/springboot/CamelDebeziumSqlserverTest.java
new file mode 100644
index 0000000..9781454
--- /dev/null
+++ b/tests/camel-itest-spring-boot/src/test/java/org/apache/camel/itest/springboot/CamelDebeziumSqlserverTest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.itest.springboot;
+
+import org.apache.camel.itest.springboot.util.ArquillianPackager;
+import org.jboss.arquillian.container.test.api.Deployment;
+import org.jboss.arquillian.junit.Arquillian;
+import org.jboss.shrinkwrap.api.Archive;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+
+@RunWith(Arquillian.class)
+public class CamelDebeziumSqlserverTest extends AbstractSpringBootTestSupport {
+
+    @Deployment
+    public static Archive<?> createSpringBootPackage() throws Exception {
+        return ArquillianPackager.springBootPackage(createTestConfig());
+    }
+
+    public static ITestConfig createTestConfig() {
+        return new ITestConfigBuilder()
+                .module(inferModuleName(CamelDebeziumSqlserverTest.class))
+                .build();
+    }
+
+    @Test
+    public void componentTests() throws Exception {
+        this.runComponentTest(config);
+        this.runModuleUnitTestsIfEnabled(config);
+    }
+}


[camel] 04/11: CAMEL-14137: removed qtp variable according to pr comment.

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 6d64b13a6ff873c682753d41bc46b88f0dfc9d7d
Author: Luigi De Masi <ld...@redhat.com>
AuthorDate: Tue Nov 5 10:54:15 2019 +0100

     CAMEL-14137: removed qtp variable according to pr comment.
---
 .../camel/component/jetty/JettyHttpComponent.java  | 28 +++++++++++-----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
index d6de2a1..c01f769 100644
--- a/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
+++ b/components/camel-jetty-common/src/main/java/org/apache/camel/component/jetty/JettyHttpComponent.java
@@ -134,7 +134,7 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
     protected boolean useXForwardedForHeader;
     private Integer proxyPort;
     private boolean sendServerVersion = true;
-    private QueuedThreadPool defaultThreadPool;
+    private QueuedThreadPool defaultQueuedThreadPool;
 
     public JettyHttpComponent() {
     }
@@ -481,13 +481,13 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
                         this.removeServerMBean(connectorRef.server);
                         //mbContainer.removeBean(connectorRef.connector);
                     }
-                    if (defaultThreadPool !=null){
+                    if (defaultQueuedThreadPool !=null){
                         try {
-                            defaultThreadPool.stop();
+                            defaultQueuedThreadPool.stop();
                         }catch(Throwable t){
-                            defaultThreadPool.destroy();
+                            defaultQueuedThreadPool.destroy();
                         }finally {
-                            defaultThreadPool =null;
+                            defaultQueuedThreadPool =null;
                         }
                     }
                 }
@@ -1315,20 +1315,20 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
     protected Server createServer() {
         Server s = null;
         ThreadPool tp = threadPool;
-        defaultThreadPool = null;
+        defaultQueuedThreadPool = null;
         // configure thread pool if min/max given
         if (minThreads != null || maxThreads != null) {
             if (getThreadPool() != null) {
                 throw new IllegalArgumentException("You cannot configure both minThreads/maxThreads and a custom threadPool on JettyHttpComponent: " + this);
             }
-            defaultThreadPool = new QueuedThreadPool();
+            defaultQueuedThreadPool = new QueuedThreadPool();
             if (minThreads != null) {
-                defaultThreadPool.setMinThreads(minThreads.intValue());
+                defaultQueuedThreadPool.setMinThreads(minThreads.intValue());
             }
             if (maxThreads != null) {
-                defaultThreadPool.setMaxThreads(maxThreads.intValue());
+                defaultQueuedThreadPool.setMaxThreads(maxThreads.intValue());
             }
-            tp = defaultThreadPool;
+            tp = defaultQueuedThreadPool;
 
         }
         if (tp != null) {
@@ -1350,13 +1350,13 @@ public abstract class JettyHttpComponent extends HttpCommonComponent implements
         if (s == null) {
             s = new Server();
         }
-        if (defaultThreadPool != null) {
+        if (defaultQueuedThreadPool != null) {
             // let the thread names indicate they are from the server
-            defaultThreadPool.setName("CamelJettyServer(" + ObjectHelper.getIdentityHashCode(s) + ")");
+            defaultQueuedThreadPool.setName("CamelJettyServer(" + ObjectHelper.getIdentityHashCode(s) + ")");
             try {
-                defaultThreadPool.start();
+                defaultQueuedThreadPool.start();
             } catch (Exception e) {
-                throw new RuntimeCamelException("Error starting JettyServer thread pool: " + defaultThreadPool, e);
+                throw new RuntimeCamelException("Error starting JettyServer thread pool: " + defaultQueuedThreadPool, e);
             }
         }
         ContextHandlerCollection collection = new ContextHandlerCollection();


[camel] 02/11: CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set

Posted by ac...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit 918d8d30e4bb1be04eb390279e18fb829e819e11
Author: Luigi De Masi <ld...@redhat.com>
AuthorDate: Mon Nov 4 23:30:23 2019 +0100

     CAMEL-14137 Thread leak in camel-jetty component if maxThreads or minThreads property is set
---
 .../component/jetty/JettyThreadPoolSizeTest.java   | 38 ++++++++++------------
 1 file changed, 17 insertions(+), 21 deletions(-)

diff --git a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
index 27aae78..760b285 100644
--- a/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
+++ b/components/camel-jetty/src/test/java/org/apache/camel/component/jetty/JettyThreadPoolSizeTest.java
@@ -12,61 +12,57 @@ public class JettyThreadPoolSizeTest extends BaseJettyTest {
 
     private static final Logger log =  LoggerFactory.getLogger(JettyThreadPoolSizeTest.class);
 
-
-    private JettyHttpComponent jettyComponent;
-
-    private RouteBuilder builder;
-
     @Test
     public void threadPoolTest(){
 
-
-        Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
-        long initialJettyThreadNumber = threadSet.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+        long initialJettyThreadNumber = countJettyThread();
 
         log.info("initial Jetty thread number (expected 5): "+ initialJettyThreadNumber);
 
         context.stop();
 
-        Set<Thread> threadSetAfterStop = Thread.getAllStackTraces().keySet();
-        long jettyThreadNumberAfterStop = threadSetAfterStop.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+        long jettyThreadNumberAfterStop =  countJettyThread();
 
         log.info("Jetty thread number after stopping Camel Context: (expected 0): "+ jettyThreadNumberAfterStop);
 
-
-        jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
+        JettyHttpComponent jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
         jettyComponent.setMinThreads(5);
         jettyComponent.setMaxThreads(5);
 
         context.start();
-        Set<Thread> threadSetAfterRestart = Thread.getAllStackTraces().keySet();
-        long jettyThreadNumberAfterRestart = threadSetAfterRestart.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
 
-        log.info("Jetty thread number after starting Camel Context: (expected 5): "+ jettyThreadNumberAfterRestart);
+        long jettyThreadNumberAfterRestart = countJettyThread();
 
+        log.info("Jetty thread number after starting Camel Context: (expected 5): "+ jettyThreadNumberAfterRestart);
 
-        assertEquals(5,initialJettyThreadNumber);
+        assertEquals(5L,initialJettyThreadNumber);
 
-        assertEquals(0,jettyThreadNumberAfterStop);
+        assertEquals(0L,jettyThreadNumberAfterStop);
 
-        assertEquals(5,jettyThreadNumberAfterRestart);
+        assertEquals(5L,jettyThreadNumberAfterRestart);
     }
 
     @Override
     protected RouteBuilder createRouteBuilder() throws Exception {
 
-        builder = new RouteBuilder() {
+        return new  RouteBuilder() {
             @Override
             public void configure() throws Exception {
                 // setup the jetty component with the custom minThreads
-                jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
+               JettyHttpComponent jettyComponent = (JettyHttpComponent)context.getComponent("jetty");
                 jettyComponent.setMinThreads(5);
                 jettyComponent.setMaxThreads(5);
 
                 from("jetty://http://localhost:{{port}}/myserverWithCustomPoolSize").to("mock:result");
             }
         };
-        return builder;
+    }
+
+    private long countJettyThread(){
+
+        Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
+        return threadSet.stream().filter(thread -> thread.getName().contains("CamelJettyServer")).count();
+
     }
 
 }