Posted to commits@camel.apache.org by ac...@apache.org on 2023/12/20 10:53:35 UTC

(camel) branch main updated: CAMEL-20259 - Remove Camel-HDFS Component (#12499)

This is an automated email from the ASF dual-hosted git repository.

acosentino pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel.git


The following commit(s) were added to refs/heads/main by this push:
     new 705e6a151f9 CAMEL-20259 - Remove Camel-HDFS Component (#12499)
705e6a151f9 is described below

commit 705e6a151f91d28f56c25a0f2ddafa81f5645655
Author: Andrea Cosentino <an...@gmail.com>
AuthorDate: Wed Dec 20 11:53:28 2023 +0100

    CAMEL-20259 - Remove Camel-HDFS Component (#12499)
    
    * CAMEL-20259 - Remove Camel-HDFS Component
    
    Signed-off-by: Andrea Cosentino <an...@gmail.com>
    
    * CAMEL-20259 - Remove Camel-HDFS Component - Docs
    
    Signed-off-by: Andrea Cosentino <an...@gmail.com>
    
    ---------
    
    Signed-off-by: Andrea Cosentino <an...@gmail.com>
---
 bom/camel-bom/pom.xml                              |    5 -
 catalog/camel-allcomponents/pom.xml                |    5 -
 .../org/apache/camel/catalog/components.properties |    1 -
 .../org/apache/camel/catalog/components/hdfs.json  |   91 -
 components/camel-hdfs/pom.xml                      |  164 --
 .../component/hdfs/HdfsComponentConfigurer.java    |   85 -
 .../component/hdfs/HdfsEndpointConfigurer.java     |  298 ---
 .../component/hdfs/HdfsEndpointUriFactory.java     |  119 -
 .../services/org/apache/camel/component.properties |    7 -
 .../services/org/apache/camel/component/hdfs       |    2 -
 .../org/apache/camel/configurer/hdfs-component     |    2 -
 .../org/apache/camel/configurer/hdfs-endpoint      |    2 -
 .../org/apache/camel/urifactory/hdfs-endpoint      |    2 -
 .../org/apache/camel/component/hdfs/hdfs.json      |   91 -
 .../camel-hdfs/src/main/docs/hdfs-component.adoc   |  236 --
 .../camel/component/hdfs/DefaultHdfsFile.java      |  120 -
 .../component/hdfs/HaConfigurationBuilder.java     |  116 -
 .../component/hdfs/HdfsArrayFileTypeHandler.java   |   95 -
 .../component/hdfs/HdfsBloomMapFileHandler.java    |  106 -
 .../apache/camel/component/hdfs/HdfsComponent.java |  106 -
 .../camel/component/hdfs/HdfsCompressionCodec.java |   73 -
 .../camel/component/hdfs/HdfsCompressionType.java  |   24 -
 .../camel/component/hdfs/HdfsConfiguration.java    |  660 -----
 .../apache/camel/component/hdfs/HdfsConstants.java |   68 -
 .../apache/camel/component/hdfs/HdfsConsumer.java  |  266 --
 .../apache/camel/component/hdfs/HdfsEndpoint.java  |   64 -
 .../org/apache/camel/component/hdfs/HdfsFile.java  |   33 -
 .../camel/component/hdfs/HdfsFileSystemType.java   |   52 -
 .../apache/camel/component/hdfs/HdfsFileType.java  |   53 -
 .../apache/camel/component/hdfs/HdfsHeader.java    |   23 -
 .../apache/camel/component/hdfs/HdfsHelper.java    |   29 -
 .../org/apache/camel/component/hdfs/HdfsInfo.java  |   47 -
 .../camel/component/hdfs/HdfsInfoFactory.java      |  108 -
 .../camel/component/hdfs/HdfsInputStream.java      |  194 --
 .../camel/component/hdfs/HdfsMapFileHandler.java   |  105 -
 .../component/hdfs/HdfsNormalFileHandler.java      |  184 --
 .../camel/component/hdfs/HdfsOsgiHelper.java       |   62 -
 .../camel/component/hdfs/HdfsOutputStream.java     |  122 -
 .../apache/camel/component/hdfs/HdfsProducer.java  |  312 ---
 .../component/hdfs/HdfsSequenceFileHandler.java    |  107 -
 .../component/hdfs/HdfsWritableFactories.java      |  256 --
 .../org/apache/camel/component/hdfs/Holder.java    |   48 -
 .../apache/camel/component/hdfs/WritableType.java  |  105 -
 .../hdfs/kerberos/KerberosAuthentication.java      |   62 -
 .../kerberos/KerberosConfigurationBuilder.java     |   69 -
 .../camel/component/hdfs/FromFileToHdfsTest.java   |  104 -
 .../component/hdfs/HaConfigurationBuilderTest.java |   83 -
 .../camel/component/hdfs/HdfsConsumerTest.java     |  233 --
 .../camel/component/hdfs/HdfsInputStreamTest.java  |  124 -
 .../camel/component/hdfs/HdfsOutputStreamTest.java |  177 --
 .../component/hdfs/HdfsProducerConsumerTest.java   |   94 -
 .../component/hdfs/HdfsProducerSplitTest.java      |  132 -
 .../camel/component/hdfs/HdfsProducerTest.java     |  537 ----
 .../camel/component/hdfs/HdfsTestSupport.java      |   62 -
 .../camel/component/hdfs/MockDataInputStream.java  |  124 -
 .../component/hdfs/integration/HdfsAppendIT.java   |  161 --
 .../integration/HdfsConsumerIntegrationIT.java     |  528 ----
 .../HdfsProducerConsumerIntegrationIT.java         |  157 --
 .../hdfs/kerberos/KerberosAuthenticationTest.java  |   66 -
 .../kerberos/KerberosConfigurationBuilderTest.java |   68 -
 .../camel-hdfs/src/test/resources/hdfs-default.xml | 1607 -----------
 .../src/test/resources/hdfs-mac-test.xml           | 1607 -----------
 .../camel-hdfs/src/test/resources/hdfs-test.xml    | 1607 -----------
 .../src/test/resources/hdfs/normal_file.txt        |   28 -
 .../src/test/resources/kerberos/test-kerb5.conf    |   12 -
 .../src/test/resources/kerberos/test-keytab.bin    |    1 -
 .../src/test/resources/log4j2.properties           |   28 -
 components/pom.xml                                 |    1 -
 .../org/apache/camel/main/components.properties    |    1 -
 .../ROOT/pages/camel-4x-upgrade-guide-4_4.adoc     |    4 +
 .../component/ComponentsBuilderFactory.java        |   14 -
 .../AzureStorageQueueComponentBuilderFactory.java  |   18 +
 .../component/dsl/HdfsComponentBuilderFactory.java |  221 --
 .../src/generated/resources/metadata.json          |   22 -
 .../builder/endpoint/EndpointBuilderFactory.java   |    1 -
 .../camel/builder/endpoint/EndpointBuilders.java   |    1 -
 .../builder/endpoint/StaticEndpointBuilders.java   |   59 -
 .../dsl/CosmosDbEndpointBuilderFactory.java        |   12 +-
 .../dsl/DataLakeEndpointBuilderFactory.java        |  194 +-
 .../dsl/EventHubsEndpointBuilderFactory.java       |  101 +
 .../endpoint/dsl/FilesEndpointBuilderFactory.java  |  159 +-
 .../endpoint/dsl/HdfsEndpointBuilderFactory.java   | 2800 --------------------
 .../endpoint/dsl/QueueEndpointBuilderFactory.java  |  101 +
 .../dsl/ServiceBusEndpointBuilderFactory.java      |  101 +
 .../camel-component-known-dependencies.properties  |    1 -
 parent/pom.xml                                     |    5 -
 test-infra/camel-test-infra-hdfs/pom.xml           |   65 -
 .../src/main/resources/META-INF/MANIFEST.MF        |    0
 .../test/infra/hdfs/v2/common/HDFSProperties.java  |   25 -
 .../hdfs/v2/services/EmbeddedHDFSService.java      |   59 -
 .../test/infra/hdfs/v2/services/HDFSContainer.java |   65 -
 .../test/infra/hdfs/v2/services/HDFSService.java   |   37 -
 .../infra/hdfs/v2/services/HDFSServiceFactory.java |   60 -
 test-infra/pom.xml                                 |    1 -
 94 files changed, 562 insertions(+), 15885 deletions(-)

diff --git a/bom/camel-bom/pom.xml b/bom/camel-bom/pom.xml
index 32141213244..fd13cbcf4bd 100644
--- a/bom/camel-bom/pom.xml
+++ b/bom/camel-bom/pom.xml
@@ -907,11 +907,6 @@
         <artifactId>camel-hazelcast</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.camel</groupId>
-        <artifactId>camel-hdfs</artifactId>
-        <version>${project.version}</version>
-      </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
         <artifactId>camel-headersmap</artifactId>
diff --git a/catalog/camel-allcomponents/pom.xml b/catalog/camel-allcomponents/pom.xml
index 679a8f03247..c4f8be0e682 100644
--- a/catalog/camel-allcomponents/pom.xml
+++ b/catalog/camel-allcomponents/pom.xml
@@ -767,11 +767,6 @@
             <artifactId>camel-hazelcast</artifactId>
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.apache.camel</groupId>
-            <artifactId>camel-hdfs</artifactId>
-            <version>${project.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.apache.camel</groupId>
             <artifactId>camel-headersmap</artifactId>
diff --git a/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components.properties b/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components.properties
index b706a98aebc..27aa8ac58dd 100644
--- a/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components.properties
+++ b/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components.properties
@@ -135,7 +135,6 @@ hazelcast-ringbuffer
 hazelcast-seda
 hazelcast-set
 hazelcast-topic
-hdfs
 http
 https
 hwcloud-dms
diff --git a/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components/hdfs.json b/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components/hdfs.json
deleted file mode 100644
index 31bc6c30056..00000000000
--- a/catalog/camel-catalog/src/generated/resources/org/apache/camel/catalog/components/hdfs.json
+++ /dev/null
@@ -1,91 +0,0 @@
-{
-  "component": {
-    "kind": "component",
-    "name": "hdfs",
-    "title": "HDFS",
-    "description": "Read and write from\/to an HDFS filesystem using Hadoop 2.x.",
-    "deprecated": true,
-    "firstVersion": "2.14.0",
-    "label": "bigdata,file",
-    "javaType": "org.apache.camel.component.hdfs.HdfsComponent",
-    "supportLevel": "Stable",
-    "groupId": "org.apache.camel",
-    "artifactId": "camel-hdfs",
-    "version": "4.4.0-SNAPSHOT",
-    "scheme": "hdfs",
-    "extendsScheme": "",
-    "syntax": "hdfs:hostName:port\/path",
-    "async": false,
-    "api": false,
-    "consumerOnly": false,
-    "producerOnly": false,
-    "lenientProperties": false
-  },
-  "componentProperties": {
-    "bridgeErrorHandler": { "index": 0, "kind": "property", "displayName": "Bridge Error Handler", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions (if possible) occurred while the Camel consumer is trying to pickup incoming messages, or the like [...]
-    "lazyStartProducer": { "index": 1, "kind": "property", "displayName": "Lazy Start Producer", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail [...]
-    "autowiredEnabled": { "index": 2, "kind": "property", "displayName": "Autowired Enabled", "group": "advanced", "label": "advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching t [...]
-    "healthCheckConsumerEnabled": { "index": 3, "kind": "property", "displayName": "Health Check Consumer Enabled", "group": "health", "label": "health", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Used for enabling or disabling all consumer based health checks from this component" },
-    "healthCheckProducerEnabled": { "index": 4, "kind": "property", "displayName": "Health Check Producer Enabled", "group": "health", "label": "health", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Used for enabling or disabling all producer based health checks from this component. Notice: Camel has by default disabled all producer based health-checks. You can turn on producer [...]
-    "jAASConfiguration": { "index": 5, "kind": "property", "displayName": "JAASConfiguration", "group": "security", "label": "security", "required": false, "type": "object", "javaType": "javax.security.auth.login.Configuration", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "To use the given configuration for security with JAAS." },
-    "kerberosConfigFile": { "index": 6, "kind": "property", "displayName": "Kerberos Config File", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "To use kerberos authentication, set the value of the 'java.security.krb5.conf' environment variable to an existing file. If the environment variable is already set, warn if different th [...]
-  },
-  "headers": {
-    "CamelHdfsClose": { "index": 0, "kind": "header", "displayName": "", "group": "producer", "label": "producer", "required": false, "javaType": "Boolean", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "Indicates to close the stream", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#HDFS_CLOSE" },
-    "CamelFileName": { "index": 1, "kind": "header", "displayName": "", "group": "common", "label": "", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "(producer) Specifies the name of the file to write (relative to the endpoint path). The name can be a String or an Expression object. Only relevant when not using a split strategy. (consumer) Specifies the name of the file to read", "constantName": " [...]
-    "CamelFileNameConsumed": { "index": 2, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The name of the file consumed", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_NAME_CONSUMED" },
-    "CamelFileAbsolutePath": { "index": 3, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The absolute path of the file", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_ABSOLUTE_PATH" },
-    "KEY": { "index": 4, "kind": "header", "displayName": "", "group": "common", "label": "", "required": false, "javaType": "Object", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The HDFS key", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#KEY" },
-    "CamelFileLength": { "index": 5, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "Long", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The size of the file", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_LENGTH" }
-  },
-  "properties": {
-    "hostName": { "index": 0, "kind": "path", "displayName": "Host Name", "group": "common", "label": "", "required": true, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "HDFS host to use" },
-    "port": { "index": 1, "kind": "path", "displayName": "Port", "group": "common", "label": "", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 8020, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "HDFS port to use" },
-    "path": { "index": 2, "kind": "path", "displayName": "Path", "group": "common", "label": "", "required": true, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The directory path to use" },
-    "connectOnStartup": { "index": 3, "kind": "parameter", "displayName": "Connect On Startup", "group": "common", "label": "", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Whether to connect to the HDFS file system on starting the producer\/consumer. If false then the con [...]
-    "fileSystemType": { "index": 4, "kind": "parameter", "displayName": "File System Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsFileSystemType", "enum": [ "LOCAL", "HDFS" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "HDFS", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Set to LOCAL to not use HDFS bu [...]
-    "fileType": { "index": 5, "kind": "parameter", "displayName": "File Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsFileType", "enum": [ "NORMAL_FILE", "SEQUENCE_FILE", "MAP_FILE", "BLOOMMAP_FILE", "ARRAY_FILE" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NORMAL_FILE", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", " [...]
-    "keyType": { "index": 6, "kind": "parameter", "displayName": "Key Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.WritableType", "enum": [ "NULL", "BOOLEAN", "BYTE", "SHORT", "INT", "FLOAT", "LONG", "DOUBLE", "TEXT", "BYTES" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NULL", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config" [...]
-    "namedNodes": { "index": 7, "kind": "parameter", "displayName": "Named Nodes", "group": "common", "label": "", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "A comma separated list of named nodes (e.g. srv11.example.com:8020,srv12.example.com:8020)" },
-    "owner": { "index": 8, "kind": "parameter", "displayName": "Owner", "group": "common", "label": "", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The file owner must match this owner for the consumer to pickup the file. Otherwise the file is skipped." },
-    "valueType": { "index": 9, "kind": "parameter", "displayName": "Value Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.WritableType", "enum": [ "NULL", "BOOLEAN", "BYTE", "SHORT", "INT", "FLOAT", "LONG", "DOUBLE", "TEXT", "BYTES" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "BYTES", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "co [...]
-    "pattern": { "index": 10, "kind": "parameter", "displayName": "Pattern", "group": "consumer", "label": "consumer", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "*", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The pattern used for scanning the directory" },
-    "sendEmptyMessageWhenIdle": { "index": 11, "kind": "parameter", "displayName": "Send Empty Message When Idle", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead." },
-    "streamDownload": { "index": 12, "kind": "parameter", "displayName": "Stream Download", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Sets the download method to use when not using a local working directory. If set to true, the [...]
-    "bridgeErrorHandler": { "index": 13, "kind": "parameter", "displayName": "Bridge Error Handler", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions (if possible) occurred while the Camel consumer is trying to pickup incoming [...]
-    "exceptionHandler": { "index": 14, "kind": "parameter", "displayName": "Exception Handler", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.spi.ExceptionHandler", "optionalPrefix": "consumer.", "deprecated": false, "autowired": false, "secret": false, "description": "To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By de [...]
-    "exchangePattern": { "index": 15, "kind": "parameter", "displayName": "Exchange Pattern", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.ExchangePattern", "enum": [ "InOnly", "InOut" ], "deprecated": false, "autowired": false, "secret": false, "description": "Sets the exchange pattern when the consumer creates an exchange." },
-    "pollStrategy": { "index": 16, "kind": "parameter", "displayName": "Poll Strategy", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.spi.PollingConsumerPollStrategy", "deprecated": false, "autowired": false, "secret": false, "description": "A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the po [...]
-    "append": { "index": 17, "kind": "parameter", "displayName": "Append", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Append to existing file. Notice that not all HDFS file systems support the append option." },
-    "overwrite": { "index": 18, "kind": "parameter", "displayName": "Overwrite", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Whether to overwrite existing files with the same name" },
-    "lazyStartProducer": { "index": 19, "kind": "parameter", "displayName": "Lazy Start Producer", "group": "producer (advanced)", "label": "producer,advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a produ [...]
-    "blockSize": { "index": 20, "kind": "parameter", "displayName": "Block Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 67108864, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The size of the HDFS blocks" },
-    "bufferSize": { "index": 21, "kind": "parameter", "displayName": "Buffer Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 4096, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The buffer size used by HDFS" },
-    "checkIdleInterval": { "index": 22, "kind": "parameter", "displayName": "Check Idle Interval", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 500, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "How often (time in millis) in to run the idle checker background task. This option is only i [...]
-    "chunkSize": { "index": 23, "kind": "parameter", "displayName": "Chunk Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 4096, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "When reading a normal file, this is split into chunks producing a message per chunk." },
-    "compressionCodec": { "index": 24, "kind": "parameter", "displayName": "Compression Codec", "group": "advanced", "label": "advanced", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsCompressionCodec", "enum": [ "DEFAULT", "GZIP", "BZIP2", "SNAPPY", "LZ4", "ZSTANDARD" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "DEFAULT", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField [...]
-    "compressionType": { "index": 25, "kind": "parameter", "displayName": "Compression Type", "group": "advanced", "label": "advanced", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsCompressionType", "enum": [ "NONE", "RECORD", "BLOCK" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NONE", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The comp [...]
-    "openedSuffix": { "index": 26, "kind": "parameter", "displayName": "Opened Suffix", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "opened", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "When a file is opened for reading\/writing the file is renamed with this suffix to avo [...]
-    "readSuffix": { "index": 27, "kind": "parameter", "displayName": "Read Suffix", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "read", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Once the file has been read is renamed with this suffix to avoid to read it again." },
-    "replication": { "index": 28, "kind": "parameter", "displayName": "Replication", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "short", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "3", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The HDFS replication factor" },
-    "splitStrategy": { "index": 29, "kind": "parameter", "displayName": "Split Strategy", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So, f [...]
-    "maxMessagesPerPoll": { "index": 30, "kind": "parameter", "displayName": "Max Messages Per Poll", "group": "filter", "label": "consumer,filter", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 100, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "To define a maximum messages to gather per poll. By default a limit of 100 is set. C [...]
-    "backoffErrorThreshold": { "index": 31, "kind": "parameter", "displayName": "Backoff Error Threshold", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in." },
-    "backoffIdleThreshold": { "index": 32, "kind": "parameter", "displayName": "Backoff Idle Threshold", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "The number of subsequent idle polls that should happen before the backoffMultipler should kick-in." },
-    "backoffMultiplier": { "index": 33, "kind": "parameter", "displayName": "Backoff Multiplier", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is h [...]
-    "delay": { "index": 34, "kind": "parameter", "displayName": "Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 500, "description": "Milliseconds before the next poll." },
-    "greedy": { "index": 35, "kind": "parameter", "displayName": "Greedy", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages." },
-    "initialDelay": { "index": 36, "kind": "parameter", "displayName": "Initial Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 1000, "description": "Milliseconds before the first poll starts." },
-    "repeatCount": { "index": 37, "kind": "parameter", "displayName": "Repeat Count", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 0, "description": "Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever." },
-    "runLoggingLevel": { "index": 38, "kind": "parameter", "displayName": "Run Logging Level", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "org.apache.camel.LoggingLevel", "enum": [ "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "TRACE", "description": "The consumer logs a start\/complete log line when it polls. This option allows you to configure the l [...]
-    "scheduledExecutorService": { "index": 39, "kind": "parameter", "displayName": "Scheduled Executor Service", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.concurrent.ScheduledExecutorService", "deprecated": false, "autowired": false, "secret": false, "description": "Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool." },
-    "scheduler": { "index": 40, "kind": "parameter", "displayName": "Scheduler", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.lang.Object", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "none", "description": "To use a cron scheduler from either camel-spring or camel-quartz component. Use value spring or quartz for built in scheduler" },
-    "schedulerProperties": { "index": 41, "kind": "parameter", "displayName": "Scheduler Properties", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.Map<java.lang.String, java.lang.Object>", "prefix": "scheduler.", "multiValue": true, "deprecated": false, "autowired": false, "secret": false, "description": "To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler." },
-    "startScheduler": { "index": 42, "kind": "parameter", "displayName": "Start Scheduler", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Whether the scheduler should be auto started." },
-    "timeUnit": { "index": 43, "kind": "parameter", "displayName": "Time Unit", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.concurrent.TimeUnit", "enum": [ "NANOSECONDS", "MICROSECONDS", "MILLISECONDS", "SECONDS", "MINUTES", "HOURS", "DAYS" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "MILLISECONDS", "description": "Time unit for initialDelay and delay options." },
-    "useFixedDelay": { "index": 44, "kind": "parameter", "displayName": "Use Fixed Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details." },
-    "kerberosConfigFileLocation": { "index": 45, "kind": "parameter", "displayName": "Kerberos Config File Location", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The location of the kerb5.conf file (https:\/\/web.mit.edu\/kerberos\/krb5-1.12\/d [...]
-    "kerberosKeytabLocation": { "index": 46, "kind": "parameter", "displayName": "Kerberos Keytab Location", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The location of the keytab file used to authenticate with the kerberos nodes (contains pair [...]
-    "kerberosUsername": { "index": 47, "kind": "parameter", "displayName": "Kerberos Username", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The username used to authenticate with the kerberos nodes" }
-  }
-}
diff --git a/components/camel-hdfs/pom.xml b/components/camel-hdfs/pom.xml
deleted file mode 100644
index 036167c1307..00000000000
--- a/components/camel-hdfs/pom.xml
+++ /dev/null
@@ -1,164 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.camel</groupId>
-        <artifactId>components</artifactId>
-        <version>4.4.0-SNAPSHOT</version>
-    </parent>
-
-    <artifactId>camel-hdfs</artifactId>
-    <packaging>jar</packaging>
-    <name>Camel :: HDFS (deprecated)</name>
-    <description>Camel HDFS support with Hadoop 3.x libraries</description>
-
-    <properties>
-        <!-- HDFS is not available on this platform -->
-        <skipTests.s390x>true</skipTests.s390x>
-        <skipITs.s390x>true</skipITs.s390x>
-    </properties>
-
-    <dependencies>
-
-        <dependency>
-            <groupId>org.apache.camel</groupId>
-            <artifactId>camel-support</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-common</artifactId>
-            <version>${hadoop3-version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.google.code.findbugs</groupId>
-                    <artifactId>jsr305</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-reload4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>commons-lang</groupId>
-                    <artifactId>commons-lang</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-hdfs</artifactId>
-            <version>${hadoop3-version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-hdfs-client</artifactId>
-            <version>${hadoop3-version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <version>${commons-lang-version}</version>
-        </dependency>
-
-        <!-- testing -->
-        <dependency>
-            <groupId>org.apache.camel</groupId>
-            <artifactId>camel-test-junit5</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.camel</groupId>
-            <artifactId>camel-core-languages</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.junit.jupiter</groupId>
-            <artifactId>junit-jupiter</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <version>${mockito-version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.hamcrest</groupId>
-            <artifactId>hamcrest</artifactId>
-            <version>${hamcrest-version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.lz4</groupId>
-            <artifactId>lz4-java</artifactId>
-            <version>1.8.0</version>
-            <scope>test</scope>
-        </dependency>
-
-        <!-- test infra -->
-        <dependency>
-            <groupId>org.apache.camel</groupId>
-            <artifactId>camel-test-infra-hdfs</artifactId>
-            <version>${project.version}</version>
-            <type>test-jar</type>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>jcl-over-slf4j</artifactId>
-            <version>${slf4j-version}</version>
-            <scope>test</scope>
-        </dependency>
-
-    </dependencies>
-
-    <!-- skip tests on Windows due CAMEL-8445 -->
-    <profiles>
-        <profile>
-            <id>Windows</id>
-            <activation>
-                <os>
-                    <family>Windows</family>
-                </os>
-            </activation>
-            <properties>
-                <skipTests>true</skipTests>
-            </properties>
-        </profile>
-    </profiles>
-
-</project>
diff --git a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsComponentConfigurer.java b/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsComponentConfigurer.java
deleted file mode 100644
index bf05216d738..00000000000
--- a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsComponentConfigurer.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Generated by camel build tools - do NOT edit this file! */
-package org.apache.camel.component.hdfs;
-
-import java.util.Map;
-
-import org.apache.camel.CamelContext;
-import org.apache.camel.spi.ExtendedPropertyConfigurerGetter;
-import org.apache.camel.spi.PropertyConfigurerGetter;
-import org.apache.camel.spi.ConfigurerStrategy;
-import org.apache.camel.spi.GeneratedPropertyConfigurer;
-import org.apache.camel.util.CaseInsensitiveMap;
-import org.apache.camel.support.component.PropertyConfigurerSupport;
-
-/**
- * Generated by camel build tools - do NOT edit this file!
- */
-@SuppressWarnings("unchecked")
-public class HdfsComponentConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
-
-    @Override
-    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
-        HdfsComponent target = (HdfsComponent) obj;
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "autowiredenabled":
-        case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
-        case "healthcheckconsumerenabled":
-        case "healthCheckConsumerEnabled": target.setHealthCheckConsumerEnabled(property(camelContext, boolean.class, value)); return true;
-        case "healthcheckproducerenabled":
-        case "healthCheckProducerEnabled": target.setHealthCheckProducerEnabled(property(camelContext, boolean.class, value)); return true;
-        case "jaasconfiguration":
-        case "jAASConfiguration": target.setJAASConfiguration(property(camelContext, javax.security.auth.login.Configuration.class, value)); return true;
-        case "kerberosconfigfile":
-        case "kerberosConfigFile": target.setKerberosConfigFile(property(camelContext, java.lang.String.class, value)); return true;
-        case "lazystartproducer":
-        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
-        default: return false;
-        }
-    }
-
-    @Override
-    public Class<?> getOptionType(String name, boolean ignoreCase) {
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "autowiredenabled":
-        case "autowiredEnabled": return boolean.class;
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": return boolean.class;
-        case "healthcheckconsumerenabled":
-        case "healthCheckConsumerEnabled": return boolean.class;
-        case "healthcheckproducerenabled":
-        case "healthCheckProducerEnabled": return boolean.class;
-        case "jaasconfiguration":
-        case "jAASConfiguration": return javax.security.auth.login.Configuration.class;
-        case "kerberosconfigfile":
-        case "kerberosConfigFile": return java.lang.String.class;
-        case "lazystartproducer":
-        case "lazyStartProducer": return boolean.class;
-        default: return null;
-        }
-    }
-
-    @Override
-    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
-        HdfsComponent target = (HdfsComponent) obj;
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "autowiredenabled":
-        case "autowiredEnabled": return target.isAutowiredEnabled();
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": return target.isBridgeErrorHandler();
-        case "healthcheckconsumerenabled":
-        case "healthCheckConsumerEnabled": return target.isHealthCheckConsumerEnabled();
-        case "healthcheckproducerenabled":
-        case "healthCheckProducerEnabled": return target.isHealthCheckProducerEnabled();
-        case "jaasconfiguration":
-        case "jAASConfiguration": return target.getJAASConfiguration();
-        case "kerberosconfigfile":
-        case "kerberosConfigFile": return target.getKerberosConfigFile();
-        case "lazystartproducer":
-        case "lazyStartProducer": return target.isLazyStartProducer();
-        default: return null;
-        }
-    }
-}
-
diff --git a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointConfigurer.java b/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointConfigurer.java
deleted file mode 100644
index 1a22df10193..00000000000
--- a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointConfigurer.java
+++ /dev/null
@@ -1,298 +0,0 @@
-/* Generated by camel build tools - do NOT edit this file! */
-package org.apache.camel.component.hdfs;
-
-import java.util.Map;
-
-import org.apache.camel.CamelContext;
-import org.apache.camel.spi.ExtendedPropertyConfigurerGetter;
-import org.apache.camel.spi.PropertyConfigurerGetter;
-import org.apache.camel.spi.ConfigurerStrategy;
-import org.apache.camel.spi.GeneratedPropertyConfigurer;
-import org.apache.camel.util.CaseInsensitiveMap;
-import org.apache.camel.support.component.PropertyConfigurerSupport;
-
-/**
- * Generated by camel build tools - do NOT edit this file!
- */
-@SuppressWarnings("unchecked")
-public class HdfsEndpointConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
-
-    @Override
-    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
-        HdfsEndpoint target = (HdfsEndpoint) obj;
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "append": target.getConfig().setAppend(property(camelContext, boolean.class, value)); return true;
-        case "backofferrorthreshold":
-        case "backoffErrorThreshold": target.setBackoffErrorThreshold(property(camelContext, int.class, value)); return true;
-        case "backoffidlethreshold":
-        case "backoffIdleThreshold": target.setBackoffIdleThreshold(property(camelContext, int.class, value)); return true;
-        case "backoffmultiplier":
-        case "backoffMultiplier": target.setBackoffMultiplier(property(camelContext, int.class, value)); return true;
-        case "blocksize":
-        case "blockSize": target.getConfig().setBlockSize(property(camelContext, long.class, value)); return true;
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
-        case "buffersize":
-        case "bufferSize": target.getConfig().setBufferSize(property(camelContext, int.class, value)); return true;
-        case "checkidleinterval":
-        case "checkIdleInterval": target.getConfig().setCheckIdleInterval(property(camelContext, int.class, value)); return true;
-        case "chunksize":
-        case "chunkSize": target.getConfig().setChunkSize(property(camelContext, int.class, value)); return true;
-        case "compressioncodec":
-        case "compressionCodec": target.getConfig().setCompressionCodec(property(camelContext, org.apache.camel.component.hdfs.HdfsCompressionCodec.class, value)); return true;
-        case "compressiontype":
-        case "compressionType": target.getConfig().setCompressionType(property(camelContext, org.apache.camel.component.hdfs.HdfsCompressionType.class, value)); return true;
-        case "connectonstartup":
-        case "connectOnStartup": target.getConfig().setConnectOnStartup(property(camelContext, boolean.class, value)); return true;
-        case "delay": target.setDelay(property(camelContext, long.class, value)); return true;
-        case "exceptionhandler":
-        case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
-        case "exchangepattern":
-        case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
-        case "filesystemtype":
-        case "fileSystemType": target.getConfig().setFileSystemType(property(camelContext, org.apache.camel.component.hdfs.HdfsFileSystemType.class, value)); return true;
-        case "filetype":
-        case "fileType": target.getConfig().setFileType(property(camelContext, org.apache.camel.component.hdfs.HdfsFileType.class, value)); return true;
-        case "greedy": target.setGreedy(property(camelContext, boolean.class, value)); return true;
-        case "initialdelay":
-        case "initialDelay": target.setInitialDelay(property(camelContext, long.class, value)); return true;
-        case "kerberosconfigfilelocation":
-        case "kerberosConfigFileLocation": target.getConfig().setKerberosConfigFileLocation(property(camelContext, java.lang.String.class, value)); return true;
-        case "kerberoskeytablocation":
-        case "kerberosKeytabLocation": target.getConfig().setKerberosKeytabLocation(property(camelContext, java.lang.String.class, value)); return true;
-        case "kerberosusername":
-        case "kerberosUsername": target.getConfig().setKerberosUsername(property(camelContext, java.lang.String.class, value)); return true;
-        case "keytype":
-        case "keyType": target.getConfig().setKeyType(property(camelContext, org.apache.camel.component.hdfs.WritableType.class, value)); return true;
-        case "lazystartproducer":
-        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
-        case "maxmessagesperpoll":
-        case "maxMessagesPerPoll": target.getConfig().setMaxMessagesPerPoll(property(camelContext, int.class, value)); return true;
-        case "namednodes":
-        case "namedNodes": target.getConfig().setNamedNodes(property(camelContext, java.lang.String.class, value)); return true;
-        case "openedsuffix":
-        case "openedSuffix": target.getConfig().setOpenedSuffix(property(camelContext, java.lang.String.class, value)); return true;
-        case "overwrite": target.getConfig().setOverwrite(property(camelContext, boolean.class, value)); return true;
-        case "owner": target.getConfig().setOwner(property(camelContext, java.lang.String.class, value)); return true;
-        case "pattern": target.getConfig().setPattern(property(camelContext, java.lang.String.class, value)); return true;
-        case "pollstrategy":
-        case "pollStrategy": target.setPollStrategy(property(camelContext, org.apache.camel.spi.PollingConsumerPollStrategy.class, value)); return true;
-        case "readsuffix":
-        case "readSuffix": target.getConfig().setReadSuffix(property(camelContext, java.lang.String.class, value)); return true;
-        case "repeatcount":
-        case "repeatCount": target.setRepeatCount(property(camelContext, long.class, value)); return true;
-        case "replication": target.getConfig().setReplication(property(camelContext, short.class, value)); return true;
-        case "runlogginglevel":
-        case "runLoggingLevel": target.setRunLoggingLevel(property(camelContext, org.apache.camel.LoggingLevel.class, value)); return true;
-        case "scheduledexecutorservice":
-        case "scheduledExecutorService": target.setScheduledExecutorService(property(camelContext, java.util.concurrent.ScheduledExecutorService.class, value)); return true;
-        case "scheduler": target.setScheduler(property(camelContext, java.lang.Object.class, value)); return true;
-        case "schedulerproperties":
-        case "schedulerProperties": target.setSchedulerProperties(property(camelContext, java.util.Map.class, value)); return true;
-        case "sendemptymessagewhenidle":
-        case "sendEmptyMessageWhenIdle": target.setSendEmptyMessageWhenIdle(property(camelContext, boolean.class, value)); return true;
-        case "splitstrategy":
-        case "splitStrategy": target.getConfig().setSplitStrategy(property(camelContext, java.lang.String.class, value)); return true;
-        case "startscheduler":
-        case "startScheduler": target.setStartScheduler(property(camelContext, boolean.class, value)); return true;
-        case "streamdownload":
-        case "streamDownload": target.getConfig().setStreamDownload(property(camelContext, boolean.class, value)); return true;
-        case "timeunit":
-        case "timeUnit": target.setTimeUnit(property(camelContext, java.util.concurrent.TimeUnit.class, value)); return true;
-        case "usefixeddelay":
-        case "useFixedDelay": target.setUseFixedDelay(property(camelContext, boolean.class, value)); return true;
-        case "valuetype":
-        case "valueType": target.getConfig().setValueType(property(camelContext, org.apache.camel.component.hdfs.WritableType.class, value)); return true;
-        default: return false;
-        }
-    }
-
-    @Override
-    public Class<?> getOptionType(String name, boolean ignoreCase) {
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "append": return boolean.class;
-        case "backofferrorthreshold":
-        case "backoffErrorThreshold": return int.class;
-        case "backoffidlethreshold":
-        case "backoffIdleThreshold": return int.class;
-        case "backoffmultiplier":
-        case "backoffMultiplier": return int.class;
-        case "blocksize":
-        case "blockSize": return long.class;
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": return boolean.class;
-        case "buffersize":
-        case "bufferSize": return int.class;
-        case "checkidleinterval":
-        case "checkIdleInterval": return int.class;
-        case "chunksize":
-        case "chunkSize": return int.class;
-        case "compressioncodec":
-        case "compressionCodec": return org.apache.camel.component.hdfs.HdfsCompressionCodec.class;
-        case "compressiontype":
-        case "compressionType": return org.apache.camel.component.hdfs.HdfsCompressionType.class;
-        case "connectonstartup":
-        case "connectOnStartup": return boolean.class;
-        case "delay": return long.class;
-        case "exceptionhandler":
-        case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
-        case "exchangepattern":
-        case "exchangePattern": return org.apache.camel.ExchangePattern.class;
-        case "filesystemtype":
-        case "fileSystemType": return org.apache.camel.component.hdfs.HdfsFileSystemType.class;
-        case "filetype":
-        case "fileType": return org.apache.camel.component.hdfs.HdfsFileType.class;
-        case "greedy": return boolean.class;
-        case "initialdelay":
-        case "initialDelay": return long.class;
-        case "kerberosconfigfilelocation":
-        case "kerberosConfigFileLocation": return java.lang.String.class;
-        case "kerberoskeytablocation":
-        case "kerberosKeytabLocation": return java.lang.String.class;
-        case "kerberosusername":
-        case "kerberosUsername": return java.lang.String.class;
-        case "keytype":
-        case "keyType": return org.apache.camel.component.hdfs.WritableType.class;
-        case "lazystartproducer":
-        case "lazyStartProducer": return boolean.class;
-        case "maxmessagesperpoll":
-        case "maxMessagesPerPoll": return int.class;
-        case "namednodes":
-        case "namedNodes": return java.lang.String.class;
-        case "openedsuffix":
-        case "openedSuffix": return java.lang.String.class;
-        case "overwrite": return boolean.class;
-        case "owner": return java.lang.String.class;
-        case "pattern": return java.lang.String.class;
-        case "pollstrategy":
-        case "pollStrategy": return org.apache.camel.spi.PollingConsumerPollStrategy.class;
-        case "readsuffix":
-        case "readSuffix": return java.lang.String.class;
-        case "repeatcount":
-        case "repeatCount": return long.class;
-        case "replication": return short.class;
-        case "runlogginglevel":
-        case "runLoggingLevel": return org.apache.camel.LoggingLevel.class;
-        case "scheduledexecutorservice":
-        case "scheduledExecutorService": return java.util.concurrent.ScheduledExecutorService.class;
-        case "scheduler": return java.lang.Object.class;
-        case "schedulerproperties":
-        case "schedulerProperties": return java.util.Map.class;
-        case "sendemptymessagewhenidle":
-        case "sendEmptyMessageWhenIdle": return boolean.class;
-        case "splitstrategy":
-        case "splitStrategy": return java.lang.String.class;
-        case "startscheduler":
-        case "startScheduler": return boolean.class;
-        case "streamdownload":
-        case "streamDownload": return boolean.class;
-        case "timeunit":
-        case "timeUnit": return java.util.concurrent.TimeUnit.class;
-        case "usefixeddelay":
-        case "useFixedDelay": return boolean.class;
-        case "valuetype":
-        case "valueType": return org.apache.camel.component.hdfs.WritableType.class;
-        default: return null;
-        }
-    }
-
-    @Override
-    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
-        HdfsEndpoint target = (HdfsEndpoint) obj;
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "append": return target.getConfig().isAppend();
-        case "backofferrorthreshold":
-        case "backoffErrorThreshold": return target.getBackoffErrorThreshold();
-        case "backoffidlethreshold":
-        case "backoffIdleThreshold": return target.getBackoffIdleThreshold();
-        case "backoffmultiplier":
-        case "backoffMultiplier": return target.getBackoffMultiplier();
-        case "blocksize":
-        case "blockSize": return target.getConfig().getBlockSize();
-        case "bridgeerrorhandler":
-        case "bridgeErrorHandler": return target.isBridgeErrorHandler();
-        case "buffersize":
-        case "bufferSize": return target.getConfig().getBufferSize();
-        case "checkidleinterval":
-        case "checkIdleInterval": return target.getConfig().getCheckIdleInterval();
-        case "chunksize":
-        case "chunkSize": return target.getConfig().getChunkSize();
-        case "compressioncodec":
-        case "compressionCodec": return target.getConfig().getCompressionCodec();
-        case "compressiontype":
-        case "compressionType": return target.getConfig().getCompressionType();
-        case "connectonstartup":
-        case "connectOnStartup": return target.getConfig().isConnectOnStartup();
-        case "delay": return target.getDelay();
-        case "exceptionhandler":
-        case "exceptionHandler": return target.getExceptionHandler();
-        case "exchangepattern":
-        case "exchangePattern": return target.getExchangePattern();
-        case "filesystemtype":
-        case "fileSystemType": return target.getConfig().getFileSystemType();
-        case "filetype":
-        case "fileType": return target.getConfig().getFileType();
-        case "greedy": return target.isGreedy();
-        case "initialdelay":
-        case "initialDelay": return target.getInitialDelay();
-        case "kerberosconfigfilelocation":
-        case "kerberosConfigFileLocation": return target.getConfig().getKerberosConfigFileLocation();
-        case "kerberoskeytablocation":
-        case "kerberosKeytabLocation": return target.getConfig().getKerberosKeytabLocation();
-        case "kerberosusername":
-        case "kerberosUsername": return target.getConfig().getKerberosUsername();
-        case "keytype":
-        case "keyType": return target.getConfig().getKeyType();
-        case "lazystartproducer":
-        case "lazyStartProducer": return target.isLazyStartProducer();
-        case "maxmessagesperpoll":
-        case "maxMessagesPerPoll": return target.getConfig().getMaxMessagesPerPoll();
-        case "namednodes":
-        case "namedNodes": return target.getConfig().getNamedNodes();
-        case "openedsuffix":
-        case "openedSuffix": return target.getConfig().getOpenedSuffix();
-        case "overwrite": return target.getConfig().isOverwrite();
-        case "owner": return target.getConfig().getOwner();
-        case "pattern": return target.getConfig().getPattern();
-        case "pollstrategy":
-        case "pollStrategy": return target.getPollStrategy();
-        case "readsuffix":
-        case "readSuffix": return target.getConfig().getReadSuffix();
-        case "repeatcount":
-        case "repeatCount": return target.getRepeatCount();
-        case "replication": return target.getConfig().getReplication();
-        case "runlogginglevel":
-        case "runLoggingLevel": return target.getRunLoggingLevel();
-        case "scheduledexecutorservice":
-        case "scheduledExecutorService": return target.getScheduledExecutorService();
-        case "scheduler": return target.getScheduler();
-        case "schedulerproperties":
-        case "schedulerProperties": return target.getSchedulerProperties();
-        case "sendemptymessagewhenidle":
-        case "sendEmptyMessageWhenIdle": return target.isSendEmptyMessageWhenIdle();
-        case "splitstrategy":
-        case "splitStrategy": return target.getConfig().getSplitStrategy();
-        case "startscheduler":
-        case "startScheduler": return target.isStartScheduler();
-        case "streamdownload":
-        case "streamDownload": return target.getConfig().isStreamDownload();
-        case "timeunit":
-        case "timeUnit": return target.getTimeUnit();
-        case "usefixeddelay":
-        case "useFixedDelay": return target.isUseFixedDelay();
-        case "valuetype":
-        case "valueType": return target.getConfig().getValueType();
-        default: return null;
-        }
-    }
-
-    @Override
-    public Object getCollectionValueType(Object target, String name, boolean ignoreCase) {
-        switch (ignoreCase ? name.toLowerCase() : name) {
-        case "schedulerproperties":
-        case "schedulerProperties": return java.lang.Object.class;
-        default: return null;
-        }
-    }
-}
-
diff --git a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointUriFactory.java b/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointUriFactory.java
deleted file mode 100644
index 8758ba5fc93..00000000000
--- a/components/camel-hdfs/src/generated/java/org/apache/camel/component/hdfs/HdfsEndpointUriFactory.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Generated by camel build tools - do NOT edit this file! */
-package org.apache.camel.component.hdfs;
-
-import java.net.URISyntaxException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.camel.spi.EndpointUriFactory;
-
-/**
- * Generated by camel build tools - do NOT edit this file!
- */
-public class HdfsEndpointUriFactory extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
-
-    private static final String BASE = ":hostName:port/path";
-
-    private static final Set<String> PROPERTY_NAMES;
-    private static final Set<String> SECRET_PROPERTY_NAMES;
-    private static final Set<String> MULTI_VALUE_PREFIXES;
-    static {
-        Set<String> props = new HashSet<>(48);
-        props.add("append");
-        props.add("backoffErrorThreshold");
-        props.add("backoffIdleThreshold");
-        props.add("backoffMultiplier");
-        props.add("blockSize");
-        props.add("bridgeErrorHandler");
-        props.add("bufferSize");
-        props.add("checkIdleInterval");
-        props.add("chunkSize");
-        props.add("compressionCodec");
-        props.add("compressionType");
-        props.add("connectOnStartup");
-        props.add("delay");
-        props.add("exceptionHandler");
-        props.add("exchangePattern");
-        props.add("fileSystemType");
-        props.add("fileType");
-        props.add("greedy");
-        props.add("hostName");
-        props.add("initialDelay");
-        props.add("kerberosConfigFileLocation");
-        props.add("kerberosKeytabLocation");
-        props.add("kerberosUsername");
-        props.add("keyType");
-        props.add("lazyStartProducer");
-        props.add("maxMessagesPerPoll");
-        props.add("namedNodes");
-        props.add("openedSuffix");
-        props.add("overwrite");
-        props.add("owner");
-        props.add("path");
-        props.add("pattern");
-        props.add("pollStrategy");
-        props.add("port");
-        props.add("readSuffix");
-        props.add("repeatCount");
-        props.add("replication");
-        props.add("runLoggingLevel");
-        props.add("scheduledExecutorService");
-        props.add("scheduler");
-        props.add("schedulerProperties");
-        props.add("sendEmptyMessageWhenIdle");
-        props.add("splitStrategy");
-        props.add("startScheduler");
-        props.add("streamDownload");
-        props.add("timeUnit");
-        props.add("useFixedDelay");
-        props.add("valueType");
-        PROPERTY_NAMES = Collections.unmodifiableSet(props);
-        SECRET_PROPERTY_NAMES = Collections.emptySet();
-        Set<String> prefixes = new HashSet<>(1);
-        prefixes.add("scheduler.");
-        MULTI_VALUE_PREFIXES = Collections.unmodifiableSet(prefixes);
-    }
-
-    @Override
-    public boolean isEnabled(String scheme) {
-        return "hdfs".equals(scheme);
-    }
-
-    @Override
-    public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
-        String syntax = scheme + BASE;
-        String uri = syntax;
-
-        Map<String, Object> copy = new HashMap<>(properties);
-
-        uri = buildPathParameter(syntax, uri, "hostName", null, true, copy);
-        uri = buildPathParameter(syntax, uri, "port", 8020, false, copy);
-        uri = buildPathParameter(syntax, uri, "path", null, true, copy);
-        uri = buildQueryParameters(uri, copy, encode);
-        return uri;
-    }
-
-    @Override
-    public Set<String> propertyNames() {
-        return PROPERTY_NAMES;
-    }
-
-    @Override
-    public Set<String> secretPropertyNames() {
-        return SECRET_PROPERTY_NAMES;
-    }
-
-    @Override
-    public Set<String> multiValuePrefixes() {
-        return MULTI_VALUE_PREFIXES;
-    }
-
-    @Override
-    public boolean isLenientProperties() {
-        return false;
-    }
-}
-
diff --git a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component.properties b/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component.properties
deleted file mode 100644
index 2ef8867ca09..00000000000
--- a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component.properties
+++ /dev/null
@@ -1,7 +0,0 @@
-# Generated by camel build tools - do NOT edit this file!
-components=hdfs
-groupId=org.apache.camel
-artifactId=camel-hdfs
-version=4.4.0-SNAPSHOT
-projectName=Camel :: HDFS (deprecated)
-projectDescription=Camel HDFS support with Hadoop 3.x libraries
diff --git a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component/hdfs b/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component/hdfs
deleted file mode 100644
index 8e6b6b5f5c2..00000000000
--- a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/component/hdfs
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by camel build tools - do NOT edit this file!
-class=org.apache.camel.component.hdfs.HdfsComponent
diff --git a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-component b/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-component
deleted file mode 100644
index 7e2db1cebcd..00000000000
--- a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-component
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by camel build tools - do NOT edit this file!
-class=org.apache.camel.component.hdfs.HdfsComponentConfigurer
diff --git a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-endpoint b/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-endpoint
deleted file mode 100644
index f519efee201..00000000000
--- a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/configurer/hdfs-endpoint
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by camel build tools - do NOT edit this file!
-class=org.apache.camel.component.hdfs.HdfsEndpointConfigurer
diff --git a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/urifactory/hdfs-endpoint b/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/urifactory/hdfs-endpoint
deleted file mode 100644
index aa560708506..00000000000
--- a/components/camel-hdfs/src/generated/resources/META-INF/services/org/apache/camel/urifactory/hdfs-endpoint
+++ /dev/null
@@ -1,2 +0,0 @@
-# Generated by camel build tools - do NOT edit this file!
-class=org.apache.camel.component.hdfs.HdfsEndpointUriFactory
diff --git a/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json b/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json
deleted file mode 100644
index 31bc6c30056..00000000000
--- a/components/camel-hdfs/src/generated/resources/org/apache/camel/component/hdfs/hdfs.json
+++ /dev/null
@@ -1,91 +0,0 @@
-{
-  "component": {
-    "kind": "component",
-    "name": "hdfs",
-    "title": "HDFS",
-    "description": "Read and write from\/to an HDFS filesystem using Hadoop 2.x.",
-    "deprecated": true,
-    "firstVersion": "2.14.0",
-    "label": "bigdata,file",
-    "javaType": "org.apache.camel.component.hdfs.HdfsComponent",
-    "supportLevel": "Stable",
-    "groupId": "org.apache.camel",
-    "artifactId": "camel-hdfs",
-    "version": "4.4.0-SNAPSHOT",
-    "scheme": "hdfs",
-    "extendsScheme": "",
-    "syntax": "hdfs:hostName:port\/path",
-    "async": false,
-    "api": false,
-    "consumerOnly": false,
-    "producerOnly": false,
-    "lenientProperties": false
-  },
-  "componentProperties": {
-    "bridgeErrorHandler": { "index": 0, "kind": "property", "displayName": "Bridge Error Handler", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions (if possible) occurred while the Camel consumer is trying to pickup incoming messages, or the like [...]
-    "lazyStartProducer": { "index": 1, "kind": "property", "displayName": "Lazy Start Producer", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail [...]
-    "autowiredEnabled": { "index": 2, "kind": "property", "displayName": "Autowired Enabled", "group": "advanced", "label": "advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching t [...]
-    "healthCheckConsumerEnabled": { "index": 3, "kind": "property", "displayName": "Health Check Consumer Enabled", "group": "health", "label": "health", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Used for enabling or disabling all consumer based health checks from this component" },
-    "healthCheckProducerEnabled": { "index": 4, "kind": "property", "displayName": "Health Check Producer Enabled", "group": "health", "label": "health", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Used for enabling or disabling all producer based health checks from this component. Notice: Camel has by default disabled all producer based health-checks. You can turn on producer [...]
-    "jAASConfiguration": { "index": 5, "kind": "property", "displayName": "JAASConfiguration", "group": "security", "label": "security", "required": false, "type": "object", "javaType": "javax.security.auth.login.Configuration", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "To use the given configuration for security with JAAS." },
-    "kerberosConfigFile": { "index": 6, "kind": "property", "displayName": "Kerberos Config File", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "To use kerberos authentication, set the value of the 'java.security.krb5.conf' environment variable to an existing file. If the environment variable is already set, warn if different th [...]
-  },
-  "headers": {
-    "CamelHdfsClose": { "index": 0, "kind": "header", "displayName": "", "group": "producer", "label": "producer", "required": false, "javaType": "Boolean", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "Indicates to close the stream", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#HDFS_CLOSE" },
-    "CamelFileName": { "index": 1, "kind": "header", "displayName": "", "group": "common", "label": "", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "(producer) Specifies the name of the file to write (relative to the endpoint path). The name can be a String or an Expression object. Only relevant when not using a split strategy. (consumer) Specifies the name of the file to read", "constantName": " [...]
-    "CamelFileNameConsumed": { "index": 2, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The name of the file consumed", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_NAME_CONSUMED" },
-    "CamelFileAbsolutePath": { "index": 3, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The absolute path of the file", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_ABSOLUTE_PATH" },
-    "KEY": { "index": 4, "kind": "header", "displayName": "", "group": "common", "label": "", "required": false, "javaType": "Object", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The HDFS key", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#KEY" },
-    "CamelFileLength": { "index": 5, "kind": "header", "displayName": "", "group": "consumer", "label": "consumer", "required": false, "javaType": "Long", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "description": "The size of the file", "constantName": "org.apache.camel.component.hdfs.HdfsConstants#FILE_LENGTH" }
-  },
-  "properties": {
-    "hostName": { "index": 0, "kind": "path", "displayName": "Host Name", "group": "common", "label": "", "required": true, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "HDFS host to use" },
-    "port": { "index": 1, "kind": "path", "displayName": "Port", "group": "common", "label": "", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 8020, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "HDFS port to use" },
-    "path": { "index": 2, "kind": "path", "displayName": "Path", "group": "common", "label": "", "required": true, "type": "string", "javaType": "java.lang.String", "deprecated": false, "deprecationNote": "", "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The directory path to use" },
-    "connectOnStartup": { "index": 3, "kind": "parameter", "displayName": "Connect On Startup", "group": "common", "label": "", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Whether to connect to the HDFS file system on starting the producer\/consumer. If false then the con [...]
-    "fileSystemType": { "index": 4, "kind": "parameter", "displayName": "File System Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsFileSystemType", "enum": [ "LOCAL", "HDFS" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "HDFS", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Set to LOCAL to not use HDFS bu [...]
-    "fileType": { "index": 5, "kind": "parameter", "displayName": "File Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsFileType", "enum": [ "NORMAL_FILE", "SEQUENCE_FILE", "MAP_FILE", "BLOOMMAP_FILE", "ARRAY_FILE" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NORMAL_FILE", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", " [...]
-    "keyType": { "index": 6, "kind": "parameter", "displayName": "Key Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.WritableType", "enum": [ "NULL", "BOOLEAN", "BYTE", "SHORT", "INT", "FLOAT", "LONG", "DOUBLE", "TEXT", "BYTES" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NULL", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config" [...]
-    "namedNodes": { "index": 7, "kind": "parameter", "displayName": "Named Nodes", "group": "common", "label": "", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "A comma separated list of named nodes (e.g. srv11.example.com:8020,srv12.example.com:8020)" },
-    "owner": { "index": 8, "kind": "parameter", "displayName": "Owner", "group": "common", "label": "", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The file owner must match this owner for the consumer to pickup the file. Otherwise the file is skipped." },
-    "valueType": { "index": 9, "kind": "parameter", "displayName": "Value Type", "group": "common", "label": "", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.WritableType", "enum": [ "NULL", "BOOLEAN", "BYTE", "SHORT", "INT", "FLOAT", "LONG", "DOUBLE", "TEXT", "BYTES" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "BYTES", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "co [...]
-    "pattern": { "index": 10, "kind": "parameter", "displayName": "Pattern", "group": "consumer", "label": "consumer", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "*", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The pattern used for scanning the directory" },
-    "sendEmptyMessageWhenIdle": { "index": 11, "kind": "parameter", "displayName": "Send Empty Message When Idle", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "If the polling consumer did not poll any files, you can enable this option to send an empty message (no body) instead." },
-    "streamDownload": { "index": 12, "kind": "parameter", "displayName": "Stream Download", "group": "consumer", "label": "consumer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Sets the download method to use when not using a local working directory. If set to true, the [...]
-    "bridgeErrorHandler": { "index": 13, "kind": "parameter", "displayName": "Bridge Error Handler", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions (if possible) occurred while the Camel consumer is trying to pickup incoming [...]
-    "exceptionHandler": { "index": 14, "kind": "parameter", "displayName": "Exception Handler", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.spi.ExceptionHandler", "optionalPrefix": "consumer.", "deprecated": false, "autowired": false, "secret": false, "description": "To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By de [...]
-    "exchangePattern": { "index": 15, "kind": "parameter", "displayName": "Exchange Pattern", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.ExchangePattern", "enum": [ "InOnly", "InOut" ], "deprecated": false, "autowired": false, "secret": false, "description": "Sets the exchange pattern when the consumer creates an exchange." },
-    "pollStrategy": { "index": 16, "kind": "parameter", "displayName": "Poll Strategy", "group": "consumer (advanced)", "label": "consumer,advanced", "required": false, "type": "object", "javaType": "org.apache.camel.spi.PollingConsumerPollStrategy", "deprecated": false, "autowired": false, "secret": false, "description": "A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing you to provide your custom implementation to control error handling usually occurred during the po [...]
-    "append": { "index": 17, "kind": "parameter", "displayName": "Append", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Append to existing file. Notice that not all HDFS file systems support the append option." },
-    "overwrite": { "index": 18, "kind": "parameter", "displayName": "Overwrite", "group": "producer", "label": "producer", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Whether to overwrite existing files with the same name" },
-    "lazyStartProducer": { "index": 19, "kind": "parameter", "displayName": "Lazy Start Producer", "group": "producer (advanced)", "label": "producer,advanced", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a produ [...]
-    "blockSize": { "index": 20, "kind": "parameter", "displayName": "Block Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 67108864, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The size of the HDFS blocks" },
-    "bufferSize": { "index": 21, "kind": "parameter", "displayName": "Buffer Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 4096, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The buffer size used by HDFS" },
-    "checkIdleInterval": { "index": 22, "kind": "parameter", "displayName": "Check Idle Interval", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 500, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "How often (time in millis) in to run the idle checker background task. This option is only i [...]
-    "chunkSize": { "index": 23, "kind": "parameter", "displayName": "Chunk Size", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 4096, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "When reading a normal file, this is split into chunks producing a message per chunk." },
-    "compressionCodec": { "index": 24, "kind": "parameter", "displayName": "Compression Codec", "group": "advanced", "label": "advanced", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsCompressionCodec", "enum": [ "DEFAULT", "GZIP", "BZIP2", "SNAPPY", "LZ4", "ZSTANDARD" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "DEFAULT", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField [...]
-    "compressionType": { "index": 25, "kind": "parameter", "displayName": "Compression Type", "group": "advanced", "label": "advanced", "required": false, "type": "object", "javaType": "org.apache.camel.component.hdfs.HdfsCompressionType", "enum": [ "NONE", "RECORD", "BLOCK" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "NONE", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The comp [...]
-    "openedSuffix": { "index": 26, "kind": "parameter", "displayName": "Opened Suffix", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "opened", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "When a file is opened for reading\/writing the file is renamed with this suffix to avo [...]
-    "readSuffix": { "index": 27, "kind": "parameter", "displayName": "Read Suffix", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "read", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "Once the file has been read is renamed with this suffix to avoid to read it again." },
-    "replication": { "index": 28, "kind": "parameter", "displayName": "Replication", "group": "advanced", "label": "advanced", "required": false, "type": "integer", "javaType": "short", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "3", "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The HDFS replication factor" },
-    "splitStrategy": { "index": 29, "kind": "parameter", "displayName": "Split Strategy", "group": "advanced", "label": "advanced", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So, f [...]
-    "maxMessagesPerPoll": { "index": 30, "kind": "parameter", "displayName": "Max Messages Per Poll", "group": "filter", "label": "consumer,filter", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 100, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "To define a maximum messages to gather per poll. By default a limit of 100 is set. C [...]
-    "backoffErrorThreshold": { "index": 31, "kind": "parameter", "displayName": "Backoff Error Threshold", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "The number of subsequent error polls (failed due some error) that should happen before the backoffMultipler should kick-in." },
-    "backoffIdleThreshold": { "index": 32, "kind": "parameter", "displayName": "Backoff Idle Threshold", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "The number of subsequent idle polls that should happen before the backoffMultipler should kick-in." },
-    "backoffMultiplier": { "index": 33, "kind": "parameter", "displayName": "Backoff Multiplier", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "int", "deprecated": false, "autowired": false, "secret": false, "description": "To let the scheduled polling consumer backoff if there has been a number of subsequent idles\/errors in a row. The multiplier is then the number of polls that will be skipped before the next actual attempt is h [...]
-    "delay": { "index": 34, "kind": "parameter", "displayName": "Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 500, "description": "Milliseconds before the next poll." },
-    "greedy": { "index": 35, "kind": "parameter", "displayName": "Greedy", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": false, "description": "If greedy is enabled, then the ScheduledPollConsumer will run immediately again, if the previous run polled 1 or more messages." },
-    "initialDelay": { "index": 36, "kind": "parameter", "displayName": "Initial Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 1000, "description": "Milliseconds before the first poll starts." },
-    "repeatCount": { "index": 37, "kind": "parameter", "displayName": "Repeat Count", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "integer", "javaType": "long", "deprecated": false, "autowired": false, "secret": false, "defaultValue": 0, "description": "Specifies a maximum limit of number of fires. So if you set it to 1, the scheduler will only fire once. If you set it to 5, it will only fire five times. A value of zero or negative means fire forever." },
-    "runLoggingLevel": { "index": 38, "kind": "parameter", "displayName": "Run Logging Level", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "org.apache.camel.LoggingLevel", "enum": [ "TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "TRACE", "description": "The consumer logs a start\/complete log line when it polls. This option allows you to configure the l [...]
-    "scheduledExecutorService": { "index": 39, "kind": "parameter", "displayName": "Scheduled Executor Service", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.concurrent.ScheduledExecutorService", "deprecated": false, "autowired": false, "secret": false, "description": "Allows for configuring a custom\/shared thread pool to use for the consumer. By default each consumer has its own single threaded thread pool." },
-    "scheduler": { "index": 40, "kind": "parameter", "displayName": "Scheduler", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.lang.Object", "deprecated": false, "autowired": false, "secret": false, "defaultValue": "none", "description": "To use a cron scheduler from either camel-spring or camel-quartz component. Use value spring or quartz for built in scheduler" },
-    "schedulerProperties": { "index": 41, "kind": "parameter", "displayName": "Scheduler Properties", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.Map<java.lang.String, java.lang.Object>", "prefix": "scheduler.", "multiValue": true, "deprecated": false, "autowired": false, "secret": false, "description": "To configure additional properties when using a custom scheduler or any of the Quartz, Spring based scheduler." },
-    "startScheduler": { "index": 42, "kind": "parameter", "displayName": "Start Scheduler", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Whether the scheduler should be auto started." },
-    "timeUnit": { "index": 43, "kind": "parameter", "displayName": "Time Unit", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "object", "javaType": "java.util.concurrent.TimeUnit", "enum": [ "NANOSECONDS", "MICROSECONDS", "MILLISECONDS", "SECONDS", "MINUTES", "HOURS", "DAYS" ], "deprecated": false, "autowired": false, "secret": false, "defaultValue": "MILLISECONDS", "description": "Time unit for initialDelay and delay options." },
-    "useFixedDelay": { "index": 44, "kind": "parameter", "displayName": "Use Fixed Delay", "group": "scheduler", "label": "consumer,scheduler", "required": false, "type": "boolean", "javaType": "boolean", "deprecated": false, "autowired": false, "secret": false, "defaultValue": true, "description": "Controls if fixed delay or fixed rate is used. See ScheduledExecutorService in JDK for details." },
-    "kerberosConfigFileLocation": { "index": 45, "kind": "parameter", "displayName": "Kerberos Config File Location", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The location of the kerb5.conf file (https:\/\/web.mit.edu\/kerberos\/krb5-1.12\/d [...]
-    "kerberosKeytabLocation": { "index": 46, "kind": "parameter", "displayName": "Kerberos Keytab Location", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The location of the keytab file used to authenticate with the kerberos nodes (contains pair [...]
-    "kerberosUsername": { "index": 47, "kind": "parameter", "displayName": "Kerberos Username", "group": "security", "label": "security", "required": false, "type": "string", "javaType": "java.lang.String", "deprecated": false, "autowired": false, "secret": false, "configurationClass": "org.apache.camel.component.hdfs.HdfsConfiguration", "configurationField": "config", "description": "The username used to authenticate with the kerberos nodes" }
-  }
-}
diff --git a/components/camel-hdfs/src/main/docs/hdfs-component.adoc b/components/camel-hdfs/src/main/docs/hdfs-component.adoc
deleted file mode 100644
index 407c410655d..00000000000
--- a/components/camel-hdfs/src/main/docs/hdfs-component.adoc
+++ /dev/null
@@ -1,236 +0,0 @@
-= HDFS Component (deprecated)
-:doctitle: HDFS
-:shortname: hdfs
-:artifactid: camel-hdfs
-:description: Read and write from/to an HDFS filesystem using Hadoop 2.x.
-:since: 2.14
-:supportlevel: Stable-deprecated
-:deprecated: *deprecated*
-:tabs-sync-option:
-:component-header: Both producer and consumer are supported
-//Manually maintained attributes
-:camel-spring-boot-name: hdfs
-
-*Since Camel {since}*
-
-*{component-header}*
-
-The HDFS component enables you to read and write messages from/to an
-HDFS file system using Hadoop 2.x. HDFS is the distributed file system
-at the heart of http://hadoop.apache.org[Hadoop].
-
-Maven users will need to add the following dependency to their `pom.xml`
-for this component:
-
-[source,xml]
-------------------------------------------------------------
-<dependency>
-    <groupId>org.apache.camel</groupId>
-    <artifactId>camel-hdfs</artifactId>
-    <version>x.x.x</version>
-    <!-- use the same version as your Camel core version -->
-</dependency>
-------------------------------------------------------------
-
-== URI format
-
-----------------------------------------
-hdfs://hostname[:port][/path][?options]
-----------------------------------------
-
-The path is treated in the following way:
-
-1.  As a consumer, if the path is a file, it just reads the file; if it
-represents a directory, it scans all the files under the path that
-satisfy the configured pattern. All the files under that directory
-must be of the same type.
-2.  As a producer, if at least one split strategy is defined, the path
-is considered a directory and under that directory the producer creates
-a different file per split, named using the configured
-UuidGenerator.
-
-When consuming from HDFS in normal mode, a file is split into
-chunks, producing a message per chunk. You can configure the size of the
-chunk using the chunkSize option. If you want to read from HDFS and
-write to a regular file using the file component, you can use
-fileExist=Append to append each of the chunks together.
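-
-For instance, a minimal sketch (the `/data/input.txt` path and the local
-target directory are hypothetical) that reads an HDFS file in chunks and
-appends the chunks back together with the file component:
-
-[source,java]
-----------------------------------------------------------------------
-from("hdfs://localhost:8020/data/input.txt?chunkSize=4096")
-    .to("file:target/output?fileName=input.txt&fileExist=Append");
-----------------------------------------------------------------------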
-
-
-// component-configure options: START
-
-// component-configure options: END
-
-// component options: START
-include::partial$component-configure-options.adoc[]
-include::partial$component-endpoint-options.adoc[]
-// component options: END
-
-// endpoint options: START
-
-// endpoint options: END
-
-// component headers: START
-include::partial$component-endpoint-headers.adoc[]
-// component headers: END
-
-== KeyType and ValueType
-
-* NULL means that the key or the value is absent
-* BYTE for writing a single byte; the Java Byte class is mapped to BYTE
-* BYTES for writing a sequence of bytes; it maps the Java ByteBuffer
-class
-* SHORT for writing a Java short
-* INT for writing a Java integer
-* FLOAT for writing a Java float
-* LONG for writing a Java long
-* DOUBLE for writing a Java double
-* TEXT for writing Java strings
-
-BYTES is also used for everything else. For example, in Camel a file is
-sent around as an InputStream; in this case it is written to a sequence
-file or a map file as a sequence of bytes.
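-
-For example, a minimal sketch (the timer endpoint and the `/data/seq`
-directory are hypothetical) that writes a String body as a TEXT value
-into a sequence file:
-
-[source,java]
-----------------------------------------------------------------------
-from("timer:write?repeatCount=1")
-    .setBody(constant("hello hdfs"))
-    .to("hdfs://localhost:8020/data/seq?fileType=SEQUENCE_FILE&keyType=NULL&valueType=TEXT");
-----------------------------------------------------------------------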
-
-== Splitting Strategy
-
-In the current version of Hadoop opening a file in append mode is
-disabled since it's not very reliable. So, for the moment, it's only
-possible to create new files. The Camel HDFS endpoint tries to solve
-this problem as follows:
-
-* If the split strategy option has been defined, the HDFS path will be
-used as a directory and files will be created using the configured
-UuidGenerator
-* Every time a splitting condition is met, a new file is created. +
- The splitStrategy option is defined as a string with the following
-syntax: splitStrategy=<ST>:<value>,<ST>:<value>,*
-
-where <ST> can be:
-
-* BYTES a new file is created, and the old is closed when the number of
-written bytes is more than <value>
-* MESSAGES a new file is created, and the old is closed when the number
-of written messages is more than <value>
-* IDLE a new file is created, and the old is closed when no writing
-happened in the last <value> milliseconds
-
-Note that this strategy currently requires either setting an IDLE value
-or setting the HdfsConstants.HDFS_CLOSE header to false when using the
-BYTES/MESSAGES configuration; otherwise, the file will be closed with
-each message.
-
-For example:
-
------------------------------------------------------------------
-hdfs://localhost/tmp/simple-file?splitStrategy=IDLE:1000,BYTES:5
------------------------------------------------------------------
-
-This means a new file is created either when the stream has been idle
-for more than one second or when more than 5 bytes have been written.
-So, running `hadoop fs -ls /tmp/simple-file`, you will see that multiple
-files have been created.
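-
-A minimal route sketch (the `direct:events` endpoint is hypothetical)
-that writes each incoming message under this split strategy:
-
-[source,java]
-----------------------------------------------------------------------
-from("direct:events")
-    .to("hdfs://localhost/tmp/simple-file?splitStrategy=IDLE:1000,BYTES:5");
-----------------------------------------------------------------------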
-
-== Controlling when to close the file stream
-
-When using the xref:hdfs-component.adoc[HDFS] producer *without* a split
-strategy, the file output stream is closed after each write by default.
-However, you may want to keep the stream open and only close it
-explicitly later. For that you can use the header
-`HdfsConstants.HDFS_CLOSE` (value = `"CamelHdfsClose"`) to control this.
-Setting this header to a boolean allows you to explicitly control
-whether the stream should be closed or not.
-
-Note that this does not apply when a split strategy is used, as there
-are various strategies that can control when the stream is closed.
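-
-For example, a minimal sketch for the non-split case (the `direct:append`
-endpoint and target path are hypothetical) that keeps the stream open by
-setting the header to false on every message; a later message with the
-header set to true would close the stream:
-
-[source,java]
-----------------------------------------------------------------------
-from("direct:append")
-    .setHeader(HdfsConstants.HDFS_CLOSE, constant(false))
-    .to("hdfs://localhost:8020/data/out.txt");
-----------------------------------------------------------------------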
-
-== Using this component in OSGi
-
-There are some quirks when running this component in an OSGi environment
-related to the mechanism Hadoop 2.x uses to discover different
-`org.apache.hadoop.fs.FileSystem` implementations. Hadoop 2.x uses
-`java.util.ServiceLoader` which looks for
-`/META-INF/services/org.apache.hadoop.fs.FileSystem` files defining
-available filesystem types and implementations. These resources are not
-available when running inside OSGi.
-
-As for the `camel-hdfs` component, the default configuration files need
-to be visible to the bundle class loader. A typical way to deal with
-this is to keep a copy of `core-default.xml` (and, e.g., `hdfs-default.xml`)
-in your bundle root.
-
-=== Using this component with manually defined routes
-
-There are two options:
-
-1.  Package the `/META-INF/services/org.apache.hadoop.fs.FileSystem`
-resource with the bundle that defines the routes. This resource should
-list all the required Hadoop 2.x filesystem implementations.
-2.  Provide boilerplate initialization code which populates the
-internal, static cache inside the `org.apache.hadoop.fs.FileSystem` class:
-
-[source,java]
-----------------------------------------------------------------------------------------------------
-org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
-conf.setClass("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class, FileSystem.class);
-conf.setClass("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class, FileSystem.class);
-...
-FileSystem.get("file:///", conf);
-FileSystem.get("hdfs://localhost:9000/", conf);
-...
-----------------------------------------------------------------------------------------------------
-
-=== Using this component with Blueprint container
-
-Two options:
-
-1.  Package the `/META-INF/services/org.apache.hadoop.fs.FileSystem`
-resource with the bundle that contains the Blueprint definition.
-2.  Add the following to the Blueprint definition file:
-
-[source,xml]
-------------------------------------------------------------------------------------------------------
-<bean id="hdfsOsgiHelper" class="org.apache.camel.component.hdfs.HdfsOsgiHelper">
-   <argument>
-      <map>
-         <entry key="file:///" value="org.apache.hadoop.fs.LocalFileSystem"  />
-         <entry key="hdfs://localhost:9000/" value="org.apache.hadoop.hdfs.DistributedFileSystem" />
-         ...
-      </map>
-   </argument>
-</bean>
-
-<bean id="hdfs" class="org.apache.camel.component.hdfs.HdfsComponent" depends-on="hdfsOsgiHelper" />
-------------------------------------------------------------------------------------------------------
-
-This way Hadoop 2.x will have the correct mapping of URI schemes to
-filesystem implementations.
-
-=== Using this component with a HighAvailability configuration
-
-In an HA setup, there will be multiple named nodes (_configured through the *namedNodes* parameter_).
-The "hostname" and "port" portion of the endpoint URI no longer refers to an actual _host_; instead it represents the name given to the cluster.
-
-You can choose whatever name you want for the cluster (_the name should follow the [a-zA-Z0-9] convention_).
-This name will be sanitized by replacing the invalid characters with underscores. This is done so that a host name or IP could potentially be used, if that makes sense to you.
-
-The cluster name will be mapped to the HA filesystem with a corresponding failover proxy.
-
-[source,java]
-------------------------------------------------------------------------------------------------------
-from("hdfs://node1_and_2_cluster/dir1/dir2?namedNodes=node1.exemple.org:8020,node2.exemple.org:8020").routeId(...)
-...
-------------------------------------------------------------------------------------------------------
-
-
-=== Using this component with Kerberos authentication
-
-The Kerberos config file is read when the Camel component is created, not when the endpoint is created.
-Because of this, the config file must be set at startup, with a call such as:
-
-[source,java]
-------------------------------------------------------------------------------------------------------
-static {
-  HdfsComponent.setKerberosConfigFile("/etc/security/kerb5.conf");
-}
-------------------------------------------------------------------------------------------------------
-
-
-
-include::spring-boot:partial$starter.adoc[]
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/DefaultHdfsFile.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/DefaultHdfsFile.java
deleted file mode 100644
index 2b29253e26e..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/DefaultHdfsFile.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.util.IOHelper;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.ShortWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-abstract class DefaultHdfsFile<T extends Closeable, U extends Closeable> implements HdfsFile<T, U, Object, Object> {
-
-    protected final long copyBytes(InputStream in, OutputStream out, int buffSize, boolean close) throws IOException {
-        long numBytes = 0;
-        PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null;
-        byte[] buf = new byte[buffSize];
-        try {
-            int bytesRead = in.read(buf);
-            while (bytesRead >= 0) {
-                out.write(buf, 0, bytesRead);
-                numBytes += bytesRead;
-                if (ps != null && ps.checkError()) {
-                    throw new IOException("Unable to write to output stream.");
-                }
-                bytesRead = in.read(buf);
-            }
-        } finally {
-            if (close) {
-                IOHelper.close(out, in);
-            }
-        }
-        return numBytes;
-    }
-
-    protected final Writable getWritable(Object obj, Exchange exchange, Holder<Integer> size) {
-        Class<?> objCls = Optional.ofNullable(obj).orElse(new UnknownType()).getClass();
-        HdfsWritableFactories.HdfsWritableFactory objWritableFactory
-                = WritableCache.writables.getOrDefault(objCls, new HdfsWritableFactories.HdfsObjectWritableFactory());
-        return objWritableFactory.create(obj, exchange.getContext().getTypeConverter(), size);
-    }
-
-    protected final Object getObject(Writable writable, Holder<Integer> size) {
-        Class<?> writableClass = NullWritable.class;
-        if (writable != null) {
-            writableClass = writable.getClass();
-        }
-        HdfsWritableFactories.HdfsWritableFactory writableObjectFactory = WritableCache.readables.get(writableClass);
-        return writableObjectFactory.read(writable, size);
-    }
-
-    @SuppressWarnings({ "rawtypes" })
-    private static final class WritableCache {
-        private static Map<Class, HdfsWritableFactories.HdfsWritableFactory> writables = new HashMap<>();
-        private static Map<Class, HdfsWritableFactories.HdfsWritableFactory> readables = new HashMap<>();
-
-        private WritableCache() {
-        }
-
-        static {
-            writables.put(Boolean.class, new HdfsWritableFactories.HdfsBooleanWritableFactory());
-            writables.put(Byte.class, new HdfsWritableFactories.HdfsByteWritableFactory());
-            writables.put(ByteBuffer.class, new HdfsWritableFactories.HdfsBytesWritableFactory());
-            writables.put(Double.class, new HdfsWritableFactories.HdfsDoubleWritableFactory());
-            writables.put(Float.class, new HdfsWritableFactories.HdfsFloatWritableFactory());
-            writables.put(Short.class, new HdfsWritableFactories.HdfsShortWritableFactory());
-            writables.put(Integer.class, new HdfsWritableFactories.HdfsIntWritableFactory());
-            writables.put(Long.class, new HdfsWritableFactories.HdfsLongWritableFactory());
-            writables.put(String.class, new HdfsWritableFactories.HdfsTextWritableFactory());
-            writables.put(UnknownType.class, new HdfsWritableFactories.HdfsNullWritableFactory());
-        }
-
-        static {
-            readables.put(BooleanWritable.class, new HdfsWritableFactories.HdfsBooleanWritableFactory());
-            readables.put(ByteWritable.class, new HdfsWritableFactories.HdfsByteWritableFactory());
-            readables.put(BytesWritable.class, new HdfsWritableFactories.HdfsBytesWritableFactory());
-            readables.put(DoubleWritable.class, new HdfsWritableFactories.HdfsDoubleWritableFactory());
-            readables.put(FloatWritable.class, new HdfsWritableFactories.HdfsFloatWritableFactory());
-            readables.put(ShortWritable.class, new HdfsWritableFactories.HdfsShortWritableFactory());
-            readables.put(IntWritable.class, new HdfsWritableFactories.HdfsIntWritableFactory());
-            readables.put(LongWritable.class, new HdfsWritableFactories.HdfsLongWritableFactory());
-            readables.put(Text.class, new HdfsWritableFactories.HdfsTextWritableFactory());
-            readables.put(NullWritable.class, new HdfsWritableFactories.HdfsNullWritableFactory());
-        }
-    }
-
-    private static final class UnknownType {
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HaConfigurationBuilder.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HaConfigurationBuilder.java
deleted file mode 100644
index bea42285468..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HaConfigurationBuilder.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-
-final class HaConfigurationBuilder {
-
-    private static final String HFDS_NAMED_SERVICE = "hfdsNamedService";
-    private static final String HFDS_NAMED_SERVICE_SEPARATOR = "_";
-    private static final String HFDS_FS = "fs.defaultFS";
-
-    private HaConfigurationBuilder() {
-        // hidden
-    }
-
-    /**
-     * Generates the correct HA configuration (normally read from XML) based on the namedNodes: All named nodes have to
-     * be qualified: configuration.set("dfs.ha.namenodes.hfdsNamedService","namenode1,namenode2"); For each named node
-     * the following entry is added
-     * <p>
-     * configuration.set("dfs.namenode.rpc-address.hfdsNamedService.namenode1", "namenode1:1234");
-     * <p>
-     * Finally the proxy provider has to be specified:
-     * <p>
-     * configuration.set("dfs.client.failover.proxy.provider.hfdsNamedService",
-     * "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
-     * <p>
-     *
-     * @param configuration  - hdfs configuration that will be setup with the HA settings
-     * @param endpointConfig - configuration with the HA settings configured on the endpoint
-     */
-    static void withClusterConfiguration(Configuration configuration, HdfsConfiguration endpointConfig) {
-        String haNamedService = getSanitizedClusterName(endpointConfig.getHostName());
-        withClusterConfiguration(configuration, haNamedService, endpointConfig.getNamedNodeList(),
-                endpointConfig.getReplication());
-    }
-
-    /**
-     * Generates the correct HA configuration (normally read from XML) based on the namedNodes: All named nodes have to
-     * be qualified: configuration.set("dfs.ha.namenodes.hfdsNamedService","namenode1,namenode2"); For each named node
-     * the following entry is added
-     * <p>
-     * configuration.set("dfs.namenode.rpc-address.hfdsNamedService.namenode1", "namenode1:1234");
-     * <p>
-     * Finally the proxy provider has to be specified:
-     * <p>
-     * configuration.set("dfs.client.failover.proxy.provider.hfdsNamedService",
-     * "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
-     * <p>
-     *
-     * @param configuration     - hdfs configuration that will be setup with the HA settings
-     * @param haNamedService    - how the ha named service that represents the cluster will be named (used to resolve
-     *                          the FS)
-     * @param namedNodes        - All named nodes from the hadoop cluster
-     * @param replicationFactor - dfs replication factor
-     */
-    static void withClusterConfiguration(
-            Configuration configuration, String haNamedService, List<String> namedNodes, int replicationFactor) {
-        configuration.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(replicationFactor));
-        configuration.set(DFSConfigKeys.DFS_NAMESERVICES, haNamedService);
-        configuration.set(
-                DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, haNamedService),
-                nodeToString(namedNodes.stream().map(HaConfigurationBuilder::nodeToString).collect(Collectors.joining(","))));
-
-        namedNodes.forEach(nodeName -> configuration.set(
-                DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, haNamedService, nodeToString(nodeName)),
-                nodeName));
-
-        configuration.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + haNamedService,
-                ConfiguredFailoverProxyProvider.class.getName());
-
-        configuration.set(HFDS_FS, "hdfs://" + haNamedService);
-
-    }
-
-    static String getSanitizedClusterName(String rawClusterName) {
-        String clusterName = HFDS_NAMED_SERVICE;
-
-        if (StringUtils.isNotEmpty(rawClusterName)) {
-            clusterName = rawClusterName.replace(".", HFDS_NAMED_SERVICE_SEPARATOR);
-        }
-
-        return clusterName;
-    }
-
-    private static String nodeToString(String nodeName) {
-        return nodeName.replaceAll(":[0-9]*", "").replaceAll("\\.", HFDS_NAMED_SERVICE_SEPARATOR);
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsArrayFileTypeHandler.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsArrayFileTypeHandler.java
deleted file mode 100644
index 3d5702a619c..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsArrayFileTypeHandler.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.ArrayFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import static org.apache.camel.component.hdfs.HdfsHelper.asCompressionType;
-
-class HdfsArrayFileTypeHandler extends DefaultHdfsFile<ArrayFile.Writer, ArrayFile.Reader> {
-
-    @SuppressWarnings("rawtypes")
-    @Override
-    public ArrayFile.Writer createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            ArrayFile.Writer rout;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            Class<? extends WritableComparable> valueWritableClass = endpointConfig.getValueType().getWritableClass();
-            rout = new ArrayFile.Writer(
-                    hdfsInfo.getConfiguration(),
-                    hdfsInfo.getFileSystem(),
-                    hdfsPath,
-                    valueWritableClass,
-                    asCompressionType(endpointConfig.getCompressionType()),
-                    () -> {
-                    });
-            return rout;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        try {
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = getWritable(value, exchange, valueSize);
-            ((ArrayFile.Writer) hdfsOutputStream.getOut()).append(valueWritable);
-            return valueSize.getValue();
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public ArrayFile.Reader createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            ArrayFile.Reader rin;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            rin = new ArrayFile.Reader(hdfsInfo.getFileSystem(), hdfsPath, hdfsInfo.getConfiguration());
-            return rin;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long next(HdfsInputStream hdfsInputStream, Holder<Object> key, Holder<Object> value) {
-        try {
-            ArrayFile.Reader reader = (ArrayFile.Reader) hdfsInputStream.getIn();
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), new Configuration());
-            if (reader.next(valueWritable) != null) {
-                value.setValue(getObject(valueWritable, valueSize));
-                return valueSize.getValue();
-            } else {
-                return 0;
-            }
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsBloomMapFileHandler.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsBloomMapFileHandler.java
deleted file mode 100644
index d553d01da50..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsBloomMapFileHandler.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.BloomMapFile;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import static org.apache.camel.component.hdfs.HdfsHelper.asCompressionType;
-
-class HdfsBloomMapFileHandler extends DefaultHdfsFile<BloomMapFile.Writer, BloomMapFile.Reader> {
-
-    @SuppressWarnings("rawtypes")
-    @Override
-    public BloomMapFile.Writer createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            BloomMapFile.Writer rout;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            Class<? extends WritableComparable> keyWritableClass = endpointConfig.getKeyType().getWritableClass();
-            Class<? extends WritableComparable> valueWritableClass = endpointConfig.getValueType().getWritableClass();
-            rout = new BloomMapFile.Writer(
-                    hdfsInfo.getConfiguration(),
-                    new Path(hdfsPath),
-                    MapFile.Writer.keyClass(keyWritableClass),
-                    MapFile.Writer.valueClass(valueWritableClass),
-                    MapFile.Writer.compression(asCompressionType(endpointConfig.getCompressionType()),
-                            endpointConfig.getCompressionCodec().getCodec()),
-                    MapFile.Writer.progressable(() -> {
-                    }));
-            return rout;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        try {
-            Holder<Integer> keySize = new Holder<>();
-            Writable keyWritable = getWritable(key, exchange, keySize);
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = getWritable(value, exchange, valueSize);
-            ((BloomMapFile.Writer) hdfsOutputStream.getOut()).append((WritableComparable<?>) keyWritable, valueWritable);
-            return Long.sum(keySize.getValue(), valueSize.getValue());
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public BloomMapFile.Reader createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            BloomMapFile.Reader rin;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            rin = new BloomMapFile.Reader(new Path(hdfsPath), hdfsInfo.getConfiguration());
-            return rin;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long next(HdfsInputStream hdfsistr, Holder<Object> key, Holder<Object> value) {
-        try {
-            MapFile.Reader reader = (BloomMapFile.Reader) hdfsistr.getIn();
-            Holder<Integer> keySize = new Holder<>();
-            WritableComparable<?> keyWritable
-                    = (WritableComparable<?>) ReflectionUtils.newInstance(reader.getKeyClass(), new Configuration());
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), new Configuration());
-            if (reader.next(keyWritable, valueWritable)) {
-                key.setValue(getObject(keyWritable, keySize));
-                value.setValue(getObject(valueWritable, valueSize));
-                return Long.sum(keySize.getValue(), valueSize.getValue());
-            } else {
-                return 0;
-            }
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsComponent.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsComponent.java
deleted file mode 100644
index c175b08f007..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsComponent.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.net.URL;
-import java.util.Map;
-
-import javax.security.auth.login.Configuration;
-
-import org.apache.camel.Endpoint;
-import org.apache.camel.component.hdfs.kerberos.KerberosConfigurationBuilder;
-import org.apache.camel.spi.Metadata;
-import org.apache.camel.spi.annotations.Component;
-import org.apache.camel.support.HealthCheckComponent;
-import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Component("hdfs")
-public class HdfsComponent extends HealthCheckComponent {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsComponent.class);
-
-    private static String kerberosConfigFileLocation;
-
-    public HdfsComponent() {
-        initHdfs();
-    }
-
-    @Override
-    protected final Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
-        HdfsEndpoint hdfsEndpoint = new HdfsEndpoint(uri, this);
-        setProperties(hdfsEndpoint, parameters);
-        return hdfsEndpoint;
-    }
-
-    protected void initHdfs() {
-        try {
-            URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
-        } catch (Error e) {
-            // The setURLStreamHandlerFactory throws an error if the factory is already set, which is why
-            // we have the unusual catch for a java.lang.Error
-            LOG.debug("Cannot set URLStreamHandlerFactory due {}. This exception will be ignored.", e.getMessage(), e);
-        }
-    }
-
-    public static Configuration getJAASConfiguration() {
-        Configuration auth = null;
-        try {
-            auth = Configuration.getConfiguration();
-            LOG.trace("Existing JAAS Configuration {}", auth);
-        } catch (SecurityException e) {
-            LOG.trace("Cannot load existing JAAS configuration", e);
-        }
-        return auth;
-    }
-
-    /**
-     * To use the given configuration for security with JAAS.
-     */
-    @Metadata(label = "security")
-    public static void setJAASConfiguration(Configuration auth) {
-        if (auth != null) {
-            LOG.trace("Restoring existing JAAS Configuration {}", auth);
-            try {
-                Configuration.setConfiguration(auth);
-            } catch (SecurityException e) {
-                LOG.trace("Cannot restore JAAS Configuration. This exception is ignored.", e);
-            }
-        } else {
-            LOG.trace("No JAAS Configuration to restore");
-        }
-    }
-
-    /**
-     * To use Kerberos authentication, set the value of the 'java.security.krb5.conf' system property to an
-     * existing file. If the property is already set, a warning is logged if it differs from the specified parameter.
-     *
-     * @param kerberosConfigFileLocation - krb5.conf file
-     *                                   (https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
-     */
-    @Metadata(label = "security")
-    public static void setKerberosConfigFile(String kerberosConfigFileLocation) {
-        HdfsComponent.kerberosConfigFileLocation = kerberosConfigFileLocation;
-        KerberosConfigurationBuilder.setKerberosConfigFile(kerberosConfigFileLocation);
-    }
-
-    public static String getKerberosConfigFile() {
-        return kerberosConfigFileLocation;
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionCodec.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionCodec.java
deleted file mode 100644
index 5a22d3f6ff9..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionCodec.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import org.apache.hadoop.io.compress.BZip2Codec;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.compress.Lz4Codec;
-import org.apache.hadoop.io.compress.SnappyCodec;
-import org.apache.hadoop.io.compress.ZStandardCodec;
-
-public enum HdfsCompressionCodec {
-
-    DEFAULT {
-        @Override
-        public CompressionCodec getCodec() {
-            return new DefaultCodec();
-        }
-    },
-
-    GZIP {
-        @Override
-        public CompressionCodec getCodec() {
-            return new GzipCodec();
-        }
-    },
-
-    BZIP2 {
-        @Override
-        public CompressionCodec getCodec() {
-            return new BZip2Codec();
-        }
-    },
-
-    SNAPPY {
-        @Override
-        public CompressionCodec getCodec() {
-            return new SnappyCodec();
-        }
-    },
-
-    LZ4 {
-        @Override
-        public CompressionCodec getCodec() {
-            return new Lz4Codec();
-        }
-    },
-
-    ZSTANDARD {
-        @Override
-        public CompressionCodec getCodec() {
-            return new ZStandardCodec();
-        }
-    };
-
-    public abstract CompressionCodec getCodec();
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionType.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionType.java
deleted file mode 100644
index 68991427c4d..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsCompressionType.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-public enum HdfsCompressionType {
-
-    NONE,
-    RECORD,
-    BLOCK;
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
deleted file mode 100644
index 2b4b004f305..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConfiguration.java
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import org.apache.camel.spi.Metadata;
-import org.apache.camel.spi.UriParam;
-import org.apache.camel.spi.UriParams;
-import org.apache.camel.spi.UriPath;
-import org.apache.camel.util.URISupport;
-
-import static org.apache.camel.util.ObjectHelper.isNotEmpty;
-
-@UriParams
-public class HdfsConfiguration {
-
-    private URI uri;
-    private boolean wantAppend;
-    private List<HdfsProducer.SplitStrategy> splitStrategies;
-
-    @UriPath
-    @Metadata(required = true)
-    private String hostName;
-    @UriPath(defaultValue = "" + HdfsConstants.DEFAULT_PORT)
-    private int port = HdfsConstants.DEFAULT_PORT;
-    @UriPath
-    @Metadata(required = true)
-    private String path;
-    @UriParam(label = "producer", defaultValue = "true")
-    private boolean overwrite = true;
-    @UriParam(label = "producer")
-    private boolean append;
-    @UriParam(label = "advanced")
-    private String splitStrategy;
-    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
-    private int bufferSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_REPLICATION)
-    private short replication = HdfsConstants.DEFAULT_REPLICATION;
-    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BLOCKSIZE)
-    private long blockSize = HdfsConstants.DEFAULT_BLOCKSIZE;
-    @UriParam(label = "advanced", defaultValue = "NONE")
-    private HdfsCompressionType compressionType = HdfsConstants.DEFAULT_COMPRESSIONTYPE;
-    @UriParam(label = "advanced", defaultValue = "DEFAULT")
-    private HdfsCompressionCodec compressionCodec = HdfsConstants.DEFAULT_CODEC;
-    @UriParam(defaultValue = "NORMAL_FILE")
-    private HdfsFileType fileType = HdfsFileType.NORMAL_FILE;
-    @UriParam(defaultValue = "HDFS")
-    private HdfsFileSystemType fileSystemType = HdfsFileSystemType.HDFS;
-    @UriParam(defaultValue = "NULL")
-    private WritableType keyType = WritableType.NULL;
-    @UriParam(defaultValue = "BYTES")
-    private WritableType valueType = WritableType.BYTES;
-    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_OPENED_SUFFIX)
-    private String openedSuffix = HdfsConstants.DEFAULT_OPENED_SUFFIX;
-    @UriParam(label = "advanced", defaultValue = HdfsConstants.DEFAULT_READ_SUFFIX)
-    private String readSuffix = HdfsConstants.DEFAULT_READ_SUFFIX;
-    @UriParam(label = "consumer", defaultValue = HdfsConstants.DEFAULT_PATTERN)
-    private String pattern = HdfsConstants.DEFAULT_PATTERN;
-    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_BUFFERSIZE)
-    private int chunkSize = HdfsConstants.DEFAULT_BUFFERSIZE;
-    @UriParam(label = "advanced", defaultValue = "" + HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL)
-    private int checkIdleInterval = HdfsConstants.DEFAULT_CHECK_IDLE_INTERVAL;
-    @UriParam(defaultValue = "true")
-    private boolean connectOnStartup = true;
-    @UriParam(label = "consumer,filter", defaultValue = "" + HdfsConstants.DEFAULT_MAX_MESSAGES_PER_POLL)
-    private int maxMessagesPerPoll = HdfsConstants.DEFAULT_MAX_MESSAGES_PER_POLL;
-    @UriParam
-    private String owner;
-
-    @UriParam(label = "consumer", defaultValue = "false")
-    private boolean streamDownload;
-
-    @UriParam
-    private String namedNodes;
-    private List<String> namedNodeList = Collections.emptyList();
-
-    @UriParam(label = "security")
-    private String kerberosConfigFileLocation;
-    @UriParam(label = "security")
-    private String kerberosUsername;
-    @UriParam(label = "security")
-    private String kerberosKeytabLocation;
-
-    public HdfsConfiguration() {
-        // default constructor
-    }
-
-    private Boolean getBoolean(Map<String, Object> hdfsSettings, String param, Boolean dflt) {
-        if (hdfsSettings.containsKey(param)) {
-            return Boolean.valueOf((String) hdfsSettings.get(param));
-        } else {
-            return dflt;
-        }
-    }
-
-    private Integer getInteger(Map<String, Object> hdfsSettings, String param, Integer dflt) {
-        if (hdfsSettings.containsKey(param)) {
-            return Integer.valueOf((String) hdfsSettings.get(param));
-        } else {
-            return dflt;
-        }
-    }
-
-    private Short getShort(Map<String, Object> hdfsSettings, String param, Short dflt) {
-        if (hdfsSettings.containsKey(param)) {
-            return Short.valueOf((String) hdfsSettings.get(param));
-        } else {
-            return dflt;
-        }
-    }
-
-    private Long getLong(Map<String, Object> hdfsSettings, String param, Long dflt) {
-        if (hdfsSettings.containsKey(param)) {
-            return Long.valueOf((String) hdfsSettings.get(param));
-        } else {
-            return dflt;
-        }
-    }
-
-    private HdfsFileType getFileType(Map<String, Object> hdfsSettings, String param, HdfsFileType dflt) {
-        String eit = (String) hdfsSettings.get(param);
-        if (eit != null) {
-            return HdfsFileType.valueOf(eit);
-        } else {
-            return dflt;
-        }
-    }
-
-    private HdfsFileSystemType getFileSystemType(Map<String, Object> hdfsSettings, String param, HdfsFileSystemType dflt) {
-        String eit = (String) hdfsSettings.get(param);
-        if (eit != null) {
-            return HdfsFileSystemType.valueOf(eit);
-        } else {
-            return dflt;
-        }
-    }
-
-    private WritableType getWritableType(Map<String, Object> hdfsSettings, String param, WritableType dflt) {
-        String eit = (String) hdfsSettings.get(param);
-        if (eit != null) {
-            return WritableType.valueOf(eit);
-        } else {
-            return dflt;
-        }
-    }
-
-    private HdfsCompressionType getCompressionType(
-            Map<String, Object> hdfsSettings, String param, HdfsCompressionType ct) {
-        String eit = (String) hdfsSettings.get(param);
-        if (eit != null) {
-            return HdfsCompressionType.valueOf(eit);
-        } else {
-            return ct;
-        }
-    }
-
-    private HdfsCompressionCodec getCompressionCodec(Map<String, Object> hdfsSettings, String param, HdfsCompressionCodec cd) {
-        String eit = (String) hdfsSettings.get(param);
-        if (eit != null) {
-            return HdfsCompressionCodec.valueOf(eit);
-        } else {
-            return cd;
-        }
-    }
-
-    private String getString(Map<String, Object> hdfsSettings, String param, String dflt) {
-        if (hdfsSettings.containsKey(param)) {
-            return (String) hdfsSettings.get(param);
-        } else {
-            return dflt;
-        }
-    }
-
-    private List<HdfsProducer.SplitStrategy> getSplitStrategies(Map<String, Object> hdfsSettings) {
-        List<HdfsProducer.SplitStrategy> strategies = new ArrayList<>();
-
-        splitStrategy = getString(hdfsSettings, "splitStrategy", splitStrategy);
-
-        if (isNotEmpty(splitStrategy)) {
-            String[] strategyElements = splitStrategy.split(",");
-            for (String strategyElement : strategyElements) {
-                String[] tokens = strategyElement.split(":");
-                if (tokens.length != 2) {
-                    throw new IllegalArgumentException("Wrong Split Strategy [splitStrategy" + "=" + splitStrategy + "]");
-                }
-                HdfsProducer.SplitStrategyType strategyType = HdfsProducer.SplitStrategyType.valueOf(tokens[0]);
-                long strategyValue = Long.parseLong(tokens[1]);
-                strategies.add(new HdfsProducer.SplitStrategy(strategyType, strategyValue));
-            }
-        }
-        return strategies;
-    }
-
-    private List<String> getNamedNodeList(Map<String, Object> hdfsSettings) {
-        namedNodes = getString(hdfsSettings, "namedNodes", namedNodes);
-
-        if (isNotEmpty(namedNodes)) {
-            return Arrays.stream(namedNodes.split(",")).distinct().collect(Collectors.toList());
-        }
-
-        return Collections.emptyList();
-    }
-
-    public void checkConsumerOptions() {
-        // no validation required
-    }
-
-    public void checkProducerOptions() {
-        if (isAppend()) {
-            if (hasSplitStrategies()) {
-                throw new IllegalArgumentException("Split Strategies incompatible with append=true");
-            }
-            if (getFileType() != HdfsFileType.NORMAL_FILE) {
-                throw new IllegalArgumentException("append=true works only with NORMAL_FILEs");
-            }
-        }
-    }
-
-    public void parseURI(URI uri) throws URISyntaxException {
-        String protocol = uri.getScheme();
-        if (!protocol.equalsIgnoreCase("hdfs")) {
-            throw new IllegalArgumentException("Unrecognized protocol: " + protocol + " for uri: " + uri);
-        }
-        hostName = uri.getHost();
-        if (hostName == null) {
-            hostName = "localhost";
-        }
-        port = uri.getPort() == -1 ? HdfsConstants.DEFAULT_PORT : uri.getPort();
-        path = uri.getPath();
-        Map<String, Object> hdfsSettings = URISupport.parseParameters(uri);
-
-        overwrite = getBoolean(hdfsSettings, "overwrite", overwrite);
-        append = getBoolean(hdfsSettings, "append", append);
-        wantAppend = append;
-        bufferSize = getInteger(hdfsSettings, "bufferSize", bufferSize);
-        replication = getShort(hdfsSettings, "replication", replication);
-        blockSize = getLong(hdfsSettings, "blockSize", blockSize);
-        compressionType = getCompressionType(hdfsSettings, "compressionType", compressionType);
-        compressionCodec = getCompressionCodec(hdfsSettings, "compressionCodec", compressionCodec);
-        fileType = getFileType(hdfsSettings, "fileType", fileType);
-        fileSystemType = getFileSystemType(hdfsSettings, "fileSystemType", fileSystemType);
-        keyType = getWritableType(hdfsSettings, "keyType", keyType);
-        valueType = getWritableType(hdfsSettings, "valueType", valueType);
-        openedSuffix = getString(hdfsSettings, "openedSuffix", openedSuffix);
-        readSuffix = getString(hdfsSettings, "readSuffix", readSuffix);
-        pattern = getString(hdfsSettings, "pattern", pattern);
-        chunkSize = getInteger(hdfsSettings, "chunkSize", chunkSize);
-        splitStrategies = getSplitStrategies(hdfsSettings);
-
-        namedNodeList = getNamedNodeList(hdfsSettings);
-        kerberosConfigFileLocation = getString(hdfsSettings, "kerberosConfigFileLocation", kerberosConfigFileLocation);
-        kerberosUsername = getString(hdfsSettings, "kerberosUsername", kerberosUsername);
-        kerberosKeytabLocation = getString(hdfsSettings, "kerberosKeytabLocation", kerberosKeytabLocation);
-    }
-
-    public URI getUri() {
-        return uri;
-    }
-
-    public void setUri(URI uri) {
-        this.uri = uri;
-    }
-
-    public String getHostName() {
-        return hostName;
-    }
-
-    /**
-     * HDFS host to use
-     */
-    public void setHostName(String hostName) {
-        this.hostName = hostName;
-    }
-
-    public int getPort() {
-        return port;
-    }
-
-    /**
-     * HDFS port to use
-     */
-    public void setPort(int port) {
-        this.port = port;
-    }
-
-    public String getPath() {
-        return path;
-    }
-
-    /**
-     * The directory path to use
-     */
-    public void setPath(String path) {
-        this.path = path;
-    }
-
-    public boolean isOverwrite() {
-        return overwrite;
-    }
-
-    /**
-     * Whether to overwrite existing files with the same name
-     */
-    public void setOverwrite(boolean overwrite) {
-        this.overwrite = overwrite;
-    }
-
-    public boolean isAppend() {
-        return append;
-    }
-
-    public boolean isWantAppend() {
-        return wantAppend;
-    }
-
-    /**
-     * Append to existing file. Notice that not all HDFS file systems support the append option.
-     */
-    public void setAppend(boolean append) {
-        this.append = append;
-    }
-
-    public int getBufferSize() {
-        return bufferSize;
-    }
-
-    /**
-     * The buffer size used by HDFS
-     */
-    public void setBufferSize(int bufferSize) {
-        this.bufferSize = bufferSize;
-    }
-
-    public short getReplication() {
-        return replication;
-    }
-
-    /**
-     * The HDFS replication factor
-     */
-    public void setReplication(short replication) {
-        this.replication = replication;
-    }
-
-    public long getBlockSize() {
-        return blockSize;
-    }
-
-    /**
-     * The size of the HDFS blocks
-     */
-    public void setBlockSize(long blockSize) {
-        this.blockSize = blockSize;
-    }
-
-    public HdfsFileType getFileType() {
-        return fileType;
-    }
-
-    /**
-     * The file type to use. For more details see Hadoop HDFS documentation about the various files types.
-     */
-    public void setFileType(HdfsFileType fileType) {
-        this.fileType = fileType;
-    }
-
-    public HdfsCompressionType getCompressionType() {
-        return compressionType;
-    }
-
-    /**
-     * The compression type to use (not used by default)
-     */
-    public void setCompressionType(HdfsCompressionType compressionType) {
-        this.compressionType = compressionType;
-    }
-
-    public HdfsCompressionCodec getCompressionCodec() {
-        return compressionCodec;
-    }
-
-    /**
-     * The compression codec to use
-     */
-    public void setCompressionCodec(HdfsCompressionCodec compressionCodec) {
-        this.compressionCodec = compressionCodec;
-    }
-
-    /**
-     * Set to LOCAL to not use HDFS but local java.io.File instead.
-     */
-    public void setFileSystemType(HdfsFileSystemType fileSystemType) {
-        this.fileSystemType = fileSystemType;
-    }
-
-    public HdfsFileSystemType getFileSystemType() {
-        return fileSystemType;
-    }
-
-    public WritableType getKeyType() {
-        return keyType;
-    }
-
-    /**
-     * The type for the key in case of sequence or map files.
-     */
-    public void setKeyType(WritableType keyType) {
-        this.keyType = keyType;
-    }
-
-    public WritableType getValueType() {
-        return valueType;
-    }
-
-    /**
-     * The type for the value in case of sequence or map files
-     */
-    public void setValueType(WritableType valueType) {
-        this.valueType = valueType;
-    }
-
-    /**
-     * When a file is opened for reading/writing, the file is renamed with this suffix to avoid reading it during the
-     * writing phase.
-     */
-    public void setOpenedSuffix(String openedSuffix) {
-        this.openedSuffix = openedSuffix;
-    }
-
-    public String getOpenedSuffix() {
-        return openedSuffix;
-    }
-
-    /**
-     * Once the file has been read, it is renamed with this suffix to avoid reading it again.
-     */
-    public void setReadSuffix(String readSuffix) {
-        this.readSuffix = readSuffix;
-    }
-
-    public String getReadSuffix() {
-        return readSuffix;
-    }
-
-    /**
-     * The pattern used for scanning the directory
-     */
-    public void setPattern(String pattern) {
-        this.pattern = pattern;
-    }
-
-    public String getPattern() {
-        return pattern;
-    }
-
-    /**
-     * When reading a normal file, the content is split into chunks of this size, producing a message per chunk.
-     */
-    public void setChunkSize(int chunkSize) {
-        this.chunkSize = chunkSize;
-    }
-
-    public int getChunkSize() {
-        return chunkSize;
-    }
-
-    /**
-     * How often (time in millis) to run the idle checker background task. This option is only in use if the split
-     * strategy is IDLE.
-     */
-    public void setCheckIdleInterval(int checkIdleInterval) {
-        this.checkIdleInterval = checkIdleInterval;
-    }
-
-    public int getCheckIdleInterval() {
-        return checkIdleInterval;
-    }
-
-    public List<HdfsProducer.SplitStrategy> getSplitStrategies() {
-        return splitStrategies;
-    }
-
-    public boolean hasSplitStrategies() {
-        return !splitStrategies.isEmpty();
-    }
-
-    public String getSplitStrategy() {
-        return splitStrategy;
-    }
-
-    /**
-     * In the current version of Hadoop opening a file in append mode is disabled since it's not very reliable. So, for
-     * the moment, it's only possible to create new files. The Camel HDFS endpoint tries to solve this problem in this
-     * way:
-     * <ul>
-     * <li>If the split strategy option has been defined, the hdfs path will be used as a directory and files will be
-     * created using the configured UuidGenerator.</li>
-     * <li>Every time a splitting condition is met, a new file is created.</li>
-     * </ul>
-     * The splitStrategy option is defined as a string with the following syntax: <br/>
-     * <tt>splitStrategy=ST:value,ST:value,...</tt> <br/>
-     * where ST can be:
-     * <ul>
-     * <li>BYTES a new file is created, and the old is closed when the number of written bytes is more than value</li>
-     * <li>MESSAGES a new file is created, and the old is closed when the number of written messages is more than
-     * value</li>
-     * <li>IDLE a new file is created, and the old is closed when no writing happened in the last value
-     * milliseconds</li>
-     * </ul>
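-     * <p>
-     * For example (illustrative values), <tt>splitStrategy=IDLE:1000,BYTES:5000000</tt> would close the current file
-     * and create a new one after one second without writes, or once more than 5000000 bytes (roughly 5 MB) have been
-     * written, whichever happens first.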
-     */
-    public void setSplitStrategy(String splitStrategy) {
-        this.splitStrategy = splitStrategy;
-    }
-
-    public boolean isConnectOnStartup() {
-        return connectOnStartup;
-    }
-
-    /**
-     * Whether to connect to the HDFS file system on starting the producer/consumer. If false then the connection is
-     * created on-demand. Notice that HDFS may take up to 15 minutes to establish a connection, as it has a hardcoded
-     * 45 x 20 sec redelivery. Setting this option to false allows your application to start up without blocking for up
-     * to 15 minutes.
-     */
-    public void setConnectOnStartup(boolean connectOnStartup) {
-        this.connectOnStartup = connectOnStartup;
-    }
-
-    public int getMaxMessagesPerPoll() {
-        return maxMessagesPerPoll;
-    }
-
-    /**
-     * To define the maximum number of messages to gather per poll. By default a limit of 100 is set. Can be used to set
-     * a limit of e.g. 1000 to avoid picking up thousands of files when starting up the server. Values can only be
-     * greater than 0. Notice: if this option is in use then the limit is applied to the valid files. For example if you
-     * have 100000 files and use maxMessagesPerPoll=500, then only the first 500 files will be picked up.
-     */
-    public void setMaxMessagesPerPoll(int maxMessagesPerPoll) {
-        this.maxMessagesPerPoll = maxMessagesPerPoll;
-    }
-
-    public String getOwner() {
-        return owner;
-    }
-
-    /**
-     * The file owner must match this owner for the consumer to pick up the file. Otherwise the file is skipped.
-     */
-    public void setOwner(String owner) {
-        this.owner = owner;
-    }
-
-    public String getNamedNodes() {
-        return namedNodes;
-    }
-
-    /**
-     * A comma separated list of named nodes (e.g. srv11.example.com:8020,srv12.example.com:8020)
-     */
-    public void setNamedNodes(String namedNodes) {
-        this.namedNodes = namedNodes;
-    }
-
-    public List<String> getNamedNodeList() {
-        return namedNodeList;
-    }
-
-    public boolean hasClusterConfiguration() {
-        return !namedNodeList.isEmpty();
-    }
-
-    public String getKerberosConfigFileLocation() {
-        return kerberosConfigFileLocation;
-    }
-
-    /**
-     * The location of the kerb5.conf file (https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
-     */
-    public void setKerberosConfigFileLocation(String kerberosConfigFileLocation) {
-        this.kerberosConfigFileLocation = kerberosConfigFileLocation;
-    }
-
-    public String getKerberosUsername() {
-        return kerberosUsername;
-    }
-
-    /**
-     * The username used to authenticate with the kerberos nodes
-     */
-    public void setKerberosUsername(String kerberosUsername) {
-        this.kerberosUsername = kerberosUsername;
-    }
-
-    public String getKerberosKeytabLocation() {
-        return kerberosKeytabLocation;
-    }
-
-    /**
-     * The location of the keytab file used to authenticate with the kerberos nodes (contains pairs of kerberos
-     * principals and encrypted keys (which are derived from the Kerberos password))
-     */
-    public void setKerberosKeytabLocation(String kerberosKeytabLocation) {
-        this.kerberosKeytabLocation = kerberosKeytabLocation;
-    }
-
-    public boolean isKerberosAuthentication() {
-        return isNotEmpty(kerberosConfigFileLocation) && isNotEmpty(kerberosUsername) && isNotEmpty(kerberosKeytabLocation);
-    }
-
-    public boolean isStreamDownload() {
-        return streamDownload;
-    }
-
-    /**
-     * Sets the download method to use when not using a local working directory. If set to true, the remote files are
-     * streamed to the route as they are read. When set to false, the remote files are loaded into memory before being
-     * sent into the route.
-     */
-    public void setStreamDownload(boolean streamDownload) {
-        this.streamDownload = streamDownload;
-    }
-
-    /**
-     * Get the label of the hdfs file system like: HOST_NAME:PORT/PATH
-     *
-     * @param  path the file path
-     * @return      HOST_NAME:PORT/PATH
-     */
-    String getFileSystemLabel(String path) {
-        if (hasClusterConfiguration()) {
-            return String.format("%s/%s", getHostName(), path);
-        } else {
-            return String.format("%s:%s/%s", getHostName(), getPort(), path);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java
deleted file mode 100644
index c1c68d37060..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConstants.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.spi.Metadata;
-
-public final class HdfsConstants {
-
-    public static final int DEFAULT_PORT = 8020;
-
-    public static final int DEFAULT_BUFFERSIZE = 4096;
-
-    public static final short DEFAULT_REPLICATION = 3;
-
-    public static final long DEFAULT_BLOCKSIZE = 64 * 1024 * 1024L;
-
-    public static final HdfsCompressionType DEFAULT_COMPRESSIONTYPE = HdfsCompressionType.NONE;
-
-    public static final HdfsCompressionCodec DEFAULT_CODEC = HdfsCompressionCodec.DEFAULT;
-
-    public static final String DEFAULT_OPENED_SUFFIX = "opened";
-
-    public static final String DEFAULT_READ_SUFFIX = "read";
-
-    public static final String DEFAULT_SEGMENT_PREFIX = "seg";
-
-    public static final long DEFAULT_DELAY = 1000L;
-
-    public static final String DEFAULT_PATTERN = "*";
-
-    public static final int DEFAULT_CHECK_IDLE_INTERVAL = 500;
-    @Metadata(label = "producer", description = "Indicates to close the stream", javaType = "Boolean")
-    public static final String HDFS_CLOSE = "CamelHdfsClose";
-
-    public static final int DEFAULT_MAX_MESSAGES_PER_POLL = 100;
-    @Metadata(description = "(producer) Specifies the name of the file to write (relative to the\n" +
-                            "endpoint path). The name can be a `String` or an\n" +
-                            "Expression object. Only relevant when not using a\n" +
-                            "split strategy. (consumer) Specifies the name of the file to read",
-              javaType = "String")
-    public static final String FILE_NAME = Exchange.FILE_NAME;
-    @Metadata(label = "consumer", description = "The name of the file consumed", javaType = "String")
-    public static final String FILE_NAME_CONSUMED = Exchange.FILE_NAME_CONSUMED;
-    @Metadata(label = "consumer", description = "The absolute path of the file", javaType = "String")
-    public static final String FILE_ABSOLUTE_PATH = "CamelFileAbsolutePath";
-    @Metadata(description = "The HDFS key", javaType = "Object")
-    public static final String KEY = HdfsHeader.KEY.name();
-    @Metadata(label = "consumer", description = "The size of the file", javaType = "Long")
-    public static final String FILE_LENGTH = Exchange.FILE_LENGTH;
-
-    private HdfsConstants() {
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java
deleted file mode 100644
index ba7f5dfc9b2..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsConsumer.java
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import javax.security.auth.login.Configuration;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.ExchangePropertyKey;
-import org.apache.camel.Message;
-import org.apache.camel.Processor;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.camel.support.ScheduledPollConsumer;
-import org.apache.camel.util.IOHelper;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HdfsConsumer extends ScheduledPollConsumer {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsConsumer.class);
-
-    private final HdfsConfiguration endpointConfig;
-    private final StringBuilder hdfsPath;
-    private final Processor processor;
-    private final HdfsInfoFactory hdfsInfoFactory;
-    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
-
-    public HdfsConsumer(HdfsEndpoint endpoint, Processor processor, HdfsConfiguration endpointConfig) {
-        this(endpoint, processor, endpointConfig, new HdfsInfoFactory(endpointConfig),
-             endpointConfig.getFileSystemType().getHdfsPath(endpointConfig));
-    }
-
-    HdfsConsumer(HdfsEndpoint endpoint, Processor processor, HdfsConfiguration endpointConfig, HdfsInfoFactory hdfsInfoFactory,
-                 StringBuilder hdfsPath) {
-        super(endpoint, processor);
-        this.processor = processor;
-        this.endpointConfig = endpointConfig;
-        this.hdfsPath = hdfsPath;
-        this.hdfsInfoFactory = hdfsInfoFactory;
-        setUseFixedDelay(true);
-    }
-
-    @Override
-    public HdfsEndpoint getEndpoint() {
-        return (HdfsEndpoint) super.getEndpoint();
-    }
-
-    @Override
-    protected void doStart() throws Exception {
-        super.doStart();
-
-        if (endpointConfig.isConnectOnStartup()) {
-            // setup hdfs if configured to do on startup
-            setupHdfs(true);
-        }
-    }
-
-    private HdfsInfo setupHdfs(boolean onStartup) throws IOException {
-        String hdfsFsDescription = endpointConfig.getFileSystemLabel(hdfsPath.toString());
-        // if we are starting up then log at info level; at runtime log at debug level so we do not flood the log
-        if (onStartup) {
-            LOG.info("Connecting to hdfs file-system {} (may take a while if connection is not available)", hdfsFsDescription);
-        } else {
-            LOG.debug("Connecting to hdfs file-system {} (may take a while if connection is not available)", hdfsFsDescription);
-        }
-
-        // hadoop will cache the connection by default so it's faster to get it in the poll method
-        HdfsInfo answer = hdfsInfoFactory.newHdfsInfo(this.hdfsPath.toString());
-
-        if (onStartup) {
-            LOG.info("Connected to hdfs file-system {}", hdfsFsDescription);
-        } else {
-            LOG.debug("Connected to hdfs file-system {}", hdfsFsDescription);
-        }
-        return answer;
-    }
-
-    @Override
-    protected int poll() throws Exception {
-        // need to remember the JAAS auth configuration as Hadoop will override it, which would otherwise break authentication afterwards
-        Configuration auth = HdfsComponent.getJAASConfiguration();
-        try {
-            return doPoll();
-        } finally {
-            HdfsComponent.setJAASConfiguration(auth);
-        }
-    }
-
-    protected int doPoll() throws IOException {
-        class ExcludePathFilter implements PathFilter {
-            @Override
-            public boolean accept(Path path) {
-                return !(path.toString().endsWith(endpointConfig.getOpenedSuffix())
-                        || path.toString().endsWith(endpointConfig.getReadSuffix()));
-            }
-        }
-
-        HdfsInfo info = setupHdfs(false);
-        FileStatus[] fileStatuses;
-        if (info.getFileSystem().isFile(info.getPath())) {
-            fileStatuses = info.getFileSystem().globStatus(info.getPath());
-        } else {
-            Path pattern = info.getPath().suffix("/" + this.endpointConfig.getPattern());
-            fileStatuses = info.getFileSystem().globStatus(pattern, new ExcludePathFilter());
-        }
-
-        fileStatuses = Optional.ofNullable(fileStatuses).orElse(new FileStatus[0]);
-
-        return processFileStatuses(info, fileStatuses);
-    }
-
-    private int processFileStatuses(HdfsInfo info, FileStatus[] fileStatuses) {
-        final AtomicInteger totalMessageCount = new AtomicInteger();
-
-        List<HdfsInputStream> hdfsFiles = Arrays.stream(fileStatuses)
-                .filter(status -> normalFileIsDirectoryHasSuccessFile(status, info))
-                .filter(this::hasMatchingOwner)
-                .limit(endpointConfig.getMaxMessagesPerPoll())
-                .map(this::asHdfsFile)
-                .filter(Objects::nonNull)
-                .collect(Collectors.toList());
-
-        LOG.info("Processing [{}] valid files out of [{}] available.", hdfsFiles.size(), fileStatuses.length);
-
-        for (int i = 0; i < hdfsFiles.size(); i++) {
-            HdfsInputStream hdfsFile = hdfsFiles.get(i);
-            try {
-                int messageCount = processHdfsInputStream(hdfsFile, totalMessageCount);
-                LOG.debug("Processed [{}] files out of [{}].", i, hdfsFiles.size());
-                LOG.debug("File [{}] was split to [{}] messages.", i, messageCount);
-            } finally {
-                IOHelper.close(hdfsFile, "hdfs file", LOG);
-            }
-        }
-
-        return totalMessageCount.get();
-    }
-
-    private int processHdfsInputStream(HdfsInputStream hdfsFile, AtomicInteger totalMessageCount) {
-        final AtomicInteger messageCount = new AtomicInteger();
-        Holder<Object> currentKey = new Holder<>();
-        Holder<Object> currentValue = new Holder<>();
-
-        while (hdfsFile.next(currentKey, currentValue) >= 0) {
-            processHdfsInputStream(hdfsFile, currentKey, currentValue, messageCount, totalMessageCount);
-            messageCount.incrementAndGet();
-        }
-
-        return messageCount.get();
-    }
-
-    private void processHdfsInputStream(
-            HdfsInputStream hdfsFile, Holder<Object> key, Holder<Object> value, AtomicInteger messageCount,
-            AtomicInteger totalMessageCount) {
-        Exchange exchange = createExchange(false);
-        try {
-            Message message = exchange.getIn();
-            String fileName = StringUtils.substringAfterLast(hdfsFile.getActualPath(), "/");
-            message.setHeader(HdfsConstants.FILE_NAME, fileName);
-            message.setHeader(HdfsConstants.FILE_NAME_CONSUMED, fileName);
-            message.setHeader(HdfsConstants.FILE_ABSOLUTE_PATH, hdfsFile.getActualPath());
-            if (key.getValue() != null) {
-                message.setHeader(HdfsConstants.KEY, key.getValue());
-            }
-
-            if (hdfsFile.getNumOfReadBytes() >= 0) {
-                message.setHeader(HdfsConstants.FILE_LENGTH, hdfsFile.getNumOfReadBytes());
-            }
-
-            message.setBody(value.getValue());
-
-            updateNewExchange(exchange, messageCount.get(), hdfsFile);
-
-            LOG.debug("Processing file [{}]", fileName);
-
-            processor.process(exchange);
-            totalMessageCount.incrementAndGet();
-
-        } catch (Exception e) {
-            exchange.setException(e);
-        } finally {
-            // in case of unhandled exceptions then let the exception handler handle them
-            if (exchange.getException() != null) {
-                getExceptionHandler().handleException(exchange.getException());
-            }
-            releaseExchange(exchange, false);
-        }
-
-    }
-
-    private boolean normalFileIsDirectoryHasSuccessFile(FileStatus fileStatus, HdfsInfo info) {
-        if (endpointConfig.getFileType().equals(HdfsFileType.NORMAL_FILE) && fileStatus.isDirectory()) {
-            try {
-                Path successPath = new Path(fileStatus.getPath().toString() + "/_SUCCESS");
-                if (!info.getFileSystem().exists(successPath)) {
-                    return false;
-                }
-            } catch (IOException e) {
-                throw new RuntimeCamelException(e);
-            }
-        }
-        return true;
-    }
-
-    private boolean hasMatchingOwner(FileStatus fileStatus) {
-        if (endpointConfig.getOwner() != null && !endpointConfig.getOwner().equals(fileStatus.getOwner())) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Skipping file: {} as not matching owner: {}", fileStatus.getPath(), endpointConfig.getOwner());
-            }
-            return false;
-        }
-        return true;
-    }
-
-    private HdfsInputStream asHdfsFile(FileStatus fileStatus) {
-        try {
-            this.rwLock.writeLock().lock();
-            return HdfsInputStream.createInputStream(fileStatus.getPath().toString(), hdfsInfoFactory);
-        } finally {
-            this.rwLock.writeLock().unlock();
-        }
-    }
-
-    protected void updateNewExchange(Exchange exchange, int index, HdfsInputStream hdfsFile) {
-        // do not share unit of work
-        exchange.getExchangeExtension().setUnitOfWork(null);
-
-        exchange.setProperty(ExchangePropertyKey.SPLIT_INDEX, index);
-
-        if (hdfsFile.hasNext()) {
-            exchange.setProperty(ExchangePropertyKey.SPLIT_COMPLETE, Boolean.FALSE);
-        } else {
-            exchange.setProperty(ExchangePropertyKey.SPLIT_COMPLETE, Boolean.TRUE);
-            // streaming mode, so set total size when we are complete based on the index
-            exchange.setProperty(ExchangePropertyKey.SPLIT_SIZE, index + 1);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
deleted file mode 100644
index 5bb69ae61f4..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsEndpoint.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.camel.Category;
-import org.apache.camel.Consumer;
-import org.apache.camel.Processor;
-import org.apache.camel.Producer;
-import org.apache.camel.spi.UriEndpoint;
-import org.apache.camel.spi.UriParam;
-import org.apache.camel.support.ScheduledPollEndpoint;
-
-/**
- * Read and write from/to an HDFS filesystem using Hadoop 2.x.
- */
-@UriEndpoint(firstVersion = "2.14.0", scheme = "hdfs", title = "HDFS", syntax = "hdfs:hostName:port/path",
-             category = { Category.BIGDATA, Category.FILE }, headersClass = HdfsConstants.class)
-public class HdfsEndpoint extends ScheduledPollEndpoint {
-
-    @UriParam
-    private final HdfsConfiguration config;
-
-    public HdfsEndpoint(String endpointUri, HdfsComponent component) throws URISyntaxException {
-        super(endpointUri, component);
-        this.config = new HdfsConfiguration();
-        this.config.parseURI(new URI(endpointUri));
-    }
-
-    @Override
-    public Consumer createConsumer(Processor processor) throws Exception {
-        config.checkConsumerOptions();
-        HdfsConsumer answer = new HdfsConsumer(this, processor, config);
-        configureConsumer(answer);
-        return answer;
-    }
-
-    @Override
-    public Producer createProducer() {
-        config.checkProducerOptions();
-        return new HdfsProducer(this, config);
-    }
-
-    public HdfsConfiguration getConfig() {
-        return config;
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFile.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFile.java
deleted file mode 100644
index 9979adf387a..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFile.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.Closeable;
-
-import org.apache.camel.Exchange;
-
-interface HdfsFile<T extends Closeable, U extends Closeable, K, V> {
-
-    T createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory);
-
-    long append(HdfsOutputStream hdfsOutputStream, K key, V value, Exchange exchange);
-
-    U createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory);
-
-    long next(HdfsInputStream hdfsInputStream, Holder<K> key, Holder<V> value);
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileSystemType.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileSystemType.java
deleted file mode 100644
index eccc34891c3..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileSystemType.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-public enum HdfsFileSystemType {
-
-    LOCAL {
-        @Override
-        public StringBuilder getHdfsPath(HdfsConfiguration config) {
-            StringBuilder hpath = new StringBuilder();
-            hpath.append("file://");
-            hpath.append(config.getPath());
-            if (config.hasSplitStrategies()) {
-                hpath.append('/');
-            }
-            return hpath;
-        }
-    },
-
-    HDFS {
-        @Override
-        public StringBuilder getHdfsPath(HdfsConfiguration config) {
-            StringBuilder hpath = new StringBuilder();
-            hpath.append("hdfs://");
-            hpath.append(config.getHostName());
-            if (!config.hasClusterConfiguration()) {
-                hpath.append(':').append(config.getPort());
-            }
-            hpath.append(config.getPath());
-            if (config.hasSplitStrategies()) {
-                hpath.append('/');
-            }
-            return hpath;
-        }
-    };
-
-    public abstract StringBuilder getHdfsPath(HdfsConfiguration conf);
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileType.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileType.java
deleted file mode 100644
index 8930587bf1b..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsFileType.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.Closeable;
-
-import org.apache.camel.Exchange;
-
-public enum HdfsFileType {
-
-    NORMAL_FILE(new HdfsNormalFileHandler()),
-    SEQUENCE_FILE(new HdfsSequenceFileHandler()),
-    MAP_FILE(new HdfsMapFileHandler()),
-    BLOOMMAP_FILE(new HdfsBloomMapFileHandler()),
-    ARRAY_FILE(new HdfsArrayFileTypeHandler());
-
-    private final HdfsFile file;
-
-    HdfsFileType(HdfsFile file) {
-        this.file = file;
-    }
-
-    public Closeable createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        return this.file.createOutputStream(hdfsPath, hdfsInfoFactory);
-    }
-
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        return this.file.append(hdfsOutputStream, key, value, exchange);
-    }
-
-    public Closeable createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        return this.file.createInputStream(hdfsPath, hdfsInfoFactory);
-    }
-
-    public long next(HdfsInputStream hdfsInputStream, final Holder<Object> key, final Holder<Object> value) {
-        return this.file.next(hdfsInputStream, key, value);
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHeader.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHeader.java
deleted file mode 100644
index 53992edadc2..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHeader.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-public enum HdfsHeader {
-
-    KEY
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHelper.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHelper.java
deleted file mode 100644
index f01c2bf511d..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsHelper.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import org.apache.hadoop.io.SequenceFile;
-
-public final class HdfsHelper {
-
-    private HdfsHelper() {
-    }
-
-    public static SequenceFile.CompressionType asCompressionType(HdfsCompressionType type) {
-        return SequenceFile.CompressionType.valueOf(type.name());
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfo.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfo.java
deleted file mode 100644
index 556ba9ecd42..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfo.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-class HdfsInfo {
-
-    private final Configuration configuration;
-    private final FileSystem fileSystem;
-    private final Path path;
-
-    HdfsInfo(Configuration configuration, FileSystem fileSystem, Path hdfsPath) {
-        this.configuration = configuration;
-        this.fileSystem = fileSystem;
-        this.path = hdfsPath;
-    }
-
-    public Configuration getConfiguration() {
-        return configuration;
-    }
-
-    public FileSystem getFileSystem() {
-        return fileSystem;
-    }
-
-    public Path getPath() {
-        return path;
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfoFactory.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfoFactory.java
deleted file mode 100644
index 29072717075..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInfoFactory.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.camel.component.hdfs.kerberos.KerberosAuthentication;
-import org.apache.camel.component.hdfs.kerberos.KerberosConfigurationBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-class HdfsInfoFactory {
-
-    private final HdfsConfiguration endpointConfig;
-
-    HdfsInfoFactory(HdfsConfiguration endpointConfig) {
-        this.endpointConfig = endpointConfig;
-    }
-
-    HdfsInfo newHdfsInfo(String hdfsPath) throws IOException {
-        return newHdfsInfo(hdfsPath, endpointConfig);
-    }
-
-    HdfsInfo newHdfsInfoWithoutAuth(String hdfsPath) throws IOException {
-        return newHdfsInfoWithoutAuth(hdfsPath, endpointConfig);
-    }
-
-    HdfsConfiguration getEndpointConfig() {
-        return endpointConfig;
-    }
-
-    private static HdfsInfo newHdfsInfo(String hdfsPath, HdfsConfiguration endpointConfig) throws IOException {
-        // need to remember the JAAS auth configuration as Hadoop will override it, which would otherwise break authentication afterwards
-        javax.security.auth.login.Configuration auth = HdfsComponent.getJAASConfiguration();
-        try {
-            return newHdfsInfoWithoutAuth(hdfsPath, endpointConfig);
-        } finally {
-            HdfsComponent.setJAASConfiguration(auth);
-        }
-    }
-
-    private static HdfsInfo newHdfsInfoWithoutAuth(String hdfsPath, HdfsConfiguration endpointConfig) throws IOException {
-        Configuration configuration = newConfiguration(endpointConfig);
-
-        authenticate(configuration, endpointConfig);
-
-        FileSystem fileSystem = newFileSystem(configuration, hdfsPath, endpointConfig);
-        Path path = new Path(hdfsPath);
-
-        return new HdfsInfo(configuration, fileSystem, path);
-    }
-
-    static Configuration newConfiguration(HdfsConfiguration endpointConfig) {
-        Configuration configuration = new Configuration();
-
-        if (endpointConfig.isKerberosAuthentication()) {
-            KerberosConfigurationBuilder.withKerberosConfiguration(configuration, endpointConfig);
-        }
-
-        if (endpointConfig.hasClusterConfiguration()) {
-            HaConfigurationBuilder.withClusterConfiguration(configuration, endpointConfig);
-        }
-
-        return configuration;
-    }
-
-    static void authenticate(Configuration configuration, HdfsConfiguration endpointConfig) throws IOException {
-        if (endpointConfig.isKerberosAuthentication()) {
-            String userName = endpointConfig.getKerberosUsername();
-            String keytabLocation = endpointConfig.getKerberosKeytabLocation();
-            new KerberosAuthentication(configuration, userName, keytabLocation).loginWithKeytab();
-        }
-    }
-
-    /**
-     * Connects to the Hadoop HDFS file system. If no connection can be established, the timeout hardcoded in
-     * Hadoop is 45 x 20 sec = 15 minutes.
-     */
-    static FileSystem newFileSystem(Configuration configuration, String hdfsPath, HdfsConfiguration endpointConfig)
-            throws IOException {
-        FileSystem fileSystem;
-        if (endpointConfig.hasClusterConfiguration()) {
-            // use the default FS that was set during the cluster configuration (@see org.apache.camel.component.hdfs.HaConfigurationBuilder)
-            fileSystem = FileSystem.get(configuration);
-        } else {
-            fileSystem = FileSystem.get(URI.create(hdfsPath), configuration);
-        }
-
-        return fileSystem;
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInputStream.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInputStream.java
deleted file mode 100644
index 79c05699869..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsInputStream.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class HdfsInputStream implements Closeable {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsInputStream.class);
-
-    private HdfsFileType fileType;
-    private HdfsInfo info;
-    private String actualPath;
-    private String suffixedPath;
-    private String suffixedReadPath;
-    private Closeable in;
-    private boolean opened;
-    private int chunkSize;
-    private final AtomicLong numOfReadBytes = new AtomicLong();
-    private final AtomicLong numOfReadMessages = new AtomicLong();
-
-    private boolean streamDownload;
-
-    private EntryHolder cachedNextEntry;
-
-    protected HdfsInputStream() {
-    }
-
-    /**
-     * Opens an input stream for the given HDFS path. The file is first renamed with the configured opened suffix;
-     * if the rename fails (for example because the file no longer exists) <tt>null</tt> is returned.
-     *
-     * @param  hdfsPath        path of the file to read
-     * @param  hdfsInfoFactory factory used to obtain the file system and endpoint configuration
-     * @return                 the opened input stream, or <tt>null</tt> if the file could not be opened
-     */
-    public static HdfsInputStream createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-        HdfsInputStream iStream = new HdfsInputStream();
-        iStream.fileType = endpointConfig.getFileType();
-        iStream.actualPath = hdfsPath;
-        iStream.suffixedPath = iStream.actualPath + '.' + endpointConfig.getOpenedSuffix();
-        iStream.suffixedReadPath = iStream.actualPath + '.' + endpointConfig.getReadSuffix();
-        iStream.chunkSize = endpointConfig.getChunkSize();
-        iStream.streamDownload = endpointConfig.isStreamDownload();
-        try {
-            iStream.info = hdfsInfoFactory.newHdfsInfo(iStream.actualPath);
-            if (iStream.info.getFileSystem().rename(new Path(iStream.actualPath), new Path(iStream.suffixedPath))) {
-                iStream.in = iStream.fileType.createInputStream(iStream.suffixedPath, hdfsInfoFactory);
-                iStream.opened = true;
-            } else {
-                LOG.debug("Failed to open file [{}] because it doesn't exist", hdfsPath);
-                iStream = null;
-            }
-        } catch (IOException e) {
-            throw new RuntimeCamelException(e);
-        }
-
-        return iStream;
-    }
-
-    @Override
-    public final void close() throws IOException {
-        if (opened) {
-            IOUtils.closeStream(in);
-            info.getFileSystem().rename(new Path(suffixedPath), new Path(suffixedReadPath));
-            opened = false;
-        }
-    }
-
-    /**
-     * Reads the next record/chunk specific to the given file type.
-     *
-     * @param  key   holder that receives the key of the record that was read
-     * @param  value holder that receives the value of the record that was read
-     * @return       number of bytes read. 0 is a valid number of bytes (empty file), -1 indicates no record was read
-     */
-    public final long next(final Holder<Object> key, final Holder<Object> value) {
-        EntryHolder nextEntry = Optional.ofNullable(cachedNextEntry).orElseGet(() -> getNextFromStream(key, value));
-        cachedNextEntry = null;
-
-        key.setValue(nextEntry.getKey().getValue());
-        value.setValue(nextEntry.getValue().getValue());
-
-        return nextEntry.getByteCount();
-    }
-
-    private EntryHolder getNextFromStream(final Holder<Object> key, final Holder<Object> value) {
-        long nb = fileType.next(this, key, value);
-        // when zero bytes were read from the given type of file, we may still have a record (e.g. an empty file)
-        // null value.value is the only indication that no (new) record/chunk was read
-        if (nb == 0 && numOfReadMessages.get() > 0 || Objects.isNull(value.getValue())) {
-            // we've read all chunks from the file, whose size is an exact multiple of the chunk size
-            nb = -1;
-        } else {
-            numOfReadBytes.addAndGet(nb);
-            numOfReadMessages.incrementAndGet();
-        }
-
-        return new EntryHolder(key, value, nb);
-    }
-
-    /**
-     * Returns whether another record/chunk is available. The entry read ahead here is cached and returned by the
-     * next call to {@link #next(Holder, Holder)}.
-     */
-    public final boolean hasNext() {
-        if (Objects.isNull(cachedNextEntry)) {
-            Holder<Object> nextKey = new Holder<>();
-            Holder<Object> nextValue = new Holder<>();
-            long nextByteCount = next(nextKey, nextValue);
-            cachedNextEntry = new EntryHolder(nextKey, nextValue, nextByteCount);
-        }
-
-        return cachedNextEntry.hasNext();
-    }
-
-    public final long getNumOfReadBytes() {
-        return numOfReadBytes.longValue();
-    }
-
-    public final long getNumOfReadMessages() {
-        return numOfReadMessages.longValue();
-    }
-
-    public final String getActualPath() {
-        return actualPath;
-    }
-
-    public final int getChunkSize() {
-        return chunkSize;
-    }
-
-    public final Closeable getIn() {
-        return in;
-    }
-
-    public boolean isOpened() {
-        return opened;
-    }
-
-    public boolean isStreamDownload() {
-        return streamDownload;
-    }
-
-    private static class EntryHolder {
-
-        private long byteCount;
-        private Holder<Object> key;
-        private Holder<Object> value;
-
-        public EntryHolder(Holder<Object> key, Holder<Object> value, long byteCount) {
-            this.key = key;
-            this.value = value;
-            this.byteCount = byteCount;
-        }
-
-        public Holder<Object> getKey() {
-            return key;
-        }
-
-        public Holder<Object> getValue() {
-            return value;
-        }
-
-        public Boolean hasNext() {
-            return byteCount >= 0;
-        }
-
-        public long getByteCount() {
-            return byteCount;
-        }
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsMapFileHandler.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsMapFileHandler.java
deleted file mode 100644
index 0d0fcf820c0..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsMapFileHandler.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import static org.apache.camel.component.hdfs.HdfsHelper.asCompressionType;
-
-class HdfsMapFileHandler extends DefaultHdfsFile<MapFile.Writer, MapFile.Reader> {
-
-    @Override
-    @SuppressWarnings("rawtypes")
-    public MapFile.Writer createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            MapFile.Writer rout;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            Class<? extends WritableComparable> keyWritableClass = endpointConfig.getKeyType().getWritableClass();
-            Class<? extends WritableComparable> valueWritableClass = endpointConfig.getValueType().getWritableClass();
-            rout = new MapFile.Writer(
-                    hdfsInfo.getConfiguration(),
-                    new Path(hdfsPath),
-                    MapFile.Writer.keyClass(keyWritableClass),
-                    MapFile.Writer.valueClass(valueWritableClass),
-                    MapFile.Writer.compression(asCompressionType(endpointConfig.getCompressionType()),
-                            endpointConfig.getCompressionCodec().getCodec()),
-                    MapFile.Writer.progressable(() -> {
-                    }));
-            return rout;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        try {
-            Holder<Integer> keySize = new Holder<>();
-            Writable keyWritable = getWritable(key, exchange, keySize);
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = getWritable(value, exchange, valueSize);
-            ((MapFile.Writer) hdfsOutputStream.getOut()).append((WritableComparable<?>) keyWritable, valueWritable);
-            return Long.sum(keySize.getValue(), valueSize.getValue());
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public MapFile.Reader createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            MapFile.Reader rin;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            rin = new MapFile.Reader(new Path(hdfsPath), hdfsInfo.getConfiguration());
-            return rin;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long next(HdfsInputStream hdfsInputStream, Holder<Object> key, Holder<Object> value) {
-        try {
-            MapFile.Reader reader = (MapFile.Reader) hdfsInputStream.getIn();
-            Holder<Integer> keySize = new Holder<>();
-            WritableComparable<?> keyWritable
-                    = (WritableComparable<?>) ReflectionUtils.newInstance(reader.getKeyClass(), new Configuration());
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), new Configuration());
-            if (reader.next(keyWritable, valueWritable)) {
-                key.setValue(getObject(keyWritable, keySize));
-                value.setValue(getObject(valueWritable, valueSize));
-                return Long.sum(keySize.getValue(), valueSize.getValue());
-            } else {
-                return 0;
-            }
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsNormalFileHandler.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsNormalFileHandler.java
deleted file mode 100644
index 75bddd4866a..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsNormalFileHandler.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.camel.util.IOHelper;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class HdfsNormalFileHandler extends DefaultHdfsFile<OutputStream, InputStream> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsNormalFileHandler.class);
-    private boolean consumed;
-
-    @Override
-    public OutputStream createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            OutputStream outputStream;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            if (endpointConfig.isAppend()) {
-                outputStream = hdfsInfo.getFileSystem().append(
-                        hdfsInfo.getPath(),
-                        endpointConfig.getBufferSize(),
-                        () -> {
-                        });
-            } else {
-                outputStream = hdfsInfo.getFileSystem().create(
-                        hdfsInfo.getPath(),
-                        endpointConfig.isOverwrite(),
-                        endpointConfig.getBufferSize(),
-                        endpointConfig.getReplication(),
-                        endpointConfig.getBlockSize(),
-                        () -> {
-                        });
-            }
-            return outputStream;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        InputStream inputStream = null;
-        try {
-            inputStream = exchange.getContext().getTypeConverter().convertTo(InputStream.class, exchange, value);
-            return copyBytes(inputStream, (FSDataOutputStream) hdfsOutputStream.getOut(), HdfsConstants.DEFAULT_BUFFERSIZE,
-                    false);
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        } finally {
-            IOHelper.close(inputStream);
-        }
-    }
-
-    @Override
-    public InputStream createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            InputStream inputStream;
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            if (endpointConfig.getFileSystemType().equals(HdfsFileSystemType.LOCAL)) {
-                HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-                inputStream = hdfsInfo.getFileSystem().open(hdfsInfo.getPath());
-            } else {
-                inputStream = new FileInputStream(getHdfsFileToTmpFile(hdfsPath, endpointConfig));
-            }
-            return inputStream;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long next(HdfsInputStream hdfsInputStream, Holder<Object> key, Holder<Object> value) {
-        if (hdfsInputStream.isStreamDownload()) {
-            return nextAsWrappedStream(hdfsInputStream, value);
-        } else {
-            return nextAsOutputStream(hdfsInputStream, value);
-        }
-    }
-
-    private long nextAsWrappedStream(HdfsInputStream hdfsInputStream, Holder<Object> value) {
-        InputStream inputStream = (InputStream) hdfsInputStream.getIn();
-        value.setValue(inputStream);
-
-        if (consumed) {
-            return 0;
-        } else {
-            consumed = true;
-            return 1;
-        }
-    }
-
-    private long nextAsOutputStream(HdfsInputStream hdfsInputStream, Holder<Object> value) {
-        try {
-            ByteArrayOutputStream outputStream = new ByteArrayOutputStream(hdfsInputStream.getChunkSize());
-            byte[] buf = new byte[hdfsInputStream.getChunkSize()];
-            int bytesRead = ((InputStream) hdfsInputStream.getIn()).read(buf);
-            if (bytesRead >= 0) {
-                outputStream.write(buf, 0, bytesRead);
-                value.setValue(outputStream);
-                return bytesRead;
-            } else {
-                // indication that we may have read from an empty file
-                value.setValue(outputStream);
-                return 0;
-            }
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    private File getHdfsFileToTmpFile(String hdfsPath, HdfsConfiguration configuration) {
-        try {
-            String fileName = hdfsPath.substring(hdfsPath.lastIndexOf('/'));
-
-            // [CAMEL-13711] Files.createTempFile not equivalent to File.createTempFile
-            File outputDest;
-            try {
-                // First trying: Files.createTempFile
-                outputDest = Files.createTempFile(fileName, ".hdfs").toFile();
-
-            } catch (Exception ex) {
-                // Now trying: File.createTempFile
-                outputDest = File.createTempFile(fileName, ".hdfs");
-            }
-
-            if (outputDest.exists()) {
-                boolean result = outputDest.delete();
-                if (!result) {
-                    LOG.error("Failed to delete output destination {}", outputDest);
-                }
-            }
-
-            HdfsInfoFactory hdfsInfoFactory = new HdfsInfoFactory(configuration);
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            FileSystem fileSystem = hdfsInfo.getFileSystem();
-            FileUtil.copy(fileSystem, new Path(hdfsPath), outputDest, false, fileSystem.getConf());
-            try {
-                FileUtil.copy(
-                        fileSystem, // src
-                        new Path(hdfsPath),
-                        FileSystem.getLocal(new Configuration()), // dest
-                        new Path(outputDest.toURI()),
-                        false, fileSystem.getConf());
-            } catch (IOException e) {
-                return outputDest;
-            }
-
-            return outputDest;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOsgiHelper.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOsgiHelper.java
deleted file mode 100644
index 13170c736a7..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOsgiHelper.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.net.URI;
-import java.util.Map;
-import java.util.ServiceLoader;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p>
- * Class which may be used in an OSGi/Blueprint environment to perform some static initialization.
- * </p>
- * <p>
- * This can be useful to fix the usage of {@link ServiceLoader} by Hadoop 2 in an OSGi environment.
- * </p>
- */
-public class HdfsOsgiHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsOsgiHelper.class);
-
-    /**
-     * By using this constructor it is possible to perform static initialization of {@link FileSystem}.
-     */
-    public HdfsOsgiHelper(Map<String, String> fileSystems) {
-        try {
-            // get bundle classloader for camel-hdfs bundle
-            ClassLoader cl = getClass().getClassLoader();
-            Configuration conf = new Configuration();
-            // set that as the hdfs configuration's classloader
-            conf.setClassLoader(cl);
-            for (Map.Entry<String, String> fsEntry : fileSystems.entrySet()) {
-                URI uri = URI.create(fsEntry.getKey());
-                conf.setClass(String.format("fs.%s.impl", uri.getScheme()), cl.loadClass(fsEntry.getValue()), FileSystem.class);
-                LOG.debug("Successfully loaded class: {}", fsEntry.getValue());
-                FileSystem.get(uri, conf);
-                LOG.debug("Successfully got uri: {} from FileSystem Object", uri);
-            }
-        } catch (Exception e) {
-            LOG.debug(e.getMessage(), e);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOutputStream.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOutputStream.java
deleted file mode 100644
index 65483218330..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsOutputStream.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-
-public class HdfsOutputStream implements Closeable {
-
-    private HdfsFileType fileType;
-    private HdfsInfo info;
-    private String actualPath;
-    private String suffixedPath;
-    private Closeable out;
-    private volatile boolean opened;
-    private final AtomicLong numOfWrittenBytes = new AtomicLong();
-    private final AtomicLong numOfWrittenMessages = new AtomicLong();
-    private final AtomicLong lastAccess = new AtomicLong(Long.MAX_VALUE);
-    private final AtomicBoolean busy = new AtomicBoolean();
-
-    protected HdfsOutputStream() {
-    }
-
-    public static HdfsOutputStream createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) throws IOException {
-        HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-        HdfsOutputStream oStream = new HdfsOutputStream();
-        oStream.fileType = endpointConfig.getFileType();
-        oStream.actualPath = hdfsPath;
-        oStream.info = hdfsInfoFactory.newHdfsInfoWithoutAuth(oStream.actualPath);
-
-        oStream.suffixedPath = oStream.actualPath + '.' + endpointConfig.getOpenedSuffix();
-
-        Path actualPath = new Path(oStream.actualPath);
-        boolean actualPathExists = oStream.info.getFileSystem().exists(actualPath);
-
-        if (endpointConfig.isWantAppend() || endpointConfig.isAppend()) {
-            if (actualPathExists) {
-                endpointConfig.setAppend(true);
-                oStream.info = hdfsInfoFactory.newHdfsInfoWithoutAuth(oStream.suffixedPath);
-                oStream.info.getFileSystem().rename(actualPath, new Path(oStream.suffixedPath));
-            } else {
-                endpointConfig.setAppend(false);
-            }
-        } else if (actualPathExists && !oStream.info.getFileSystem().isDirectory(actualPath)) { // only check if not directory
-            if (endpointConfig.isOverwrite()) {
-                oStream.info.getFileSystem().delete(actualPath, true);
-            } else {
-                throw new RuntimeCamelException("File [" + actualPath + "] already exists");
-            }
-        }
-
-        oStream.out = oStream.fileType.createOutputStream(oStream.suffixedPath, hdfsInfoFactory);
-        oStream.opened = true;
-        return oStream;
-    }
-
-    @Override
-    public void close() throws IOException {
-        if (opened) {
-            IOUtils.closeStream(out);
-            info.getFileSystem().rename(new Path(suffixedPath), new Path(actualPath));
-            opened = false;
-        }
-    }
-
-    public void append(Object key, Object value, Exchange exchange) {
-        try {
-            busy.set(true);
-            long nb = fileType.append(this, key, value, exchange);
-            numOfWrittenBytes.addAndGet(nb);
-            numOfWrittenMessages.incrementAndGet();
-            lastAccess.set(System.currentTimeMillis());
-        } finally {
-            busy.set(false);
-        }
-    }
-
-    public long getNumOfWrittenBytes() {
-        return numOfWrittenBytes.longValue();
-    }
-
-    public long getNumOfWrittenMessages() {
-        return numOfWrittenMessages.longValue();
-    }
-
-    public long getLastAccess() {
-        return lastAccess.longValue();
-    }
-
-    public String getActualPath() {
-        return actualPath;
-    }
-
-    public AtomicBoolean isBusy() {
-        return busy;
-    }
-
-    public Closeable getOut() {
-        return out;
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java
deleted file mode 100644
index 9c3000c02d8..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsProducer.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.security.auth.login.Configuration;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.Expression;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.camel.support.DefaultProducer;
-import org.apache.camel.util.IOHelper;
-import org.apache.camel.util.StringHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class HdfsProducer extends DefaultProducer {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsProducer.class);
-
-    private final HdfsConfiguration config;
-    private final StringBuilder hdfsPath;
-    private final AtomicBoolean idle = new AtomicBoolean();
-    private volatile ScheduledExecutorService scheduler;
-    private volatile HdfsOutputStream oStream;
-
-    public static final class SplitStrategy {
-        private SplitStrategyType type;
-        private long value;
-
-        public SplitStrategy(SplitStrategyType type, long value) {
-            this.type = type;
-            this.value = value;
-        }
-
-        public SplitStrategyType getType() {
-            return type;
-        }
-
-        public long getValue() {
-            return value;
-        }
-    }
-
-    public enum SplitStrategyType {
-        BYTES {
-            @Override
-            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
-                return oldOstream.getNumOfWrittenBytes() >= value;
-            }
-        },
-
-        MESSAGES {
-            @Override
-            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
-                return oldOstream.getNumOfWrittenMessages() >= value;
-            }
-        },
-
-        IDLE {
-            @Override
-            public boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer) {
-                return producer.idle.get();
-            }
-        };
-
-        public abstract boolean split(HdfsOutputStream oldOstream, long value, HdfsProducer producer);
-    }
-
-    public HdfsProducer(HdfsEndpoint endpoint, HdfsConfiguration config) {
-        super(endpoint);
-        this.config = config;
-        this.hdfsPath = config.getFileSystemType().getHdfsPath(config);
-    }
-
-    @Override
-    public HdfsEndpoint getEndpoint() {
-        return (HdfsEndpoint) super.getEndpoint();
-    }
-
-    @Override
-    protected void doStart() {
-        // need to remember the JAAS auth configuration as Hadoop overrides it, which would otherwise break authentication afterwards
-        Configuration auth = HdfsComponent.getJAASConfiguration();
-        try {
-            super.doStart();
-
-            // setup hdfs if configured to do on startup
-            if (getEndpoint().getConfig().isConnectOnStartup()) {
-                oStream = setupHdfs(true);
-            }
-
-            Optional<SplitStrategy> idleStrategy = tryFindIdleStrategy(config.getSplitStrategies());
-            if (idleStrategy.isPresent()) {
-                scheduler = getEndpoint().getCamelContext().getExecutorServiceManager().newSingleThreadScheduledExecutor(this,
-                        "HdfsIdleCheck");
-                LOG.debug("Creating IdleCheck task scheduled to run every {} millis", config.getCheckIdleInterval());
-                scheduler.scheduleAtFixedRate(new IdleCheck(idleStrategy.get()), config.getCheckIdleInterval(),
-                        config.getCheckIdleInterval(), TimeUnit.MILLISECONDS);
-            }
-        } catch (Exception e) {
-            LOG.warn("Failed to start the HDFS producer. Caused by: [{}]", e.getMessage(), e);
-            throw new RuntimeCamelException(e);
-        } finally {
-            HdfsComponent.setJAASConfiguration(auth);
-        }
-    }
-
-    private synchronized HdfsOutputStream setupHdfs(boolean onStartup) throws IOException {
-        if (oStream != null) {
-            return oStream;
-        }
-
-        StringBuilder actualPath = new StringBuilder(hdfsPath);
-        if (config.hasSplitStrategies()) {
-            actualPath = newFileName();
-        }
-
-        String hdfsFsDescription = config.getFileSystemLabel(actualPath.toString());
-
-        // if we are starting up then log at INFO level, otherwise log at DEBUG level so we do not flood the log
-        if (onStartup) {
-            LOG.info("Connecting to hdfs file-system {} (may take a while if connection is not available)", hdfsFsDescription);
-        } else {
-            LOG.debug("Connecting to hdfs file-system {} (may take a while if connection is not available)", hdfsFsDescription);
-        }
-
-        HdfsInfoFactory hdfsInfoFactory = new HdfsInfoFactory(config);
-        HdfsOutputStream answer = HdfsOutputStream.createOutputStream(actualPath.toString(), hdfsInfoFactory);
-
-        if (onStartup) {
-            LOG.info("Connected to hdfs file-system {}", hdfsFsDescription);
-        } else {
-            LOG.debug("Connected to hdfs file-system {}", hdfsFsDescription);
-        }
-
-        return answer;
-    }
-
-    private Optional<SplitStrategy> tryFindIdleStrategy(List<SplitStrategy> strategies) {
-        for (SplitStrategy strategy : strategies) {
-            if (strategy.type == SplitStrategyType.IDLE) {
-                return Optional.of(strategy);
-            }
-        }
-        return Optional.empty();
-    }
-
-    @Override
-    protected void doStop() throws Exception {
-        super.doStop();
-        if (scheduler != null) {
-            getEndpoint().getCamelContext().getExecutorServiceManager().shutdown(scheduler);
-            scheduler = null;
-        }
-        if (oStream != null) {
-            IOHelper.close(oStream, "output stream", LOG);
-            oStream = null;
-        }
-    }
-
-    @Override
-    public void process(Exchange exchange) throws Exception {
-        // need to remember the JAAS auth configuration as Hadoop overrides it, which would otherwise break authentication afterwards
-        Configuration auth = HdfsComponent.getJAASConfiguration();
-        try {
-            doProcess(exchange);
-        } finally {
-            HdfsComponent.setJAASConfiguration(auth);
-        }
-    }
-
-    void doProcess(Exchange exchange) throws IOException {
-        Object body = exchange.getIn().getBody();
-        Object key = exchange.getIn().getHeader(HdfsConstants.KEY);
-
-        HdfsInfoFactory hdfsInfoFactory = new HdfsInfoFactory(config);
-        // if an explicit filename is specified, close any existing stream and append the filename to the hdfsPath
-        if (exchange.getIn().getHeader(HdfsConstants.FILE_NAME) != null) {
-            if (oStream != null) {
-                IOHelper.close(oStream, "output stream", LOG);
-            }
-            StringBuilder actualPath = getHdfsPathUsingFileNameHeader(exchange);
-            oStream = HdfsOutputStream.createOutputStream(actualPath.toString(), hdfsInfoFactory);
-        } else if (oStream == null) {
-            // must have oStream
-            oStream = setupHdfs(false);
-        }
-
-        if (isSplitRequired(config.getSplitStrategies())) {
-            if (oStream != null) {
-                IOHelper.close(oStream, "output stream", LOG);
-            }
-            StringBuilder actualPath = newFileName();
-            oStream = HdfsOutputStream.createOutputStream(actualPath.toString(), hdfsInfoFactory);
-        }
-
-        String path = oStream.getActualPath();
-        LOG.trace("Writing body to hdfs-file {}", path);
-        oStream.append(key, body, exchange);
-
-        idle.set(false);
-
-        // close if we do not have idle checker task to do this for us
-        boolean close = scheduler == null;
-        // but the user may set a header to explicitly control the close
-        Boolean closeHeader = exchange.getIn().getHeader(HdfsConstants.HDFS_CLOSE, Boolean.class);
-        if (closeHeader != null) {
-            close = closeHeader;
-        }
-
-        // if there is no idle checker then we need to explicitly close the stream after usage
-        if (close) {
-            try {
-                LOG.trace("Closing stream");
-                oStream.close();
-                oStream = null;
-            } catch (IOException e) {
-                // ignore
-            }
-        }
-
-        LOG.debug("Wrote body to hdfs-file {}", path);
-    }
-
-    /**
-     * helper method to construct the hdfsPath from the CamelFileName String or Expression
-     */
-    private StringBuilder getHdfsPathUsingFileNameHeader(Exchange exchange) {
-        StringBuilder actualPath = new StringBuilder(hdfsPath);
-        String fileName = "";
-        Object value = exchange.getIn().getHeader(HdfsConstants.FILE_NAME);
-        if (value instanceof String) {
-            fileName = exchange.getContext().getTypeConverter().convertTo(String.class, exchange, value);
-        } else if (value instanceof Expression) {
-            fileName = ((Expression) value).evaluate(exchange, String.class);
-        }
-        return actualPath.append(fileName);
-    }
-
-    private boolean isSplitRequired(List<SplitStrategy> strategies) {
-        boolean split = false;
-        for (SplitStrategy splitStrategy : strategies) {
-            split |= splitStrategy.getType().split(oStream, splitStrategy.value, this);
-        }
-        return split;
-    }
-
-    private StringBuilder newFileName() {
-        StringBuilder actualPath = new StringBuilder(hdfsPath);
-        actualPath.append(StringHelper.sanitize(getEndpoint().getCamelContext().getUuidGenerator().generateUuid()));
-        return actualPath;
-    }
-
-    /**
-     * Idle check background task
-     */
-    private final class IdleCheck implements Runnable {
-
-        private final SplitStrategy strategy;
-
-        private IdleCheck(SplitStrategy strategy) {
-            this.strategy = strategy;
-        }
-
-        @Override
-        public void run() {
-            // only run if oStream has been created
-            if (oStream == null) {
-                return;
-            }
-
-            LOG.trace("IdleCheck running");
-
-            if (System.currentTimeMillis() - oStream.getLastAccess() > strategy.value && !idle.get()
-                    && !oStream.isBusy().get()) {
-                idle.set(true);
-                try {
-                    LOG.trace("Closing stream as idle");
-                    oStream.close();
-                } catch (IOException e) {
-                    // ignore
-                }
-            }
-        }
-
-        @Override
-        public String toString() {
-            return "IdleCheck";
-        }
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsSequenceFileHandler.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsSequenceFileHandler.java
deleted file mode 100644
index efc14a882c2..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsSequenceFileHandler.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.RuntimeCamelException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import static org.apache.camel.component.hdfs.HdfsHelper.asCompressionType;
-
-class HdfsSequenceFileHandler extends DefaultHdfsFile<SequenceFile.Writer, SequenceFile.Reader> {
-
-    @Override
-    public SequenceFile.Writer createOutputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            SequenceFile.Writer rout;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            HdfsConfiguration endpointConfig = hdfsInfoFactory.getEndpointConfig();
-            Class<?> keyWritableClass = endpointConfig.getKeyType().getWritableClass();
-            Class<?> valueWritableClass = endpointConfig.getValueType().getWritableClass();
-            rout = SequenceFile.createWriter(
-                    hdfsInfo.getConfiguration(),
-                    SequenceFile.Writer.file(hdfsInfo.getPath()),
-                    SequenceFile.Writer.keyClass(keyWritableClass),
-                    SequenceFile.Writer.valueClass(valueWritableClass),
-                    SequenceFile.Writer.bufferSize(endpointConfig.getBufferSize()),
-                    SequenceFile.Writer.replication(endpointConfig.getReplication()),
-                    SequenceFile.Writer.blockSize(endpointConfig.getBlockSize()),
-                    SequenceFile.Writer.compression(asCompressionType(endpointConfig.getCompressionType()),
-                            endpointConfig.getCompressionCodec().getCodec()),
-                    SequenceFile.Writer.progressable(() -> {
-                    }),
-                    SequenceFile.Writer.metadata(new SequenceFile.Metadata()));
-            return rout;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long append(HdfsOutputStream hdfsOutputStream, Object key, Object value, Exchange exchange) {
-        try {
-            Holder<Integer> keySize = new Holder<>();
-            Writable keyWritable = getWritable(key, exchange, keySize);
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = getWritable(value, exchange, valueSize);
-            SequenceFile.Writer writer = (SequenceFile.Writer) hdfsOutputStream.getOut();
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-            return Long.sum(keySize.getValue(), valueSize.getValue());
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public SequenceFile.Reader createInputStream(String hdfsPath, HdfsInfoFactory hdfsInfoFactory) {
-        try {
-            SequenceFile.Reader rin;
-            HdfsInfo hdfsInfo = hdfsInfoFactory.newHdfsInfo(hdfsPath);
-            rin = new SequenceFile.Reader(hdfsInfo.getConfiguration(), SequenceFile.Reader.file(hdfsInfo.getPath()));
-            return rin;
-        } catch (IOException ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-    @Override
-    public long next(HdfsInputStream hdfsInputStream, Holder<Object> key, Holder<Object> value) {
-        try {
-            SequenceFile.Reader reader = (SequenceFile.Reader) hdfsInputStream.getIn();
-            Holder<Integer> keySize = new Holder<>();
-            Writable keyWritable = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), new Configuration());
-            Holder<Integer> valueSize = new Holder<>();
-            Writable valueWritable = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), new Configuration());
-            if (reader.next(keyWritable, valueWritable)) {
-                key.setValue(getObject(keyWritable, keySize));
-                value.setValue(getObject(valueWritable, valueSize));
-                return Long.sum(keySize.getValue(), valueSize.getValue());
-            } else {
-                return 0;
-            }
-        } catch (Exception ex) {
-            throw new RuntimeCamelException(ex);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsWritableFactories.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsWritableFactories.java
deleted file mode 100644
index a53f0e1d80b..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/HdfsWritableFactories.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-
-import org.apache.camel.RuntimeCamelException;
-import org.apache.camel.TypeConverter;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.ShortWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-
-public class HdfsWritableFactories {
-
-    interface HdfsWritableFactory {
-
-        Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size);
-
-        Object read(Writable writable, Holder<Integer> size);
-    }
-
-    public static final class HdfsNullWritableFactory implements HdfsWritableFactory {
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(0);
-            return NullWritable.get();
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(0);
-            return null;
-        }
-    }
-
-    public static final class HdfsByteWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 1;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            ByteWritable writable = new ByteWritable();
-            writable.set(typeConverter.convertTo(Byte.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((ByteWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsBooleanWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 1;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            BooleanWritable writable = new BooleanWritable();
-            writable.set(typeConverter.convertTo(Boolean.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((BooleanWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsBytesWritableFactory implements HdfsWritableFactory {
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            BytesWritable writable = new BytesWritable();
-            ByteBuffer bb = (ByteBuffer) value;
-            writable.set(bb.array(), 0, bb.array().length);
-            size.setValue(bb.array().length);
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(((BytesWritable) writable).getLength());
-            ByteBuffer bb = ByteBuffer.allocate(size.getValue());
-            bb.put(((BytesWritable) writable).getBytes(), 0, size.getValue());
-            return bb;
-        }
-    }
-
-    public static final class HdfsDoubleWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 8;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            DoubleWritable writable = new DoubleWritable();
-            writable.set(typeConverter.convertTo(Double.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((DoubleWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsFloatWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 4;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            FloatWritable writable = new FloatWritable();
-            writable.set(typeConverter.convertTo(Float.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((FloatWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsShortWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 2;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            ShortWritable writable = new ShortWritable();
-            writable.set(typeConverter.convertTo(Short.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((ShortWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsIntWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 4;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            IntWritable writable = new IntWritable();
-            writable.set(typeConverter.convertTo(Integer.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((IntWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsLongWritableFactory implements HdfsWritableFactory {
-
-        private static final int SIZE = 8;
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            size.setValue(SIZE);
-            LongWritable writable = new LongWritable();
-            writable.set(typeConverter.convertTo(Long.class, value));
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(SIZE);
-            return ((LongWritable) writable).get();
-        }
-    }
-
-    public static final class HdfsTextWritableFactory implements HdfsWritableFactory {
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            Text writable = new Text();
-            writable.set(typeConverter.convertTo(String.class, value));
-            size.setValue(writable.getBytes().length);
-            return writable;
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(((Text) writable).getLength());
-            return writable.toString();
-        }
-    }
-
-    public static final class HdfsObjectWritableFactory implements HdfsWritableFactory {
-
-        @Override
-        public Writable create(Object value, TypeConverter typeConverter, Holder<Integer> size) {
-            try (InputStream is = typeConverter.convertTo(InputStream.class, value)) {
-                ByteArrayOutputStream bos = new ByteArrayOutputStream();
-                IOUtils.copyBytes(is, bos, HdfsConstants.DEFAULT_BUFFERSIZE, false);
-                BytesWritable writable = new BytesWritable();
-                writable.set(bos.toByteArray(), 0, bos.toByteArray().length);
-                size.setValue(bos.toByteArray().length);
-                return writable;
-            } catch (IOException ex) {
-                throw new RuntimeCamelException(ex);
-            }
-        }
-
-        @Override
-        public Object read(Writable writable, Holder<Integer> size) {
-            size.setValue(0);
-            return null;
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/Holder.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/Holder.java
deleted file mode 100644
index 458f8eaf88f..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/Holder.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-public final class Holder<T> {
-
-    /**
-     * The value contained in the holder.
-     **/
-    private T value;
-
-    /**
-     * Creates a new holder with a <code>null</code> value.
-     **/
-    public Holder() {
-    }
-
-    /**
-     * Create a new holder with the specified value.
-     *
-     * @param value The value to be stored in the holder.
-     **/
-    public Holder(T value) {
-        this.value = value;
-    }
-
-    public T getValue() {
-        return value;
-    }
-
-    public void setValue(T value) {
-        this.value = value;
-    }
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/WritableType.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/WritableType.java
deleted file mode 100644
index 35d9c8af017..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/WritableType.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.ShortWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-
-public enum WritableType {
-
-    NULL {
-        @Override
-        public Class<NullWritable> getWritableClass() {
-            return NullWritable.class;
-        }
-    },
-
-    BOOLEAN {
-        @Override
-        public Class<BooleanWritable> getWritableClass() {
-            return BooleanWritable.class;
-        }
-    },
-
-    BYTE {
-        @Override
-        public Class<ByteWritable> getWritableClass() {
-            return ByteWritable.class;
-        }
-    },
-
-    SHORT {
-        @Override
-        public Class<ShortWritable> getWritableClass() {
-            return ShortWritable.class;
-        }
-    },
-
-    INT {
-        @Override
-        public Class<IntWritable> getWritableClass() {
-            return IntWritable.class;
-        }
-    },
-
-    FLOAT {
-        @Override
-        public Class<FloatWritable> getWritableClass() {
-            return FloatWritable.class;
-        }
-    },
-
-    LONG {
-        @Override
-        public Class<LongWritable> getWritableClass() {
-            return LongWritable.class;
-        }
-    },
-
-    DOUBLE {
-        @Override
-        public Class<DoubleWritable> getWritableClass() {
-            return DoubleWritable.class;
-        }
-    },
-
-    TEXT {
-        @Override
-        public Class<Text> getWritableClass() {
-            return Text.class;
-        }
-    },
-
-    BYTES {
-        @Override
-        public Class<BytesWritable> getWritableClass() {
-            return BytesWritable.class;
-        }
-    };
-
-    @SuppressWarnings("rawtypes")
-    public abstract Class<? extends WritableComparable> getWritableClass();
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthentication.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthentication.java
deleted file mode 100644
index 7facc4a9d3d..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthentication.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.kerberos;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import static java.lang.String.format;
-
-public class KerberosAuthentication {
-
-    private final String username;
-    private final String keyTabFileLocation;
-    private final Configuration configuration;
-
-    /**
-     * @param configuration      - HDFS configuration
-     * @param username           - principal used to authenticate to the Kerberos server
-     * @param keyTabFileLocation - keytab file location
-     */
-    public KerberosAuthentication(Configuration configuration, String username, String keyTabFileLocation) {
-        this.configuration = configuration;
-        this.username = username;
-        this.keyTabFileLocation = keyTabFileLocation;
-    }
-
-    /**
-     * In order to connect to a Hadoop cluster using Kerberos you need to add your own filesystem to the cache of the
-     * FileSystem component. This is done by setting the URI that you use in your Camel route as the URI that is used to
-     * set up the connection. The URI is used as the key when adding it to the cache (the default behaviour of the static
-     * FileSystem.get(URI, Configuration) method).
-     *
-     * @throws IOException - In case of error
-     */
-    public void loginWithKeytab() throws IOException {
-        if (!new File(keyTabFileLocation).exists()) {
-            throw new FileNotFoundException(format("KeyTab file [%s] could not be found.", keyTabFileLocation));
-        }
-        // we need to log in, otherwise we cannot connect to the filesystem later on
-        UserGroupInformation.setConfiguration(configuration);
-        UserGroupInformation.loginUserFromKeytab(username, keyTabFileLocation);
-    }
-
-}
diff --git a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilder.java b/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilder.java
deleted file mode 100644
index 2a8227dd2ba..00000000000
--- a/components/camel-hdfs/src/main/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilder.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.kerberos;
-
-import java.io.File;
-
-import org.apache.camel.component.hdfs.HdfsConfiguration;
-import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class KerberosConfigurationBuilder {
-
-    private static final Logger LOG = LoggerFactory.getLogger(KerberosConfigurationBuilder.class);
-
-    private static final String KERBEROS_5_SYS_ENV = "java.security.krb5.conf";
-    private static final String AUTHENTICATION_MODE = "hadoop.security.authentication";
-
-    private KerberosConfigurationBuilder() {
-        // hidden
-    }
-
-    /**
-     * Add all the Kerberos-specific settings needed for this authentication mode.
-     *
-     * @param endpointConfig - configuration with the Kerberos settings configured on the endpoint
-     */
-    public static void withKerberosConfiguration(Configuration configuration, HdfsConfiguration endpointConfig) {
-        setKerberosConfigFile(endpointConfig.getKerberosConfigFileLocation());
-        configuration.set(AUTHENTICATION_MODE, "kerberos");
-
-    }
-
-    /**
-     * To use Kerberos authentication, set the 'java.security.krb5.conf' system property to an existing file. If the
-     * property is already set, warn if it differs from the specified parameter.
-     *
-     * @param kerberosConfigFileLocation - krb5.conf file
-     *                                   (https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
-     */
-    public static void setKerberosConfigFile(String kerberosConfigFileLocation) {
-        if (!new File(kerberosConfigFileLocation).exists()) {
-            LOG.warn("Kerberos configuration file [{}}] could not be found.", kerberosConfigFileLocation);
-            return;
-        }
-
-        String krb5Conf = System.getProperty(KERBEROS_5_SYS_ENV);
-        if (krb5Conf == null || krb5Conf.isEmpty()) {
-            System.setProperty(KERBEROS_5_SYS_ENV, kerberosConfigFileLocation);
-        } else if (!krb5Conf.equalsIgnoreCase(kerberosConfigFileLocation)) {
-            LOG.warn("[{}] was already configured with: [{}] config file", KERBEROS_5_SYS_ENV, krb5Conf);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/FromFileToHdfsTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/FromFileToHdfsTest.java
deleted file mode 100644
index c63d50facf4..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/FromFileToHdfsTest.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.File;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.builder.NotifyBuilder;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.apache.camel.test.junit5.TestSupport.deleteDirectory;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-/**
- *
- */
-public class FromFileToHdfsTest extends HdfsTestSupport {
-
-    private static final Path TEMP_DIR = new Path(new File("target/outbox/").getAbsolutePath());
-
-    @Override
-    @BeforeEach
-    public void setUp() throws Exception {
-        checkTest();
-        deleteDirectory("target/inbox");
-        deleteDirectory("target/outbox");
-        super.setUp();
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-        Configuration conf = new Configuration();
-        Path dir = new Path("target/outbox");
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-    }
-
-    @Test
-    public void testFileToHdfs() {
-        NotifyBuilder notify = new NotifyBuilder(context).whenDone(1).create();
-
-        template.sendBodyAndHeader("file:target/inbox", "Hello World", Exchange.FILE_NAME, "hello.txt");
-
-        notify.matchesWaitTime();
-
-        File delete = new File("target/inbox/hello.txt");
-        assertFalse(delete.exists(), "File should be deleted " + delete);
-
-        File create = new File(TEMP_DIR + "/output.txt");
-        assertTrue(create.exists(), "File should be created " + create);
-    }
-
-    @Test
-    public void testTwoFilesToHdfs() {
-        NotifyBuilder notify = new NotifyBuilder(context).whenDone(2).create();
-
-        template.sendBodyAndHeader("file:target/inbox", "Hello World", Exchange.FILE_NAME, "hello.txt");
-        template.sendBodyAndHeader("file:target/inbox", "Bye World", Exchange.FILE_NAME, "bye.txt");
-
-        notify.matchesWaitTime();
-
-        File delete = new File("target/inbox/hello.txt");
-        assertFalse(delete.exists(), "File should be deleted " + delete);
-        delete = new File("target/inbox/bye.txt");
-        assertFalse(delete.exists(), "File should be deleted " + delete);
-
-        File create = new File(TEMP_DIR + "/output.txt");
-        assertTrue(create.exists(), "File should be created " + create);
-    }
-
-    @Override
-    protected RouteBuilder createRouteBuilder() {
-        return new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("file:target/inbox?delete=true")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri() + "/output.txt?fileSystemType=LOCAL");
-            }
-        };
-    }
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HaConfigurationBuilderTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HaConfigurationBuilderTest.java
deleted file mode 100644
index 76b8a99dfe7..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HaConfigurationBuilderTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.junit.jupiter.api.Test;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-
-public class HaConfigurationBuilderTest {
-
-    @Test
-    public void withClusterConfiguration() {
-        // given
-        Configuration configuration = new Configuration();
-        String haClusterName = "haCluster";
-        List<String> namedNodes = Arrays.asList("kerb_node_01.example.com:8021", "kerb_node_02.example.com:8022");
-        int replicationFactor = 3;
-
-        // when
-        HaConfigurationBuilder.withClusterConfiguration(configuration, haClusterName, namedNodes, replicationFactor);
-
-        // then
-        assertThat(configuration, notNullValue());
-        assertThat(configuration.get(DFSConfigKeys.DFS_REPLICATION_KEY), is("3"));
-        assertThat(configuration.get(DFSConfigKeys.DFS_NAMESERVICES), is("haCluster"));
-        assertThat(configuration.get("dfs.ha.namenodes.haCluster"), is("kerb_node_01_example_com,kerb_node_02_example_com"));
-        assertThat(configuration.get("dfs.namenode.rpc-address.haCluster.kerb_node_01_example_com"),
-                is("kerb_node_01.example.com:8021"));
-        assertThat(configuration.get("dfs.namenode.rpc-address.haCluster.kerb_node_02_example_com"),
-                is("kerb_node_02.example.com:8022"));
-        assertThat(configuration.get("dfs.client.failover.proxy.provider.haCluster"),
-                is("org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"));
-        assertThat(configuration.get("fs.defaultFS"), is("hdfs://haCluster"));
-    }
-
-    @Test
-    public void getSanitizedClusterNameWithNull() {
-        // given
-        String haClusterName = null;
-
-        // when
-        String actual = HaConfigurationBuilder.getSanitizedClusterName(haClusterName);
-
-        // then
-        assertThat(actual, notNullValue());
-        assertThat(actual, is("hfdsNamedService"));
-    }
-
-    @Test
-    public void getSanitizedClusterNameWithHostName() {
-        // given
-        String haClusterName = "this.is.a.cluster.host";
-
-        // when
-        String actual = HaConfigurationBuilder.getSanitizedClusterName(haClusterName);
-
-        // then
-        assertThat(actual, notNullValue());
-        assertThat(actual, is("this_is_a_cluster_host"));
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsConsumerTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsConsumerTest.java
deleted file mode 100644
index af3c5c692a0..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsConsumerTest.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.ByteArrayOutputStream;
-import java.io.InputStream;
-
-import org.apache.camel.CamelContext;
-import org.apache.camel.Exchange;
-import org.apache.camel.Processor;
-import org.apache.camel.impl.DefaultCamelContext;
-import org.apache.camel.impl.engine.PrototypeExchangeFactory;
-import org.apache.camel.support.DefaultExchange;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.mockito.ArgumentCaptor;
-
-import static org.apache.camel.component.hdfs.HdfsConstants.DEFAULT_OPENED_SUFFIX;
-import static org.apache.camel.component.hdfs.HdfsConstants.DEFAULT_READ_SUFFIX;
-import static org.apache.camel.component.hdfs.HdfsTestSupport.CWD;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class HdfsConsumerTest {
-
-    private HdfsEndpoint endpoint;
-    private Processor processor;
-    private HdfsConfiguration endpointConfig;
-    private HdfsInfoFactory hdfsInfoFactory;
-    private CamelContext context;
-    private FileSystem fileSystem;
-    private Configuration configuration;
-
-    private HdfsConsumer underTest;
-
-    @BeforeEach
-    public void setUp() throws Exception {
-        endpoint = mock(HdfsEndpoint.class);
-        processor = mock(Processor.class);
-        endpointConfig = mock(HdfsConfiguration.class);
-        hdfsInfoFactory = mock(HdfsInfoFactory.class);
-
-        HdfsInfo hdfsInfo = mock(HdfsInfo.class);
-        fileSystem = mock(FileSystem.class);
-        configuration = mock(Configuration.class);
-        Path path = mock(Path.class);
-
-        when(hdfsInfoFactory.newHdfsInfo(anyString())).thenReturn(hdfsInfo);
-        when(hdfsInfoFactory.getEndpointConfig()).thenReturn(endpointConfig);
-        when(hdfsInfoFactory.newHdfsInfo(anyString())).thenReturn(hdfsInfo);
-
-        when(hdfsInfo.getFileSystem()).thenReturn(fileSystem);
-        when(hdfsInfo.getConfiguration()).thenReturn(configuration);
-        when(hdfsInfo.getPath()).thenReturn(path);
-
-        when(endpointConfig.getReadSuffix()).thenReturn(DEFAULT_READ_SUFFIX);
-        when(endpointConfig.getOpenedSuffix()).thenReturn(DEFAULT_OPENED_SUFFIX);
-
-        context = new DefaultCamelContext();
-        // this test uses mocks and it is easier to test with a prototype-scoped exchange factory
-        context.getCamelContextExtension().setExchangeFactory(new PrototypeExchangeFactory());
-    }
-
-    @Test
-    public void doStartWithoutHdfsSetup() throws Exception {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-        when(endpointConfig.getPath()).thenReturn(hdfsPath);
-        when(endpointConfig.isConnectOnStartup()).thenReturn(false);
-        when(endpoint.getCamelContext()).thenReturn(context);
-        when(endpoint.getEndpointUri()).thenReturn(hdfsPath);
-
-        underTest = new HdfsConsumer(endpoint, processor, endpointConfig, hdfsInfoFactory, new StringBuilder(hdfsPath));
-
-        // when
-        underTest.doStart();
-
-        // then
-        verify(hdfsInfoFactory, times(0)).newHdfsInfo(anyString());
-    }
-
-    @Test
-    public void doStartWithHdfsSetup() throws Exception {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-        when(endpointConfig.getPath()).thenReturn(hdfsPath);
-        when(endpointConfig.isConnectOnStartup()).thenReturn(true);
-        when(endpointConfig.getFileSystemLabel(anyString())).thenReturn("TEST_FS_LABEL");
-        when(endpoint.getCamelContext()).thenReturn(context);
-        when(endpoint.getEndpointUri()).thenReturn(hdfsPath);
-
-        underTest = new HdfsConsumer(endpoint, processor, endpointConfig, hdfsInfoFactory, new StringBuilder(hdfsPath));
-
-        // when
-        underTest.doStart();
-
-        // then
-        verify(hdfsInfoFactory, times(1)).newHdfsInfo(hdfsPath);
-    }
-
-    @Test
-    public void doPollFromExistingLocalFile() throws Exception {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.getPath()).thenReturn(hdfsPath);
-        when(endpointConfig.getOwner()).thenReturn("spiderman");
-        when(endpointConfig.isConnectOnStartup()).thenReturn(true);
-        when(endpointConfig.getFileSystemLabel(anyString())).thenReturn("TEST_FS_LABEL");
-        when(endpointConfig.getChunkSize()).thenReturn(100 * 1000);
-        when(endpointConfig.getMaxMessagesPerPoll()).thenReturn(10);
-        when(endpoint.getCamelContext()).thenReturn(context);
-        when(endpoint.createExchange()).thenReturn(new DefaultExchange(context));
-        when(endpoint.getEndpointUri()).thenReturn(hdfsPath);
-
-        when(fileSystem.isFile(any(Path.class))).thenReturn(true);
-
-        FileStatus[] fileStatuses = new FileStatus[1];
-        FileStatus fileStatus = mock(FileStatus.class);
-        fileStatuses[0] = fileStatus;
-        when(fileSystem.globStatus(any(Path.class))).thenReturn(fileStatuses);
-        when(fileStatus.getPath()).thenReturn(new Path(hdfsPath));
-        when(fileStatus.isFile()).thenReturn(true);
-        when(fileStatus.isDirectory()).thenReturn(false);
-        when(fileStatus.getOwner()).thenReturn("spiderman");
-
-        String normalFile = CWD.getAbsolutePath() + "/src/test/resources/hdfs/normal_file.txt";
-        FSDataInputStream fsDataInputStream = new FSDataInputStream(new MockDataInputStream(normalFile));
-        when(fileSystem.rename(any(Path.class), any(Path.class))).thenReturn(true);
-        when(fileSystem.open(any(Path.class))).thenReturn(fsDataInputStream);
-
-        ArgumentCaptor<Exchange> exchangeCaptor = ArgumentCaptor.forClass(Exchange.class);
-
-        underTest = new HdfsConsumer(endpoint, processor, endpointConfig, hdfsInfoFactory, new StringBuilder(hdfsPath));
-        underTest.start();
-
-        // when
-        int actual = underTest.doPoll();
-
-        // then
-        assertThat(actual, is(1));
-        verify(processor, times(1)).process(exchangeCaptor.capture());
-        Exchange exchange = exchangeCaptor.getValue();
-        assertThat(exchange, notNullValue());
-
-        ByteArrayOutputStream body = exchange.getIn().getBody(ByteArrayOutputStream.class);
-        assertThat(body, notNullValue());
-        assertThat(body.toString(), startsWith(
-                "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam eget fermentum arcu, vel dignissim ipsum."));
-    }
-
-    @Test
-    public void doPollFromExistingLocalFileWithStreamDownload() throws Exception {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.getPath()).thenReturn(hdfsPath);
-        when(endpointConfig.getOwner()).thenReturn("spiderman");
-        when(endpointConfig.isConnectOnStartup()).thenReturn(true);
-        when(endpointConfig.getFileSystemLabel(anyString())).thenReturn("TEST_FS_LABEL");
-        when(endpointConfig.getChunkSize()).thenReturn(100 * 1000);
-        when(endpointConfig.isStreamDownload()).thenReturn(true);
-        when(endpointConfig.getMaxMessagesPerPoll()).thenReturn(10);
-        when(endpoint.getCamelContext()).thenReturn(context);
-        when(endpoint.createExchange()).thenReturn(new DefaultExchange(context));
-        when(endpoint.getEndpointUri()).thenReturn(hdfsPath);
-
-        when(fileSystem.isFile(any(Path.class))).thenReturn(true);
-
-        FileStatus[] fileStatuses = new FileStatus[1];
-        FileStatus fileStatus = mock(FileStatus.class);
-        fileStatuses[0] = fileStatus;
-        when(fileSystem.globStatus(any(Path.class))).thenReturn(fileStatuses);
-        when(fileStatus.getPath()).thenReturn(new Path(hdfsPath));
-        when(fileStatus.isFile()).thenReturn(true);
-        when(fileStatus.isDirectory()).thenReturn(false);
-        when(fileStatus.getOwner()).thenReturn("spiderman");
-
-        String normalFile = CWD.getAbsolutePath() + "/src/test/resources/hdfs/normal_file.txt";
-        FSDataInputStream fsDataInputStream = new FSDataInputStream(new MockDataInputStream(normalFile));
-        when(fileSystem.rename(any(Path.class), any(Path.class))).thenReturn(true);
-        when(fileSystem.open(any(Path.class))).thenReturn(fsDataInputStream);
-
-        ArgumentCaptor<Exchange> exchangeCaptor = ArgumentCaptor.forClass(Exchange.class);
-
-        underTest = new HdfsConsumer(endpoint, processor, endpointConfig, hdfsInfoFactory, new StringBuilder(hdfsPath));
-        underTest.start();
-
-        // when
-        int actual = underTest.doPoll();
-
-        // then
-        assertThat(actual, is(1));
-        verify(processor, times(1)).process(exchangeCaptor.capture());
-        Exchange exchange = exchangeCaptor.getValue();
-        assertThat(exchange, notNullValue());
-
-        InputStream body = (InputStream) exchange.getIn().getBody();
-        assertThat(body, notNullValue());
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsInputStreamTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsInputStreamTest.java
deleted file mode 100644
index 3f07e1b8689..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsInputStreamTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.mockito.ArgumentCaptor;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.nullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class HdfsInputStreamTest {
-
-    private HdfsInfoFactory hdfsInfoFactory;
-    private HdfsConfiguration endpointConfig;
-    private FileSystem fileSystem;
-    private Configuration configuration;
-
-    private HdfsInputStream underTest;
-
-    @BeforeEach
-    public void setUp() throws Exception {
-        hdfsInfoFactory = mock(HdfsInfoFactory.class);
-        HdfsInfo hdfsInfo = mock(HdfsInfo.class);
-        endpointConfig = mock(HdfsConfiguration.class);
-
-        fileSystem = mock(FileSystem.class);
-        configuration = mock(Configuration.class);
-        Path path = mock(Path.class);
-        FileStatus fileStatus = mock(FileStatus.class);
-
-        when(hdfsInfoFactory.newHdfsInfo(anyString())).thenReturn(hdfsInfo);
-        when(hdfsInfoFactory.newHdfsInfoWithoutAuth(anyString())).thenReturn(hdfsInfo);
-        when(hdfsInfoFactory.getEndpointConfig()).thenReturn(endpointConfig);
-
-        when(hdfsInfo.getFileSystem()).thenReturn(fileSystem);
-        when(hdfsInfo.getConfiguration()).thenReturn(configuration);
-        when(hdfsInfo.getPath()).thenReturn(path);
-
-        when(path.getFileSystem(configuration)).thenReturn(fileSystem);
-
-        when(fileSystem.getFileStatus(path)).thenReturn(fileStatus);
-        when(fileStatus.getLen()).thenReturn(1000L);
-    }
-
-    @Test
-    public void createInputStreamForLocalNormalFile() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataInputStream fsDataInputStream = mock(FSDataInputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-
-        when(fileSystem.rename(any(Path.class), any(Path.class))).thenReturn(true);
-        when(fileSystem.open(any(Path.class))).thenReturn(fsDataInputStream);
-
-        ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-
-        // when
-        underTest = HdfsInputStream.createInputStream(hdfsPath, hdfsInfoFactory);
-
-        // then
-        assertThat(underTest, notNullValue());
-        verify(fileSystem, times(1)).rename(any(Path.class), pathCaptor.capture());
-        assertThat(pathCaptor.getValue().toString(), is("hdfs://localhost/target/test/multiple-consumers.null"));
-
-        assertThat(underTest.getNumOfReadBytes(), is(0L));
-        assertThat(underTest.getNumOfReadMessages(), is(0L));
-        assertThat(underTest.getActualPath(), is(hdfsPath));
-        assertThat(underTest.getChunkSize(), is(0));
-        assertThat(underTest.isOpened(), is(true));
-    }
-
-    @Test
-    public void createInputStreamForMissingNormalFile() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataInputStream fsDataInputStream = mock(FSDataInputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.getFileSystemType()).thenReturn(HdfsFileSystemType.LOCAL);
-
-        when(fileSystem.rename(any(Path.class), any(Path.class))).thenReturn(false);
-        when(fileSystem.open(any(Path.class))).thenReturn(fsDataInputStream);
-
-        ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-
-        // when
-        underTest = HdfsInputStream.createInputStream(hdfsPath, hdfsInfoFactory);
-
-        // then
-        assertThat(underTest, nullValue());
-        verify(fileSystem, times(1)).rename(any(Path.class), any(Path.class));
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsOutputStreamTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsOutputStreamTest.java
deleted file mode 100644
index c17d1c54c4e..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsOutputStreamTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Progressable;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.mockito.ArgumentCaptor;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyShort;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class HdfsOutputStreamTest {
-
-    private HdfsInfoFactory hdfsInfoFactory;
-    private HdfsConfiguration endpointConfig;
-    private FileSystem fileSystem;
-
-    private HdfsOutputStream underTest;
-
-    @BeforeEach
-    public void setUp() throws Exception {
-        hdfsInfoFactory = mock(HdfsInfoFactory.class);
-        HdfsInfo hdfsInfo = mock(HdfsInfo.class);
-        endpointConfig = mock(HdfsConfiguration.class);
-
-        fileSystem = mock(FileSystem.class);
-        Configuration configuration = mock(Configuration.class);
-        Path path = mock(Path.class);
-
-        when(hdfsInfoFactory.newHdfsInfo(anyString())).thenReturn(hdfsInfo);
-        when(hdfsInfoFactory.newHdfsInfoWithoutAuth(anyString())).thenReturn(hdfsInfo);
-        when(hdfsInfoFactory.getEndpointConfig()).thenReturn(endpointConfig);
-
-        when(hdfsInfo.getFileSystem()).thenReturn(fileSystem);
-        when(hdfsInfo.getConfiguration()).thenReturn(configuration);
-        when(hdfsInfo.getPath()).thenReturn(path);
-    }
-
-    @Test
-    public void createOutputStreamForExistingNormalFileWithAppend() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.isWantAppend()).thenReturn(true);
-        when(endpointConfig.isAppend()).thenReturn(false);
-
-        when(fileSystem.exists(any(Path.class))).thenReturn(true);
-        when(fileSystem.create(any(Path.class), anyBoolean(), anyInt(), anyShort(), anyLong(), any(Progressable.class)))
-                .thenReturn(fsDataOutputStream);
-
-        ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-
-        // when
-        underTest = HdfsOutputStream.createOutputStream(hdfsPath, hdfsInfoFactory);
-
-        // then
-        assertThat(underTest, notNullValue());
-        verify(endpointConfig, times(1)).setAppend(true);
-        verify(fileSystem, times(1)).rename(any(Path.class), pathCaptor.capture());
-        assertThat(pathCaptor.getValue().toString(), is("hdfs://localhost/target/test/multiple-consumers.null"));
-    }
-
-    @Test
-    public void createOutputStreamForMissingNormalFileWithAppend() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.isWantAppend()).thenReturn(true);
-        when(endpointConfig.isAppend()).thenReturn(false);
-
-        when(fileSystem.exists(any(Path.class))).thenReturn(false);
-        when(fileSystem.create(any(Path.class), anyBoolean(), anyInt(), anyShort(), anyLong(), any(Progressable.class)))
-                .thenReturn(fsDataOutputStream);
-
-        // when
-        underTest = HdfsOutputStream.createOutputStream(hdfsPath, hdfsInfoFactory);
-
-        // then
-        assertThat(underTest, notNullValue());
-        verify(endpointConfig, times(1)).setAppend(false);
-        verify(fileSystem, times(0)).rename(any(Path.class), any(Path.class));
-    }
-
-    @Test
-    public void createOutputStreamOverwriteExistingNormalFile() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.isWantAppend()).thenReturn(false);
-        when(endpointConfig.isAppend()).thenReturn(false);
-        when(endpointConfig.isOverwrite()).thenReturn(true);
-
-        when(fileSystem.exists(any(Path.class))).thenReturn(true);
-        when(fileSystem.create(any(Path.class), anyBoolean(), anyInt(), anyShort(), anyLong(), any(Progressable.class)))
-                .thenReturn(fsDataOutputStream);
-
-        ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
-
-        // when
-        underTest = HdfsOutputStream.createOutputStream(hdfsPath, hdfsInfoFactory);
-
-        // then
-        assertThat(underTest, notNullValue());
-        verify(fileSystem, times(1)).delete(pathCaptor.capture(), eq(true));
-        assertThat(pathCaptor.getValue().toString(), is(hdfsPath));
-
-        assertThat(underTest.getNumOfWrittenBytes(), is(0L));
-        assertThat(underTest.getNumOfWrittenMessages(), is(0L));
-        assertThat(underTest.getActualPath(), is(hdfsPath));
-        assertThat(underTest.getLastAccess() > 0L, is(true));
-        assertThat(underTest.isBusy().get(), is(false));
-    }
-
-    @Test
-    public void createOutputStreamWillFailForExistingNormalFileNoOverwrite() throws IOException {
-        // given
-        String hdfsPath = "hdfs://localhost/target/test/multiple-consumers";
-        FSDataOutputStream fsDataOutputStream = mock(FSDataOutputStream.class);
-        when(endpointConfig.getFileType()).thenReturn(HdfsFileType.NORMAL_FILE);
-        when(endpointConfig.isWantAppend()).thenReturn(false);
-        when(endpointConfig.isAppend()).thenReturn(false);
-        when(endpointConfig.isOverwrite()).thenReturn(false);
-
-        when(fileSystem.exists(any(Path.class))).thenReturn(true);
-        when(fileSystem.create(any(Path.class), anyBoolean(), anyInt(), anyShort(), anyLong(), any(Progressable.class)))
-                .thenReturn(fsDataOutputStream);
-
-        // when
-        Throwable expected = null;
-        try {
-            underTest = HdfsOutputStream.createOutputStream(hdfsPath, hdfsInfoFactory);
-        } catch (Exception e) {
-            expected = e;
-        }
-
-        // then
-        assertThat(expected, notNullValue());
-        assertThat(expected.getMessage(), is("File [hdfs://localhost/target/test/multiple-consumers] already exists"));
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerConsumerTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerConsumerTest.java
deleted file mode 100644
index cb4307a46e7..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerConsumerTest.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.camel.component.mock.MockEndpoint;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-public class HdfsProducerConsumerTest extends HdfsTestSupport {
-
-    @Override
-    @BeforeEach
-    public void setUp() throws Exception {
-        checkTest();
-        super.setUp();
-    }
-
-    @Override
-    public boolean isUseRouteBuilder() {
-        return false;
-    }
-
-    @Test
-    public void testSimpleSplitWriteRead() throws Exception {
-        checkTest();
-
-        final Path file = new Path(new File("target/test/test-camel-simple-write-file").getAbsolutePath());
-
-        context.addRoutes(new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("direct:start")
-                        .to("hdfs:localhost/" + file.toUri() + "?fileSystemType=LOCAL&splitStrategy=BYTES:5,IDLE:1000");
-                from("hdfs:localhost/" + file.toUri() + "?initialDelay=2000&fileSystemType=LOCAL&chunkSize=5")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        List<String> expectedResults = new ArrayList<>();
-        for (int i = 0; i < 10; ++i) {
-            template.sendBody("direct:start", "CIAO" + i);
-            expectedResults.add("CIAO" + i);
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-
-        resultEndpoint.expectedMessageCount(10);
-        resultEndpoint.assertIsSatisfied();
-
-        List<Exchange> exchanges = resultEndpoint.getExchanges();
-        assertEquals(10, exchanges.size());
-        resultEndpoint.expectedBodiesReceivedInAnyOrder(expectedResults);
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        checkTest();
-
-        super.tearDown();
-        Thread.sleep(100);
-        Configuration conf = new Configuration();
-        Path dir = new Path("target/test");
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-    }
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerSplitTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerSplitTest.java
deleted file mode 100644
index bd6f7072ba6..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerSplitTest.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.InputStreamReader;
-
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class HdfsProducerSplitTest extends HdfsTestSupport {
-
-    private static final Path BASE_FILE = new Path(new File("target/test/test-camel-simple-write-BASE_FILE").getAbsolutePath());
-
-    @Override
-    @BeforeEach
-    public void setUp() throws Exception {
-        checkTest();
-        super.setUp();
-    }
-
-    @Test
-    public void testSimpleWriteFileWithMessageSplit() throws Exception {
-        doTest(1);
-    }
-
-    @Test
-    public void testSimpleWriteFileWithBytesSplit() throws Exception {
-        doTest(2);
-    }
-
-    @Test
-    public void testSimpleWriteFileWithIdleSplit() throws Exception {
-        for (int i = 0; i < 3; ++i) {
-            template.sendBody("direct:start3", "CIAO" + i);
-            Thread.sleep(2000);
-        }
-
-        // stop Camel to flush and close file stream
-        stopCamelContext();
-
-        FileSystem fs = FileSystem.get(new Configuration());
-        FileStatus[] status = fs.listStatus(new Path("file:///" + BASE_FILE.toUri() + "3"));
-        assertEquals(3, status.length);
-        for (int i = 0; i < 3; i++) {
-            BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(status[i].getPath())));
-            assertTrue(br.readLine().startsWith("CIAO"));
-            assertNull(br.readLine());
-        }
-    }
-
-    @Test
-    public void testSimpleWriteFileWithMessageIdleSplit() throws Exception {
-        doTest(4);
-    }
-
-    @Test
-    public void testSimpleWriteFileWithBytesIdleSplit() throws Exception {
-        doTest(5);
-    }
-
-    private void doTest(int routeNr) throws Exception {
-        for (int i = 0; i < 10; ++i) {
-            template.sendBody("direct:start" + routeNr, "CIAO" + i);
-        }
-        stopCamelContext();
-
-        FileSystem fs = FileSystem.get(new Configuration());
-        FileStatus[] status = fs.listStatus(new Path("file:///" + BASE_FILE.toUri() + routeNr));
-        assertEquals(10, status.length);
-        for (FileStatus fileStatus : status) {
-            BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(fileStatus.getPath())));
-            assertTrue(br.readLine().startsWith("CIAO"));
-            assertNull(br.readLine());
-        }
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-        Thread.sleep(100);
-        Configuration conf = new Configuration();
-        Path dir = new Path("target/test");
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-    }
-
-    @Override
-    protected RouteBuilder createRouteBuilder() {
-        return new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("direct:start1")
-                        .to("hdfs:localhost/" + BASE_FILE.toUri() + "1?fileSystemType=LOCAL&splitStrategy=MESSAGES:1");
-                from("direct:start2")
-                        .to("hdfs:localhost/" + BASE_FILE.toUri() + "2?fileSystemType=LOCAL&splitStrategy=BYTES:5");
-                from("direct:start3")
-                        .to("hdfs:localhost/" + BASE_FILE.toUri() + "3?fileSystemType=LOCAL&splitStrategy=IDLE:1000");
-                from("direct:start4").to(
-                        "hdfs:localhost/" + BASE_FILE.toUri() + "4?fileSystemType=LOCAL&splitStrategy=IDLE:1000,MESSAGES:1");
-                from("direct:start5")
-                        .to("hdfs:localhost/" + BASE_FILE.toUri() + "5?fileSystemType=LOCAL&splitStrategy=IDLE:1000,BYTES:5");
-            }
-        };
-    }
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerTest.java
deleted file mode 100644
index 0f265d0dfca..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsProducerTest.java
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.InputStream;
-import java.net.URL;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.camel.model.language.SimpleExpression;
-import org.apache.camel.util.IOHelper;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.ArrayFile;
-import org.apache.hadoop.io.BloomMapFile;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.ShortWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-public class HdfsProducerTest extends HdfsTestSupport {
-
-    private static final Path TEMP_DIR = new Path(new File("target/test/").getAbsolutePath());
-
-    private static final boolean LD_LIBRARY_PATH_DEFINED = StringUtils.isNotBlank(System.getenv("LD_LIBRARY_PATH"));
-
-    @Override
-    @BeforeEach
-    public void setUp() throws Exception {
-        checkTest();
-        super.setUp();
-    }
-
-    @Test
-    public void testProducer() throws Exception {
-        template.sendBody("direct:start1", "PAPPO");
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel1");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        assertEquals("PAPPO", value.toString());
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testProducerClose() throws Exception {
-        for (int i = 0; i < 10; ++i) {
-            // send 10 messages, and mark to close in last message
-            template.sendBodyAndHeader("direct:start1", "PAPPO" + i, HdfsConstants.HDFS_CLOSE, i == 9);
-        }
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel1");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-
-        int i = 0;
-        while (reader.next(key, value)) {
-            Text txt = (Text) value;
-            assertEquals("PAPPO" + i, txt.toString());
-            ++i;
-        }
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteBoolean() throws Exception {
-        Boolean aBoolean = true;
-        template.sendBody("direct:write_boolean", aBoolean);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-boolean");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        Boolean rBoolean = ((BooleanWritable) value).get();
-        assertEquals(rBoolean, aBoolean);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteByte() throws Exception {
-        byte aByte = 8;
-        template.sendBody("direct:write_byte", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-byte");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteShort() throws Exception {
-        short aShort = 32767;
-        template.sendBody("direct:write_short", aShort);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-short");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        short rShort = ((ShortWritable) value).get();
-        assertEquals(rShort, aShort);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteInt() throws Exception {
-        int anInt = 1234;
-        template.sendBody("direct:write_int", anInt);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-int");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        int rInt = ((IntWritable) value).get();
-        assertEquals(rInt, anInt);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteFloat() throws Exception {
-        float aFloat = 12.34f;
-        template.sendBody("direct:write_float", aFloat);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-float");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        float rFloat = ((FloatWritable) value).get();
-        assertEquals(rFloat, aFloat, 0.0F);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteDouble() throws Exception {
-        Double aDouble = 12.34D;
-        template.sendBody("direct:write_double", aDouble);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-double");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        Double rDouble = ((DoubleWritable) value).get();
-        assertEquals(rDouble, aDouble);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteLong() throws Exception {
-        long aLong = 1234567890;
-        template.sendBody("direct:write_long", aLong);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-long");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        long rLong = ((LongWritable) value).get();
-        assertEquals(rLong, aLong);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteText() throws Exception {
-        String txt = "CIAO MONDO !";
-        template.sendBody("direct:write_text1", txt);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text1");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        String rTxt = value.toString();
-        assertEquals(rTxt, txt);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteTextWithKey() throws Exception {
-        String txtKey = "THEKEY";
-        String txtValue = "CIAO MONDO !";
-        template.sendBodyAndHeader("direct:write_text2", txtValue, "KEY", txtKey);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text2");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        assertEquals(key.toString(), txtKey);
-        assertEquals(value.toString(), txtValue);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testMapWriteTextWithKey() throws Exception {
-        String txtKey = "THEKEY";
-        String txtValue = "CIAO MONDO !";
-        template.sendBodyAndHeader("direct:write_text3", txtValue, "KEY", txtKey);
-
-        Configuration conf = new Configuration();
-        MapFile.Reader reader = new MapFile.Reader(new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text3"), conf);
-        Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        assertEquals(key.toString(), txtKey);
-        assertEquals(value.toString(), txtValue);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testArrayWriteText() throws Exception {
-        String txtValue = "CIAO MONDO !";
-        template.sendBody("direct:write_text4", txtValue);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text4");
-        FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
-        ArrayFile.Reader reader = new ArrayFile.Reader(fs1, "file:///" + TEMP_DIR.toUri() + "/test-camel-text4", conf);
-        Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(value);
-        assertEquals(value.toString(), txtValue);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testBloomMapWriteText() throws Exception {
-        String txtKey = "THEKEY";
-        String txtValue = "CIAO MONDO !";
-        template.sendBodyAndHeader("direct:write_text5", txtValue, "KEY", txtKey);
-
-        Configuration conf = new Configuration();
-        BloomMapFile.Reader reader
-                = new BloomMapFile.Reader(new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-text5"), conf);
-        Text key = (Text) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Text value = (Text) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        assertEquals(key.toString(), txtKey);
-        assertEquals(value.toString(), txtValue);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testWriteTextWithDynamicFilename() throws Exception {
-        for (int i = 0; i < 5; i++) {
-            template.sendBodyAndHeader("direct:write_dynamic_filename", "CIAO" + i, Exchange.FILE_NAME, "file" + i);
-        }
-
-        for (int i = 0; i < 5; i++) {
-            InputStream in = null;
-            try {
-                in = new URL("file:///" + TEMP_DIR.toUri() + "/test-camel-dynamic/file" + i).openStream();
-                ByteArrayOutputStream bos = new ByteArrayOutputStream();
-                IOUtils.copyBytes(in, bos, 4096, false);
-                assertEquals("CIAO" + i, new String(bos.toByteArray()));
-            } finally {
-                IOHelper.close(in);
-            }
-        }
-    }
-
-    @Test
-    public void testWriteTextWithDynamicFilenameExpression() throws Exception {
-        for (int i = 0; i < 5; i++) {
-            template.sendBodyAndHeader("direct:write_dynamic_filename", "CIAO" + i, Exchange.FILE_NAME,
-                    new SimpleExpression("file-${body}"));
-        }
-
-        for (int i = 0; i < 5; i++) {
-            InputStream in = null;
-            try {
-                in = new URL("file:///" + TEMP_DIR.toUri() + "/test-camel-dynamic/file-CIAO" + i).openStream();
-                ByteArrayOutputStream bos = new ByteArrayOutputStream();
-                IOUtils.copyBytes(in, bos, 4096, false);
-                assertEquals("CIAO" + i, new String(bos.toByteArray()));
-            } finally {
-                IOHelper.close(in);
-            }
-        }
-    }
-
-    @Test
-    public void testCompressWithGZip() throws Exception {
-        assumeTrue(LD_LIBRARY_PATH_DEFINED);
-        byte aByte = 8;
-        template.sendBody("direct:gzip", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-gzip");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testCompressWithBZip2() throws Exception {
-        byte aByte = 8;
-        template.sendBody("direct:bzip2", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-bzip2");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testCompressWithSnappy() throws Exception {
-        byte aByte = 8;
-        template.sendBody("direct:snappy", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-snappy");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testCompressWithLz4() throws Exception {
-        byte aByte = 8;
-        template.sendBody("direct:lz4", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-lz4");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Test
-    public void testCompressWithZStandard() throws Exception {
-        assumeTrue(LD_LIBRARY_PATH_DEFINED);
-        byte aByte = 8;
-        template.sendBody("direct:zstandard", aByte);
-
-        Configuration conf = new Configuration();
-        Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-zstandard");
-        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
-        Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        reader.next(key, value);
-        byte rByte = ((ByteWritable) value).get();
-        assertEquals(rByte, aByte);
-
-        IOHelper.close(reader);
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-
-        Thread.sleep(250);
-        Configuration conf = new Configuration();
-        Path dir = new Path("target/test");
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-    }
-
-    @Override
-    protected RouteBuilder createRouteBuilder() {
-        return new RouteBuilder() {
-
-            @Override
-            public void configure() {
-                from("direct:start1").to("hdfs://localhost/" + TEMP_DIR.toUri()
-                                         + "/test-camel1?fileSystemType=LOCAL&valueType=TEXT&fileType=SEQUENCE_FILE");
-
-                /* For testing writables */
-                from("direct:write_boolean")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-boolean?fileSystemType=LOCAL&valueType=BOOLEAN&fileType=SEQUENCE_FILE");
-
-                from("direct:write_byte").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                             + "/test-camel-byte?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE");
-
-                from("direct:write_short").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                              + "/test-camel-short?fileSystemType=LOCAL&valueType=SHORT&fileType=SEQUENCE_FILE");
-
-                from("direct:write_int").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                            + "/test-camel-int?fileSystemType=LOCAL&valueType=INT&fileType=SEQUENCE_FILE");
-
-                from("direct:write_float")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-float?fileSystemType=LOCAL&valueType=FLOAT&fileType=SEQUENCE_FILE");
-
-                from("direct:write_long").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                             + "/test-camel-long?fileSystemType=LOCAL&valueType=LONG&fileType=SEQUENCE_FILE");
-
-                from("direct:write_double")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-double?fileSystemType=LOCAL&valueType=DOUBLE&fileType=SEQUENCE_FILE");
-
-                from("direct:write_text1").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                              + "/test-camel-text1?fileSystemType=LOCAL&valueType=TEXT&fileType=SEQUENCE_FILE");
-
-                /* For testing key and value writing */
-                from("direct:write_text2")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-text2?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=SEQUENCE_FILE");
-
-                from("direct:write_text3")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-text3?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=MAP_FILE");
-
-                /* For testing ArrayFile */
-                from("direct:write_text4").to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                              + "/test-camel-text4?fileSystemType=LOCAL&valueType=TEXT&fileType=ARRAY_FILE");
-
-                /* For testing BloomMapFile */
-                from("direct:write_text5")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-text5?fileSystemType=LOCAL&keyType=TEXT&valueType=TEXT&fileType=BLOOMMAP_FILE");
-
-                from("direct:write_dynamic_filename")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri() + "/test-camel-dynamic/?fileSystemType=LOCAL&valueType=TEXT");
-
-                /* For testing compression codecs */
-                from("direct:bzip2")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-bzip2?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE&compressionCodec=BZIP2&compressionType=BLOCK");
-
-                from("direct:snappy")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-snappy?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE&compressionCodec=SNAPPY&compressionType=BLOCK");
-
-                from("direct:lz4")
-                        .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                            + "/test-camel-lz4?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE&compressionCodec=LZ4&compressionType=BLOCK");
-
-                // GZip and ZStandard requires native hadoop library. To run these tests,
-                // 1. install shared libraries for these codecs (e.g., libz.so and libzstd.so on Linux)
-                // 2. download pre-built native hadoop library, or build it yourself in accordance with
-                //    https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/NativeLibraries.html
-                // 3. set LD_LIBRARY_PATH to point native hadoop library when running tests, like
-                //    `$ LD_LIBRARY_PATH=/path/to/hadoop/lib/native ./mvnw clean test -f components/camel-hdfs`
-                if (LD_LIBRARY_PATH_DEFINED) {
-                    from("direct:gzip")
-                            .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                + "/test-camel-gzip?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE&compressionCodec=GZIP&compressionType=BLOCK");
-
-                    from("direct:zstandard")
-                            .to("hdfs:localhost/" + TEMP_DIR.toUri()
-                                + "/test-camel-zstandard?fileSystemType=LOCAL&valueType=BYTE&fileType=SEQUENCE_FILE&compressionCodec=ZSTANDARD&compressionType=BLOCK");
-                }
-            }
-        };
-    }
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsTestSupport.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsTestSupport.java
deleted file mode 100644
index ef4870b9c52..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/HdfsTestSupport.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.File;
-
-import org.apache.camel.test.junit5.CamelTestSupport;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.util.Shell;
-
-import static org.junit.jupiter.api.Assumptions.assumeFalse;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-public abstract class HdfsTestSupport extends CamelTestSupport {
-
-    public static final File CWD = new File(".");
-
-    public void checkTest() {
-        isJavaFromIbm();
-        missingLocalHadoopConfiguration();
-        missingAuthenticationConfiguration();
-    }
-
-    protected static void isJavaFromIbm() {
-        // Hadoop doesn't run on IBM JDK
-        assumeFalse(System.getProperty("java.vendor").contains("IBM"), "IBM JDK not supported");
-    }
-
-    private static void missingLocalHadoopConfiguration() {
-        boolean hasLocalHadoop;
-        try {
-            String hadoopHome = Shell.getHadoopHome();
-            hasLocalHadoop = StringUtils.isNotEmpty(hadoopHome);
-        } catch (Exception e) {
-            hasLocalHadoop = false;
-        }
-        assumeTrue(hasLocalHadoop, "Missing local hadoop configuration");
-    }
-
-    private void missingAuthenticationConfiguration() {
-        try {
-            javax.security.auth.login.Configuration.getConfiguration();
-        } catch (Exception e) {
-            assumeTrue(false, "Missing authentication configuration: " + e);
-        }
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/MockDataInputStream.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/MockDataInputStream.java
deleted file mode 100644
index 3ef6b2c4acb..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/MockDataInputStream.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs;
-
-import java.io.EOFException;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.PositionedReadable;
-import org.apache.hadoop.fs.Seekable;
-
-public class MockDataInputStream extends FSInputStream implements Seekable, PositionedReadable {
-
-    private final FileInputStream fis;
-    private long position;
-
-    MockDataInputStream(String targetFile) throws FileNotFoundException {
-        this(new FileInputStream(targetFile));
-    }
-
-    MockDataInputStream(FileInputStream fis) {
-        this.fis = fis;
-    }
-
-    @Override
-    public void seek(long pos) throws IOException {
-        if (pos < 0) {
-            throw new EOFException(
-                    FSExceptionMessages.NEGATIVE_SEEK);
-        }
-        fis.getChannel().position(pos);
-        this.position = pos;
-    }
-
-    @Override
-    public long getPos() {
-        return this.position;
-    }
-
-    @Override
-    public boolean seekToNewSource(long targetPos) {
-        return false;
-    }
-
-    @Override
-    public int available() throws IOException {
-        return fis.available();
-    }
-
-    @Override
-    public void close() throws IOException {
-        fis.close();
-    }
-
-    @Override
-    public boolean markSupported() {
-        return false;
-    }
-
-    @Override
-    public int read() throws IOException {
-        try {
-            int value = fis.read();
-            if (value >= 0) {
-                this.position++;
-            }
-            return value;
-        } catch (IOException e) {
-            throw new IOException(e);
-        }
-    }
-
-    @Override
-    public int read(byte[] b, int off, int len) throws IOException {
-        try {
-            int value = fis.read(b, off, len);
-            if (value > 0) {
-                this.position += value;
-            }
-            return value;
-        } catch (IOException e) {
-            throw new IOException(e);
-        }
-    }
-
-    @Override
-    public int read(long position, byte[] b, int off, int len)
-            throws IOException {
-        ByteBuffer bb = ByteBuffer.wrap(b, off, len);
-        try {
-            return fis.getChannel().read(bb, position);
-        } catch (IOException e) {
-            throw new IOException(e);
-        }
-    }
-
-    @Override
-    public long skip(long n) throws IOException {
-        long value = fis.skip(n);
-        if (value > 0) {
-            this.position += value;
-        }
-        return value;
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsAppendIT.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsAppendIT.java
deleted file mode 100644
index e2770f8d127..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsAppendIT.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.integration;
-
-import java.nio.charset.StandardCharsets;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.camel.test.AvailablePortFinder;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSService;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSServiceFactory;
-import org.apache.camel.test.junit5.CamelTestSupport;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.RegisterExtension;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testcontainers.shaded.org.apache.commons.lang3.SystemUtils;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-public class HdfsAppendIT extends CamelTestSupport {
-    @RegisterExtension
-    public static HDFSService service = HDFSServiceFactory.createSingletonService(AvailablePortFinder.getNextAvailable());
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsAppendIT.class);
-
-    private static final int ITERATIONS = 10;
-
-    @Override
-    public boolean isUseRouteBuilder() {
-        return false;
-    }
-
-    @Override
-    @BeforeEach
-    public void setUp() throws Exception {
-        super.setUp();
-
-        Configuration conf = new Configuration();
-        if (SystemUtils.IS_OS_MAC) {
-            conf.addResource("hdfs-mac-test.xml");
-        } else {
-            conf.addResource("hdfs-test.xml");
-        }
-        String path = String.format("hdfs://%s:%d/tmp/test/test-camel-simple-write-file1", service.getHDFSHost(),
-                service.getPort());
-
-        Path file = new Path(path);
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        if (fs.exists(file)) {
-            fs.delete(file, true);
-        }
-        try (FSDataOutputStream out = fs.create(file)) {
-            for (int i = 0; i < 10; ++i) {
-                out.write("PIPPO".getBytes(StandardCharsets.UTF_8));
-            }
-        }
-    }
-
-    @Test
-    public void testAppend() throws Exception {
-        context.addRoutes(new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("direct:start1")
-                        .toF("hdfs://%s:%d/tmp/test/test-camel-simple-write-file1?append=true&fileSystemType=HDFS",
-                                service.getHDFSHost(), service.getPort());
-            }
-        });
-        startCamelContext();
-
-        for (int i = 0; i < 10; ++i) {
-            template.sendBody("direct:start1", "PIPPQ");
-        }
-
-        Configuration conf = new Configuration();
-        String path = String.format("hdfs://%s:%d/tmp/test/test-camel-simple-write-file1", service.getHDFSHost(),
-                service.getPort());
-        Path file = new Path(path);
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        int ret = 0;
-        try (FSDataInputStream in = fs.open(file)) {
-            byte[] buffer = new byte[5];
-            for (int i = 0; i < 20; ++i) {
-                assertEquals(5, in.read(buffer));
-                LOG.info("> {}", new String(buffer));
-            }
-            ret = in.read(buffer);
-        }
-        assertEquals(-1, ret);
-    }
-
-    @Test
-    public void testAppendWithDynamicFileName() throws Exception {
-
-        context.addRoutes(new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("direct:start1").toF("hdfs://%s:%d/tmp/test-dynamic/?append=true&fileSystemType=HDFS",
-                        service.getHDFSHost(), service.getPort());
-            }
-        });
-        startCamelContext();
-
-        for (int i = 0; i < ITERATIONS; ++i) {
-            template.sendBodyAndHeader("direct:start1", "HELLO", Exchange.FILE_NAME, "camel-hdfs.log");
-        }
-
-        Configuration conf = new Configuration();
-        String path = String.format("hdfs://%s:%d/tmp/test-dynamic/camel-hdfs.log", service.getHDFSHost(),
-                service.getPort());
-
-        Path file = new Path(path);
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        int ret = 0;
-        try (FSDataInputStream in = fs.open(file)) {
-            byte[] buffer = new byte[5];
-            for (int i = 0; i < ITERATIONS; ++i) {
-                assertEquals(5, in.read(buffer));
-                LOG.info("> {}", new String(buffer));
-            }
-            ret = in.read(buffer);
-        }
-        assertEquals(-1, ret);
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-
-        Thread.sleep(250);
-        Configuration conf = new Configuration();
-        Path dir = new Path(String.format("hdfs://%s:%d/tmp/test", service.getHDFSHost(), service.getPort()));
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-        dir = new Path(String.format("hdfs://%s:%d/tmp/test-dynamic", service.getHDFSHost(), service.getPort()));
-        fs.delete(dir, true);
-    }
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsConsumerIntegrationIT.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsConsumerIntegrationIT.java
deleted file mode 100644
index a8e0bdf0740..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsConsumerIntegrationIT.java
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.integration;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.Processor;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.camel.component.mock.MockEndpoint;
-import org.apache.camel.support.DefaultScheduledPollConsumerScheduler;
-import org.apache.camel.test.AvailablePortFinder;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSService;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSServiceFactory;
-import org.apache.camel.test.junit5.CamelTestSupport;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.ArrayFile;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.SequenceFile.Writer;
-import org.apache.hadoop.io.Text;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.RegisterExtension;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class HdfsConsumerIntegrationIT extends CamelTestSupport {
-    @RegisterExtension
-    public static HDFSService service = HDFSServiceFactory.createSingletonService(AvailablePortFinder.getNextAvailable());
-
-    private static final int ITERATIONS = 200;
-
-    @Override
-    public boolean isUseRouteBuilder() {
-        return false;
-    }
-
-    @Test
-    public void testSimpleConsumer() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-normal-file",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        try (FSDataOutputStream out = fs.create(file)) {
-            for (int i = 0; i < 1024; ++i) {
-                out.write(("PIPPO" + i).getBytes(StandardCharsets.UTF_8));
-                out.flush();
-            }
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(2);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&chunkSize=4096&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testConcurrentConsumers() throws Exception {
-        final Path dir = new Path(
-                String.format("hdfs://%s:%d/tmp/test/multiple-consumers",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.mkdirs(dir);
-        for (int i = 1; i <= ITERATIONS; i++) {
-            try (FSDataOutputStream fos = fs.create(new Path(dir, String.format("file-%04d.txt", i)))) {
-                fos.write(String.format("hello (%04d)\n", i).getBytes());
-            }
-        }
-
-        final Set<String> fileNames = new HashSet<>();
-        final CountDownLatch latch = new CountDownLatch(ITERATIONS);
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.whenAnyExchangeReceived(new Processor() {
-            @Override
-            public void process(Exchange exchange) {
-                fileNames.add(exchange.getIn().getHeader(Exchange.FILE_NAME, String.class));
-                latch.countDown();
-            }
-        });
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(dir.toUri() + "?pattern=*.txt&fileSystemType=HDFS&chunkSize=100&initialDelay=0")
-                        .to("mock:result");
-                from(dir.toUri() + "?pattern=*.txt&fileSystemType=HDFS&chunkSize=200&initialDelay=0")
-                        .to("mock:result");
-                from(dir.toUri() + "?pattern=*.txt&fileSystemType=HDFS&chunkSize=300&initialDelay=0")
-                        .to("mock:result");
-                from(dir.toUri() + "?pattern=*.txt&fileSystemType=HDFS&chunkSize=400&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.expectedMessageCount(ITERATIONS);
-
-        latch.await(30, TimeUnit.SECONDS);
-
-        resultEndpoint.assertIsSatisfied();
-        assertEquals(ITERATIONS, fileNames.size());
-    }
-
-    @Test
-    public void testSimpleConsumerWithEmptyFile() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-normal-file",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        fs.createNewFile(file);
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        // TODO: See comment from Claus at ticket: https://issues.apache.org/jira/browse/CAMEL-8434
-        resultEndpoint.expectedMinimumMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&chunkSize=4096&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        Thread.sleep(2000);
-
-        resultEndpoint.assertIsSatisfied();
-        assertEquals(0,
-                resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length);
-    }
-
-    @Test
-    public void testSimpleConsumerFileWithSizeEqualToNChunks() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-normal-file",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        FileSystem fs = FileSystem.get(file.toUri(), conf);
-        try (FSDataOutputStream out = fs.create(file)) {
-            // size = 5 times chunk size = 210 bytes
-            for (int i = 0; i < 42; ++i) {
-                out.write(new byte[] { 0x61, 0x62, 0x63, 0x64, 0x65 });
-                out.flush();
-            }
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(5);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&chunkSize=42&initialDelay=0").to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-        assertEquals(42,
-                resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length);
-    }
-
-    @Test
-    public void testSimpleConsumerWithEmptySequenceFile() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-sequence-file",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BooleanWritable.class)) {
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(0);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&chunkSize=4096&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadWithReadSuffix() throws Exception {
-        final Path dir = new Path(String.format("hdfs://%s:%d/tmp/test/", service.getHDFSHost(), service.getPort()));
-        final Path file = new Path(dir, "test-camel-boolean");
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        BooleanWritable valueWritable = new BooleanWritable(true);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BooleanWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(dir.toUri()
-                     + "?scheduler=#myScheduler&pattern=*&fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0&readSuffix=handled")
-                        .to("mock:result");
-            }
-        });
-        ScheduledExecutorService pool = context.getExecutorServiceManager().newScheduledThreadPool(null, "unitTestPool", 1);
-        DefaultScheduledPollConsumerScheduler scheduler = new DefaultScheduledPollConsumerScheduler(pool);
-        context.getRegistry().bind("myScheduler", scheduler);
-        context.start();
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-        resultEndpoint.assertIsSatisfied();
-
-        // synchronize on pool that was used to run hdfs consumer thread
-        scheduler.getScheduledExecutorService().shutdown();
-        scheduler.getScheduledExecutorService().awaitTermination(5000, TimeUnit.MILLISECONDS);
-
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        assertEquals(1, fs.listStatus(dir).length);
-        assertTrue(fs.delete(new Path(file.toUri() + ".handled")));
-    }
-
-    @Test
-    public void testReadBoolean() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-boolean",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        BooleanWritable valueWritable = new BooleanWritable(true);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BooleanWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadByte() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-byte",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        ByteWritable valueWritable = new ByteWritable((byte) 3);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, ByteWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-        resultEndpoint.message(0).body(Byte.class).isEqualTo(3);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadFloat() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-float",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        FloatWritable valueWritable = new FloatWritable(3.1415926535f);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, FloatWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadDouble() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-double",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        DoubleWritable valueWritable = new DoubleWritable(3.1415926535);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, DoubleWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadInt() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-int",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        IntWritable valueWritable = new IntWritable(314159265);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, IntWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadLong() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-long",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        LongWritable valueWritable = new LongWritable(31415926535L);
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, LongWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadBytes() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-bytes",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        BytesWritable valueWritable = new BytesWritable("CIAO!".getBytes());
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BytesWritable.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadString() throws Exception {
-        final Path file = new Path(
-                String.format("hdfs://%s:%d/tmp/test/test-camel-string",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        NullWritable keyWritable = NullWritable.get();
-        Text valueWritable = new Text("CIAO!");
-        try (SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, Text.class)) {
-            writer.append(keyWritable, valueWritable);
-            writer.sync();
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(file.toUri() + "?fileSystemType=HDFS&fileType=SEQUENCE_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Test
-    public void testReadStringArrayFile() throws Exception {
-        final Path dir = new Path(
-                String.format("hdfs://%s:%d/tmp/test",
-                        service.getHDFSHost(), service.getPort()));
-        Configuration conf = new Configuration();
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.mkdirs(dir);
-        Text valueWritable = new Text("CIAO!");
-        try (ArrayFile.Writer writer = new ArrayFile.Writer(
-                conf, fs, new Path(dir, "test-camel-string1").toString(), Text.class)) {
-            writer.append(valueWritable);
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.expectedMessageCount(1);
-
-        context.addRoutes(new RouteBuilder() {
-            public void configure() {
-                from(dir.toUri() + "?fileSystemType=HDFS&fileType=ARRAY_FILE&initialDelay=0")
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.assertIsSatisfied();
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-        Thread.sleep(100);
-        Configuration conf = new Configuration();
-        Path dir = new Path(String.format("hdfs://%s:%d/tmp/test", service.getHDFSHost(), service.getPort()));
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-    }
-
-    private Writer createWriter(
-            Configuration conf, Path file, Class<?> keyClass,
-            Class<?> valueClass)
-            throws IOException {
-        return SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
-                SequenceFile.Writer.keyClass(keyClass), SequenceFile.Writer.valueClass(valueClass));
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsProducerConsumerIntegrationIT.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsProducerConsumerIntegrationIT.java
deleted file mode 100644
index 8ff7f86b3c7..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/integration/HdfsProducerConsumerIntegrationIT.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.integration;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.camel.Exchange;
-import org.apache.camel.Processor;
-import org.apache.camel.builder.RouteBuilder;
-import org.apache.camel.component.mock.MockEndpoint;
-import org.apache.camel.test.AvailablePortFinder;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSService;
-import org.apache.camel.test.infra.hdfs.v2.services.HDFSServiceFactory;
-import org.apache.camel.test.junit5.CamelTestSupport;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.RegisterExtension;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class HdfsProducerConsumerIntegrationIT extends CamelTestSupport {
-    @RegisterExtension
-    public static HDFSService service = HDFSServiceFactory.createSingletonService(AvailablePortFinder.getNextAvailable());
-
-    private static final int ITERATIONS = 400;
-
-    @Override
-    public boolean isUseRouteBuilder() {
-        return false;
-    }
-
-    @Test
-    public void testSimpleSplitWriteRead() throws Exception {
-        context.addRoutes(new RouteBuilder() {
-            @Override
-            public void configure() {
-                from("direct:start").toF(
-                        "hdfs://%s:%d/tmp/test/test-camel-simple-write-file?fileSystemType=HDFS&splitStrategy=BYTES:5,IDLE:1000",
-                        service.getHDFSHost(), service.getPort());
-                fromF("hdfs://%s:%d/tmp/test/test-camel-simple-write-file?pattern=*&initialDelay=2000&fileSystemType=HDFS&chunkSize=5",
-                        service.getHDFSHost(), service.getPort())
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        Set<String> sent = new HashSet<>();
-
-        for (int i = 0; i < 10; ++i) {
-            String text = "CIAO" + i;
-            sent.add(text);
-            template.sendBody("direct:start", text);
-        }
-
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-
-        resultEndpoint.expectedMessageCount(10);
-        resultEndpoint.assertIsSatisfied();
-
-        List<Exchange> exchanges = resultEndpoint.getExchanges();
-        for (Exchange exchange : exchanges) {
-            String text = exchange.getIn().getBody(String.class);
-            sent.remove(text);
-        }
-        assertTrue(sent.isEmpty());
-    }
-
-    @Test
-    // see https://issues.apache.org/jira/browse/CAMEL-7318
-    public void testMultipleConsumers() throws Exception {
-
-        Path p = new Path(String.format("hdfs://%s:%d/tmp/test/multiple-consumers", service.getHDFSHost(), service.getPort()));
-        FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
-        fs.mkdirs(p);
-        for (int i = 1; i <= ITERATIONS; i++) {
-            try (FSDataOutputStream os = fs.create(new Path(p, String.format("file-%03d.txt", i)))) {
-                os.write(String.format("hello (%03d)\n", i).getBytes());
-            }
-        }
-
-        final Set<String> fileNames = new HashSet<>();
-        final CountDownLatch latch = new CountDownLatch(ITERATIONS);
-        MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
-        resultEndpoint.whenAnyExchangeReceived(new Processor() {
-            @Override
-            public void process(Exchange exchange) {
-                fileNames.add(exchange.getIn().getHeader(Exchange.FILE_NAME, String.class));
-                latch.countDown();
-            }
-        });
-
-        context.addRoutes(new RouteBuilder() {
-            @Override
-            public void configure() {
-                // difference in chunkSize only to allow multiple consumers
-                fromF("hdfs://%s:%d/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=128",
-                        service.getHDFSHost(), service.getPort())
-                        .to("mock:result");
-                fromF("hdfs://%s:%d/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=256",
-                        service.getHDFSHost(), service.getPort())
-                        .to("mock:result");
-                fromF("hdfs://%s:%d/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=512",
-                        service.getHDFSHost(), service.getPort())
-                        .to("mock:result");
-                fromF("hdfs://%s:%d/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=1024",
-                        service.getHDFSHost(), service.getPort())
-                        .to("mock:result");
-            }
-        });
-        context.start();
-
-        resultEndpoint.expectedMessageCount(ITERATIONS);
-
-        latch.await(30, TimeUnit.SECONDS);
-
-        resultEndpoint.assertIsSatisfied();
-        assertEquals(ITERATIONS, fileNames.size());
-    }
-
-    @Override
-    @AfterEach
-    public void tearDown() throws Exception {
-        super.tearDown();
-
-        Thread.sleep(250);
-        Configuration conf = new Configuration();
-        Path dir = new Path(String.format("hdfs://%s:%d/tmp/test", service.getHDFSHost(), service.getPort()));
-        FileSystem fs = FileSystem.get(dir.toUri(), conf);
-        fs.delete(dir, true);
-        dir = new Path(String.format("hdfs://%s:%d/tmp/test/multiple-consumers", service.getHDFSHost(), service.getPort()));
-        fs.delete(dir, true);
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthenticationTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthenticationTest.java
deleted file mode 100644
index 7374b103c7d..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosAuthenticationTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.kerberos;
-
-import java.io.FileNotFoundException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.junit.jupiter.api.Test;
-
-import static org.apache.camel.component.hdfs.HdfsTestSupport.CWD;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-public class KerberosAuthenticationTest {
-
-    private KerberosAuthentication underTest;
-
-    private void doLogin(String s) {
-        Configuration configuration = new Configuration();
-
-        String username = "test_user";
-        String keyTabFileLocation = CWD.getAbsolutePath() + s;
-
-        underTest = new KerberosAuthentication(configuration, username, keyTabFileLocation);
-    }
-
-    @Test
-    public void loginWithKeytabFile() {
-        // given
-        doLogin("/src/test/resources/kerberos/test-keytab.bin");
-
-        // when
-        assertDoesNotThrow(() -> underTest.loginWithKeytab());
-
-        // then
-        /* message is printed in the logs */
-    }
-
-    @Test
-    public void loginWithMissingKeytabFile() {
-        // given
-        doLogin("/src/test/resources/kerberos/missing.bin");
-
-        // when
-        assertThrows(FileNotFoundException.class,
-                () -> underTest.loginWithKeytab());
-
-        // then
-        /* exception was thrown */
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilderTest.java b/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilderTest.java
deleted file mode 100644
index 1d7a90af4de..00000000000
--- a/components/camel-hdfs/src/test/java/org/apache/camel/component/hdfs/kerberos/KerberosConfigurationBuilderTest.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.camel.component.hdfs.kerberos;
-
-import org.junit.jupiter.api.Test;
-
-import static org.apache.camel.component.hdfs.HdfsTestSupport.CWD;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class KerberosConfigurationBuilderTest {
-
-    @Test
-    public void withKerberosConfiguration() {
-        assertDoesNotThrow(() -> prepareKerberosConfiguration());
-    }
-
-    private void prepareKerberosConfiguration() {
-        String kerberosConfigFileLocation = CWD.getAbsolutePath() + "/src/test/resources/kerberos/test-kerb5.conf";
-        KerberosConfigurationBuilder.setKerberosConfigFile(kerberosConfigFileLocation);
-    }
-
-    @Test
-    public void setKerberosConfigFileWithRealFile() {
-        // given
-        String kerb5FileName = "test-kerb5.conf";
-        String kerberosConfigFileLocation = CWD.getAbsolutePath() + "/src/test/resources/kerberos/" + kerb5FileName;
-
-        // when
-        KerberosConfigurationBuilder.setKerberosConfigFile(kerberosConfigFileLocation);
-
-        // then
-        String actual = System.getProperty("java.security.krb5.conf");
-        assertNotNull(actual);
-        assertTrue(actual.endsWith(kerb5FileName));
-    }
-
-    @Test
-    public void setKerberosConfigFileWithMissingFile() {
-        // given
-        String kerb5FileName = "missing-kerb5.conf";
-        String kerberosConfigFileLocation = CWD.getAbsolutePath() + "/src/test/resources/kerberos/" + kerb5FileName;
-
-        // when
-        KerberosConfigurationBuilder.setKerberosConfigFile(kerberosConfigFileLocation);
-
-        // then
-        String actual = System.getProperty("java.security.krb5.conf");
-        assertNull(actual);
-    }
-
-}
diff --git a/components/camel-hdfs/src/test/resources/hdfs-default.xml b/components/camel-hdfs/src/test/resources/hdfs-default.xml
deleted file mode 100644
index 53af074450d..00000000000
--- a/components/camel-hdfs/src/test/resources/hdfs-default.xml
+++ /dev/null
@@ -1,1607 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into hdfs-site.xml and change them -->
-<!-- there.  If hdfs-site.xml does not already exist, create it.      -->
-
-<configuration>
-
-<property>
-  <name>hadoop.hdfs.configuration.version</name>
-  <value>1</value>
-  <description>version of this configuration file</description>
-</property>
-
-<property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.rpc-address</name>
-  <value></value>
-  <description>
-    RPC address that handles all client requests. In the case of HA/Federation where multiple namenodes exist,
-    the name service id is added to the name e.g. dfs.namenode.rpc-address.ns1
-    dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.rpc-bind-host</name>
-  <value></value>
-  <description>
-    The actual address the server will bind to. If this optional address is
-    set, the RPC server will bind to this address and the port specified in
-    dfs.namenode.rpc-address for the RPC server. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making name node listen to all interfaces by setting to 0.0.0.0.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.servicerpc-address</name>
-  <value></value>
-  <description>
-    RPC address for HDFS Services communication. BackupNode, Datanodes and all other services should be
-    connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
-    the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1
-    dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port.
-    If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.servicerpc-bind-host</name>
-  <value></value>
-  <description>
-    The actual address the server will bind to. If this optional address is
-    set, the service RPC server will bind to this address and the port 
-    specified in dfs.namenode.servicerpc-address. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making name node listen to all interfaces by setting to 0.0.0.0.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.secondary.http-address</name>
-  <value>0.0.0.0:50090</value>
-  <description>
-    The secondary namenode http server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.address</name>
-  <value>0.0.0.0:50010</value>
-  <description>
-    The datanode server address and port for data transfer.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.http.address</name>
-  <value>0.0.0.0:50075</value>
-  <description>
-    The datanode http server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.ipc.address</name>
-  <value>0.0.0.0:50020</value>
-  <description>
-    The datanode ipc server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.handler.count</name>
-  <value>10</value>
-  <description>The number of server threads for the datanode.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.http-address</name>
-  <value>0.0.0.0:50070</value>
-  <description>
-    The address and the base port where the dfs namenode web ui will listen on.
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
-<property>
-  <name>dfs.http.policy</name>
-  <value>HTTP_ONLY</value>
-  <description>Decide if HTTPS(SSL) is supported on HDFS
-    This configures the HTTP endpoint for HDFS daemons:
-      The following values are supported:
-      - HTTP_ONLY : Service is provided only on http
-      - HTTPS_ONLY : Service is provided only on https
-      - HTTP_AND_HTTPS : Service is provided both on http and https
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.https.need-auth</name>
-  <value>false</value>
-  <description>Whether SSL client certificate authentication is required
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>Resource file from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.https.keystore.resource</name>
-  <value>ssl-client.xml</value>
-  <description>Resource file from which ssl client keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.https.address</name>
-  <value>0.0.0.0:50475</value>
-  <description>The datanode secure http server address and port.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.https-address</name>
-  <value>0.0.0.0:50470</value>
-  <description>The namenode secure http server address and port.</description>
-</property>
-
- <property>
-  <name>dfs.datanode.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </description>
- </property>
- 
-<property>
-  <name>dfs.datanode.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </description>
- </property>
- 
- <property>
-  <name>dfs.namenode.backup.address</name>
-  <value>0.0.0.0:50100</value>
-  <description>
-    The backup node server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
- 
- <property>
-  <name>dfs.namenode.backup.http-address</name>
-  <value>0.0.0.0:50105</value>
-  <description>
-    The backup node http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.considerLoad</name>
-  <value>true</value>
-  <description>Decide if chooseTarget considers the target's load or not
-  </description>
-</property>
-<property>
-  <name>dfs.default.chunk.view.size</name>
-  <value>32768</value>
-  <description>The number of bytes to view for a file on the browser.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.du.reserved</name>
-  <value>0</value>
-  <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.name.dir</name>
-  <value>file://${hadoop.tmp.dir}/dfs/name</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the name table(fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-</property>
-
-<property>
-  <name>dfs.namenode.name.dir.restore</name>
-  <value>false</value>
-  <description>Set to true to enable NameNode to attempt recovering a
-      previously failed dfs.namenode.name.dir. When enabled, a recovery of any
-      failed directory is attempted during checkpoint.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.max-component-length</name>
-  <value>0</value>
-  <description>Defines the maximum number of characters in each component
-      of a path.  A value of 0 will disable the check.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.max-directory-items</name>
-  <value>0</value>
-  <description>Defines the maximum number of items that a directory may
-      contain.  A value of 0 will disable the check.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.min-block-size</name>
-  <value>1048576</value>
-  <description>Minimum block size in bytes, enforced by the Namenode at create
-      time. This prevents the accidental creation of files with tiny block
-      sizes (and thus many blocks), which can degrade
-      performance.</description>
-</property>
-
-<property>
-    <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-    <value>1048576</value>
-    <description>Maximum number of blocks per file, enforced by the Namenode on
-        write. This prevents the creation of extremely large files which can
-        degrade performance.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.edits.dir</name>
-  <value>${dfs.namenode.name.dir}</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. Default value is same as dfs.namenode.name.dir
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.shared.edits.dir</name>
-  <value></value>
-  <description>A directory on shared storage between the multiple namenodes
-  in an HA cluster. This directory will be written by the active and read
-  by the standby in order to keep the namespaces synchronized. This directory
-  does not need to be listed in dfs.namenode.edits.dir above. It should be
-  left empty in a non-HA cluster.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.edits.journal-plugin.qjournal</name>
-  <value>org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager</value>
-</property>
-
-<property>
-  <name>dfs.permissions.enabled</name>
-  <value>true</value>
-  <description>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner or group of files or directories.
-  </description>
-</property>
-
-<property>
-  <name>dfs.permissions.superusergroup</name>
-  <value>supergroup</value>
-  <description>The name of the group of super-users.</description>
-</property>
-<!--
-<property>
-   <name>dfs.cluster.administrators</name>
-   <value>ACL for the admins</value>
-   <description>This configuration is used to control who can access the
-                default servlets in the namenode, etc.
-   </description>
-</property>
--->
-
-<property>
-  <name>dfs.block.access.token.enable</name>
-  <value>false</value>
-  <description>
-    If "true", access tokens are used as capabilities for accessing datanodes.
-    If "false", no access tokens are checked on accessing datanodes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.access.key.update.interval</name>
-  <value>600</value>
-  <description>
-    Interval in minutes at which namenode updates its access keys.
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.access.token.lifetime</name>
-  <value>600</value>
-  <description>The lifetime of access tokens in minutes.</description>
-</property>
-
-<property>
-  <name>dfs.datanode.data.dir</name>
-  <value>file://${hadoop.tmp.dir}/dfs/data</value>
-  <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.data.dir.perm</name>
-  <value>700</value>
-  <description>Permissions for the directories on the local filesystem where
-  the DFS data node stores its blocks. The permissions can either be octal or
-  symbolic.</description>
-</property>
-
-<property>
-  <name>dfs.replication</name>
-  <value>3</value>
-  <description>Default block replication. 
-  The actual number of replications can be specified when the file is created.
-  The default is used if replication is not specified at create time.
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.max</name>
-  <value>512</value>
-  <description>Maximal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.min</name>
-  <value>1</value>
-  <description>Minimal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.blocksize</name>
-  <value>134217728</value>
-  <description>
-      The default block size for new files, in bytes.
-      You can use the following suffix (case insensitive):
-      k(kilo), m(mega), g(giga), t(tera), p(peta), e(exa) to specify the size (such as 128k, 512m, 1g, etc.),
-      Or provide complete size in bytes (such as 134217728 for 128 MB).
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.retries</name>
-  <value>3</value>
-  <description>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
-  <value>true</value>
-  <description>
-    If there is a datanode/network failure in the write pipeline,
-    DFSClient will try to remove the failed datanode from the pipeline
-    and then continue writing with the remaining datanodes. As a result,
-    the number of datanodes in the pipeline is decreased.  The feature is
-    to add new datanodes to the pipeline.
-
-    This is a site-wide property to enable/disable the feature.
-
-    When the cluster size is extremely small, e.g. 3 nodes or less, cluster
-    administrators may want to set the policy to NEVER in the default
-    configuration file or disable this feature.  Otherwise, users may
-    experience an unusually high rate of pipeline failures since it is
-    impossible to find new datanodes for replacement.
-
-    See also dfs.client.block.write.replace-datanode-on-failure.policy
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
-  <value>DEFAULT</value>
-  <description>
-    This property is used only if the value of
-    dfs.client.block.write.replace-datanode-on-failure.enable is true.
-
-    ALWAYS: always add a new datanode when an existing datanode is removed.
-    
-    NEVER: never add a new datanode.
-
-    DEFAULT: 
-      Let r be the replication number.
-      Let n be the number of existing datanodes.
-      Add a new datanode only if r is greater than or equal to 3 and either
-      (1) floor(r/2) is greater than or equal to n; or
-      (2) r is greater than n and the block is hflushed/appended.
-  </description>
-</property>
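Following the advice above for very small clusters, a hypothetical hdfs-site.xml override that disables datanode replacement on write failure could be:

    <property>
      <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
      <!-- example only: suited to clusters of roughly 3 nodes or fewer -->
      <value>NEVER</value>
    </property>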
-
-<property>
-  <name>dfs.blockreport.intervalMsec</name>
-  <value>21600000</value>
-  <description>Determines block reporting interval in milliseconds.</description>
-</property>
-
-<property>
-  <name>dfs.blockreport.initialDelay</name>
-  <value>0</value>
-  <description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.datanode.directoryscan.interval</name>
-  <value>21600</value>
-  <description>Interval in seconds for Datanode to scan data directories and
-  reconcile the difference between blocks in memory and on the disk.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.directoryscan.threads</name>
-  <value>1</value>
-  <description>The number of threads in the thread pool used to compile reports
-  for volumes in parallel.
-  </description>
-</property>
-
-<property>
-  <name>dfs.heartbeat.interval</name>
-  <value>3</value>
-  <description>Determines datanode heartbeat interval in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.handler.count</name>
-  <value>10</value>
-  <description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.safemode.threshold-pct</name>
-  <value>0.999f</value>
-  <description>
-    Specifies the percentage of blocks that should satisfy 
-    the minimal replication requirement defined by dfs.namenode.replication.min.
-    Values less than or equal to 0 mean not to wait for any particular
-    percentage of blocks before exiting safemode.
-    Values greater than 1 will make safe mode permanent.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.safemode.min.datanodes</name>
-  <value>0</value>
-  <description>
-    Specifies the number of datanodes that must be considered alive
-    before the name node exits safemode.
-    Values less than or equal to 0 mean not to take the number of live
-    datanodes into account when deciding whether to remain in safe mode
-    during startup.
-    Values greater than the number of datanodes in the cluster
-    will make safe mode permanent.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.safemode.extension</name>
-  <value>30000</value>
-  <description>
-    Determines extension of safe mode in milliseconds 
-    after the threshold level is reached.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.balance.bandwidthPerSec</name>
-  <value>1048576</value>
-  <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in terms of
-        the number of bytes per second.
-  </description>
-</property>
-
-<property>
-  <name>dfs.hosts</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  permitted to connect to the namenode. The full pathname of the file
-  must be specified.  If the value is empty, all hosts are
-  permitted.</description>
-</property>
-
-<property>
-  <name>dfs.hosts.exclude</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  not permitted to connect to the namenode.  The full pathname of the
-  file must be specified.  If the value is empty, no hosts are
-  excluded.</description>
-</property> 
-
-<property>
-  <name>dfs.namenode.max.objects</name>
-  <value>0</value>
-  <description>The maximum number of files, directories and blocks
-  dfs supports. A value of zero indicates no limit to the number
-  of objects that dfs supports.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
-  <value>true</value>
-  <description>
-    If true (the default), then the namenode requires that a connecting
-    datanode's address must be resolved to a hostname.  If necessary, a reverse
-    DNS lookup is performed.  All attempts to register a datanode from an
-    unresolvable address are rejected.
-
-    It is recommended that this setting be left on to prevent accidental
-    registration of datanodes listed by hostname in the excludes file during a
-    DNS outage.  Only set this to false in environments where there is no
-    infrastructure to support reverse DNS lookup.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.interval</name>
-  <value>30</value>
-  <description>Namenode periodicity in seconds to check if decommission is 
-  complete.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.nodes.per.interval</name>
-  <value>5</value>
-  <description>The number of nodes the namenode checks per
-  dfs.namenode.decommission.interval to determine if decommission is complete.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.interval</name>
-  <value>3</value>
-  <description>The periodicity in seconds with which the namenode computes 
-  replication work for datanodes. </description>
-</property>
-
-<property>
-  <name>dfs.namenode.accesstime.precision</name>
-  <value>3600000</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.plugins</name>
-  <value></value>
-  <description>Comma-separated list of datanode plug-ins to be activated.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.plugins</name>
-  <value></value>
-  <description>Comma-separated list of namenode plug-ins to be activated.
-  </description>
-</property>
-
-<property>
-  <name>dfs.stream-buffer-size</name>
-  <value>4096</value>
-  <description>The size of buffer to stream files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-</property>
-
-<property>
-  <name>dfs.bytes-per-checksum</name>
-  <value>512</value>
-  <description>The number of bytes per checksum.  Must not be larger than
-  dfs.stream-buffer-size</description>
-</property>
-
-<property>
-  <name>dfs.client-write-packet-size</name>
-  <value>65536</value>
-  <description>Packet size for clients to write</description>
-</property>
-
-<property>
-  <name>dfs.client.write.exclude.nodes.cache.expiry.interval.millis</name>
-  <value>600000</value>
-  <description>The maximum period to keep a DN in the excluded nodes list
-  at a client. After this period, in milliseconds, the previously excluded node(s) will
-  be removed automatically from the cache and will be considered good for block allocations
-  again. Useful to lower or raise in situations where you keep a file open for very long
-  periods (such as a Write-Ahead-Log (WAL) file) to make the writer tolerant to cluster maintenance
-  restarts. Defaults to 10 minutes.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.dir</name>
-  <value>file://${hadoop.tmp.dir}/dfs/namesecondary</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.edits.dir</name>
-  <value>${dfs.namenode.checkpoint.dir}</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as dfs.namenode.checkpoint.dir.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.period</name>
-  <value>3600</value>
-  <description>The number of seconds between two periodic checkpoints.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.txns</name>
-  <value>1000000</value>
-  <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-  of the namespace every 'dfs.namenode.checkpoint.txns' transactions, regardless
-  of whether 'dfs.namenode.checkpoint.period' has expired.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.check.period</name>
-  <value>60</value>
-  <description>The SecondaryNameNode and CheckpointNode will poll the NameNode
-  every 'dfs.namenode.checkpoint.check.period' seconds to query the number
-  of uncheckpointed transactions.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.checkpoint.max-retries</name>
-  <value>3</value>
-  <description>The SecondaryNameNode retries failed checkpointing. If the 
-  failure occurs while loading fsimage or replaying edits, the number of
-  retries is limited by this variable. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.num.checkpoints.retained</name>
-  <value>2</value>
-  <description>The number of image checkpoint files that will be retained by
-  the NameNode and Secondary NameNode in their storage directories. All edit
-  logs necessary to recover an up-to-date namespace from the oldest retained
-  checkpoint will also be retained.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.num.extra.edits.retained</name>
-  <value>1000000</value>
-  <description>The number of extra transactions which should be retained
-  beyond what is minimally necessary for a NN restart. This can be useful for
-  audit purposes or for an HA setup where a remote Standby Node may have
-  been offline for some time and need to have a longer backlog of retained
-  edits in order to start again.
-  Typically each edit is on the order of a few hundred bytes, so the default
-  of 1 million edits should be on the order of hundreds of MBs or low GBs.
-
-  NOTE: Fewer extra edits may be retained than value specified for this setting
-  if doing so would mean that more segments would be retained than the number
-  configured by dfs.namenode.max.extra.edits.segments.retained.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.max.extra.edits.segments.retained</name>
-  <value>10000</value>
-  <description>The maximum number of extra edit log segments which should be retained
-  beyond what is minimally necessary for a NN restart. When used in conjunction with
-  dfs.namenode.num.extra.edits.retained, this configuration property serves to cap
-  the number of extra edits files to a reasonable value.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.delegation.key.update-interval</name>
-  <value>86400000</value>
-  <description>The update interval for master key for delegation tokens 
-       in the namenode in milliseconds.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.delegation.token.max-lifetime</name>
-  <value>604800000</value>
-  <description>The maximum lifetime in milliseconds for which a delegation 
-      token is valid.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.delegation.token.renew-interval</name>
-  <value>86400000</value>
-  <description>The renewal interval for delegation token in milliseconds.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>The number of volumes that are allowed to
-  fail before a datanode stops offering service. By default
-  any volume failure will cause a datanode to shutdown.
-  </description>
-</property>
-
-<property>
-  <name>dfs.image.compress</name>
-  <value>false</value>
-  <description>Should the dfs image be compressed?
-  </description>
-</property>
-
-<property>
-  <name>dfs.image.compression.codec</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
-  <description>If the dfs image is compressed, how should they be compressed?
-               This has to be a codec defined in io.compression.codecs.
-  </description>
-</property>
-
-<property>
-  <name>dfs.image.transfer.timeout</name>
-  <value>600000</value>
-  <description>
-        Timeout for image transfer in milliseconds. This timeout and the related
-        dfs.image.transfer.bandwidthPerSec parameter should be configured such
-        that normal image transfer can complete within the timeout.
-        This timeout prevents client hangs when the sender fails during
-        image transfer, which is particularly important during checkpointing.
-        Note that this timeout applies to the entirety of image transfer, and
-        is not a socket timeout.
-  </description>
-</property>
-
-<property>
-  <name>dfs.image.transfer.bandwidthPerSec</name>
-  <value>0</value>
-  <description>
-        Maximum bandwidth used for image transfer in bytes per second.
-        This can help keep normal namenode operations responsive during
-        checkpointing. The maximum bandwidth and timeout in
-        dfs.image.transfer.timeout should be set such that normal image
-        transfers can complete successfully.
-        A default value of 0 indicates that throttling is disabled. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.support.allow.format</name>
-  <value>true</value>
-  <description>Does HDFS namenode allow itself to be formatted?
-               You may consider setting this to false for any production
-               cluster, to avoid any possibility of formatting a running DFS.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.max.transfer.threads</name>
-  <value>4096</value>
-  <description>
-        Specifies the maximum number of threads to use for transferring data
-        in and out of the DN.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.readahead.bytes</name>
-  <value>4193404</value>
-  <description>
-        While reading block files, if the Hadoop native libraries are available,
-        the datanode can use the posix_fadvise system call to explicitly
-        page data into the operating system buffer cache ahead of the current
-        reader's position. This can improve performance especially when
-        disks are highly contended.
-
-        This configuration specifies the number of bytes ahead of the current
-        read position which the datanode will attempt to read ahead. This
-        feature may be disabled by configuring this property to 0.
-
-        If the native libraries are not available, this configuration has no
-        effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.drop.cache.behind.reads</name>
-  <value>false</value>
-  <description>
-        In some workloads, the data read from HDFS is known to be significantly
-        large enough that it is unlikely to be useful to cache it in the
-        operating system buffer cache. In this case, the DataNode may be
-        configured to automatically purge all data from the buffer cache
-        after it is delivered to the client. This behavior is automatically
-        disabled for workloads which read only short sections of a block
-        (e.g. HBase random-IO workloads).
-
-        This may improve performance for some workloads by freeing buffer
-        cache space usage for more cacheable data.
-
-        If the Hadoop native libraries are not available, this configuration
-        has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.drop.cache.behind.writes</name>
-  <value>false</value>
-  <description>
-        In some workloads, the data written to HDFS is known to be significantly
-        large enough that it is unlikely to be useful to cache it in the
-        operating system buffer cache. In this case, the DataNode may be
-        configured to automatically purge all data from the buffer cache
-        after it is written to disk.
-
-        This may improve performance for some workloads by freeing buffer
-        cache space usage for more cacheable data.
-
-        If the Hadoop native libraries are not available, this configuration
-        has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.sync.behind.writes</name>
-  <value>false</value>
-  <description>
-        If this configuration is enabled, the datanode will instruct the
-        operating system to enqueue all written data to the disk immediately
-        after it is written. This differs from the usual OS policy which
-        may wait for up to 30 seconds before triggering writeback.
-
-        This may improve performance for some workloads by smoothing the
-        IO profile for data written to disk.
-
-        If the Hadoop native libraries are not available, this configuration
-        has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.failover.max.attempts</name>
-  <value>15</value>
-  <description>
-    Expert only. The number of client failover attempts that should be
-    made before the failover is considered failed.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.failover.sleep.base.millis</name>
-  <value>500</value>
-  <description>
-    Expert only. The time to wait, in milliseconds, between failover
-    attempts increases exponentially as a function of the number of
-    attempts made so far, with a random factor of +/- 50%. This option
-    specifies the base value used in the failover calculation. The
-    first failover will retry immediately. The 2nd failover attempt
-    will delay at least dfs.client.failover.sleep.base.millis
-    milliseconds. And so on.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.failover.sleep.max.millis</name>
-  <value>15000</value>
-  <description>
-    Expert only. The time to wait, in milliseconds, between failover
-    attempts increases exponentially as a function of the number of
-    attempts made so far, with a random factor of +/- 50%. This option
-    specifies the maximum value to wait between failovers. 
-    Specifically, the time between two failover attempts will not
-    exceed +/- 50% of dfs.client.failover.sleep.max.millis
-    milliseconds.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.failover.connection.retries</name>
-  <value>0</value>
-  <description>
-    Expert only. Indicates the number of retries a failover IPC client
-    will make to establish a server connection.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.failover.connection.retries.on.timeouts</name>
-  <value>0</value>
-  <description>
-    Expert only. The number of retry attempts a failover IPC client
-    will make on socket timeout when establishing a server connection.
-  </description>
-</property>
-
-<property>
-  <name>dfs.nameservices</name>
-  <value></value>
-  <description>
-    Comma-separated list of nameservices.
-  </description>
-</property>
-
-<property>
-  <name>dfs.nameservice.id</name>
-  <value></value>
-  <description>
-    The ID of this nameservice. If the nameservice ID is not
-    configured or more than one nameservice is configured for
-    dfs.nameservices it is determined automatically by
-    matching the local node's address with the configured address.
-  </description>
-</property>
-
-<property>
-  <name>dfs.ha.namenodes.EXAMPLENAMESERVICE</name>
-  <value></value>
-  <description>
-    The prefix for a given nameservice; it contains a comma-separated
-    list of namenodes for a given nameservice (e.g. EXAMPLENAMESERVICE).
-  </description>
-</property>
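A minimal HA sketch combining the two properties above, assuming a nameservice called ns1 with two namenodes named nn1 and nn2 (all three identifiers are hypothetical), would be:

    <property>
      <name>dfs.nameservices</name>
      <value>ns1</value>
    </property>
    <property>
      <!-- hypothetical nameservice suffix; lists the namenodes that belong to ns1 -->
      <name>dfs.ha.namenodes.ns1</name>
      <value>nn1,nn2</value>
    </property>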
-
-<property>
-  <name>dfs.ha.namenode.id</name>
-  <value></value>
-  <description>
-    The ID of this namenode. If the namenode ID is not configured it
-    is determined automatically by matching the local node's address
-    with the configured address.
-  </description>
-</property>
-
-<property>
-  <name>dfs.ha.log-roll.period</name>
-  <value>120</value>
-  <description>
-    How often, in seconds, the StandbyNode should ask the active to
-    roll edit logs. Since the StandbyNode only reads from finalized
-    log segments, the StandbyNode will only be as up-to-date as how
-    often the logs are rolled. Note that failover triggers a log roll
-    so the StandbyNode will be up to date before it becomes active.
-  </description>
-</property>
-
-<property>
-  <name>dfs.ha.tail-edits.period</name>
-  <value>60</value>
-  <description>
-    How often, in seconds, the StandbyNode should check for new
-    finalized log segments in the shared edits log.
-  </description>
-</property>
-
-<property>
-  <name>dfs.ha.automatic-failover.enabled</name>
-  <value>false</value>
-  <description>
-    Whether automatic failover is enabled. See the HDFS High
-    Availability documentation for details on automatic HA
-    configuration.
-  </description>
-</property>
-
-<property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>
-    Does HDFS allow appends to files?
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.use.datanode.hostname</name>
-  <value>false</value>
-  <description>Whether clients should use datanode hostnames when
-    connecting to datanodes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.use.datanode.hostname</name>
-  <value>false</value>
-  <description>Whether datanodes should use datanode hostnames when
-    connecting to other datanodes for data transfer.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.local.interfaces</name>
-  <value></value>
-  <description>A comma separated list of network interface names to use
-    for data transfer between the client and datanodes. When creating
-    a connection to read from or write to a datanode, the client
-    chooses one of the specified interfaces at random and binds its
-    socket to the IP of that interface. Individual names may be
-    specified as either an interface name (eg "eth0"), a subinterface
-    name (eg "eth0:0"), or an IP address (which may be specified using
-    CIDR notation to match a range of IPs).
-  </description>
-</property>
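As an illustrative override (the interface names and the CIDR range are assumptions), a client could be pinned to specific interfaces like this:

    <property>
      <name>dfs.client.local.interfaces</name>
      <!-- an interface name, a subinterface name, and a CIDR range may be mixed -->
      <value>eth0,eth0:0,192.168.1.0/24</value>
    </property>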
-
-<property>
-  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-  <value>${dfs.web.authentication.kerberos.principal}</value>
-</property>
-
-<property>
-  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-  <value>${dfs.web.authentication.kerberos.principal}</value>
-</property>
-
-<property>
-  <name>dfs.namenode.avoid.read.stale.datanode</name>
-  <value>false</value>
-  <description>
-    Indicate whether or not to avoid reading from &quot;stale&quot; datanodes whose
-    heartbeat messages have not been received by the namenode 
-    for more than a specified time interval. Stale datanodes will be
-    moved to the end of the node list returned for reading. See
-    dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.avoid.write.stale.datanode</name>
-  <value>false</value>
-  <description>
-    Indicate whether or not to avoid writing to &quot;stale&quot; datanodes whose 
-    heartbeat messages have not been received by the namenode 
-    for more than a specified time interval. Writes will avoid using 
-    stale datanodes unless more than a configured ratio 
-    (dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as 
-    stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
-    for reads.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.stale.datanode.interval</name>
-  <value>30000</value>
-  <description>
-    Default time interval for marking a datanode as "stale", i.e., if 
-    the namenode has not received heartbeat msg from a datanode for 
-    more than this time interval, the datanode will be marked and treated 
-    as "stale" by default. The stale interval cannot be too small since 
-    otherwise this may cause too frequent change of stale states. 
-    We thus set a minimum stale interval value (the default value is 3 times
-    the heartbeat interval) and guarantee that the stale interval cannot be less
-    than the minimum value. A stale data node is avoided during lease/block
-    recovery. It can be conditionally avoided for reads (see
-    dfs.namenode.avoid.read.stale.datanode) and for writes (see
-    dfs.namenode.avoid.write.stale.datanode).
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.write.stale.datanode.ratio</name>
-  <value>0.5f</value>
-  <description>
-    When the ratio of stale datanodes to total datanodes is greater
-    than this ratio, stop avoiding writes to stale nodes so as to
-    prevent causing hotspots.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.invalidate.work.pct.per.iteration</name>
-  <value>0.32f</value>
-  <description>
-    *Note*: Advanced property. Change with caution.
-    This determines the percentage amount of block
-    invalidations (deletes) to do over a single DN heartbeat
-    deletion command. The final deletion count is determined by applying this
-    percentage to the number of live nodes in the system.
-    The resultant number is the number of blocks from the deletion list
-    chosen for proper invalidation over a single heartbeat of a single DN.
-    Value should be a positive, non-zero percentage in float notation (X.Yf),
-    with 1.0f meaning 100%.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.work.multiplier.per.iteration</name>
-  <value>2</value>
-  <description>
-    *Note*: Advanced property. Change with caution.
-    This determines the total amount of block transfers to begin in
-    parallel at a DN, for replication, when such a command list is being
-    sent over a DN heartbeat by the NN. The actual number is obtained by
-    multiplying this multiplier by the total number of live nodes in the
-    cluster. The resulting number is the number of blocks to begin transfers
-    immediately for, per DN heartbeat. This number can be any positive,
-    non-zero integer.
-  </description>
-</property>
-
-<property>
-  <name>dfs.webhdfs.enabled</name>
-  <value>true</value>
-  <description>
-    Enable WebHDFS (REST API) in Namenodes and Datanodes.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.fuse.connection.timeout</name>
-  <value>300</value>
-  <description>
-    The minimum number of seconds that we'll cache libhdfs connection objects
-    in fuse_dfs. Lower values will result in lower memory consumption; higher
-    values may speed up access by avoiding the overhead of creating new
-    connection objects.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.fuse.timer.period</name>
-  <value>5</value>
-  <description>
-    The number of seconds between cache expiry checks in fuse_dfs. Lower values
-    will result in fuse_dfs noticing changes to Kerberos ticket caches more
-    quickly.
-  </description>
-</property>
-
-<property>
-  <name>dfs.metrics.percentiles.intervals</name>
-  <value></value>
-  <description>
-    Comma-delimited set of integers denoting the desired rollover intervals 
-    (in seconds) for percentile latency metrics on the Namenode and Datanode.
-    By default, percentile latency metrics are disabled.
-  </description>
-</property>
-
-<property>
-  <name>dfs.encrypt.data.transfer</name>
-  <value>false</value>
-  <description>
-    Whether or not actual block data that is read/written from/to HDFS should
-    be encrypted on the wire. This only needs to be set on the NN and DNs,
-    clients will deduce this automatically.
-  </description>
-</property>
-
-<property>
-  <name>dfs.encrypt.data.transfer.algorithm</name>
-  <value></value>
-  <description>
-    This value may be set to either "3des" or "rc4". If nothing is set, then
-    the configured JCE default on the system is used (usually 3DES.) It is
-    widely believed that 3DES is more cryptographically secure, but RC4 is
-    substantially faster.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
-  <value>false</value>
-  <description>
-    Boolean which enables backend datanode-side support for the experimental DistributedFileSystem#getFileBlockStorageLocations API.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.file-block-storage-locations.num-threads</name>
-  <value>10</value>
-  <description>
-    Number of threads used for making parallel RPCs in DistributedFileSystem#getFileBlockStorageLocations().
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.file-block-storage-locations.timeout</name>
-  <value>60</value>
-  <description>
-    Timeout (in seconds) for the parallel RPCs made in DistributedFileSystem#getFileBlockStorageLocations().
-  </description>
-</property>
-
-<property>
-  <name>dfs.journalnode.rpc-address</name>
-  <value>0.0.0.0:8485</value>
-  <description>
-    The JournalNode RPC server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.journalnode.http-address</name>
-  <value>0.0.0.0:8480</value>
-  <description>
-    The address and port the JournalNode web UI listens on.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.audit.loggers</name>
-  <value>default</value>
-  <description>
-    List of classes implementing audit loggers that will receive audit events.
-    These should be implementations of org.apache.hadoop.hdfs.server.namenode.AuditLogger.
-    The special value "default" can be used to reference the default audit
-    logger, which uses the configured log system. Installing custom audit loggers
-    may affect the performance and stability of the NameNode. Refer to the custom
-    logger's documentation for more details.
-  </description>
-</property>
-
-<property>
-  <name>dfs.domain.socket.path</name>
-  <value></value>
-  <description>
-    Optional.  This is a path to a UNIX domain socket that will be used for
-    communication between the DataNode and local HDFS clients.
-    If the string "_PORT" is present in this path, it will be replaced by the
-    TCP port of the DataNode.
-  </description>
-</property>
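For local HDFS clients this is commonly pointed at a datanode-owned socket; the path below is a hypothetical example using the _PORT substitution described above:

    <property>
      <name>dfs.domain.socket.path</name>
      <!-- hypothetical path; _PORT is replaced by the DataNode's TCP port -->
      <value>/var/run/hadoop-hdfs/dn._PORT</value>
    </property>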
-
-<property>
-  <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
-  <value>10737418240</value> <!-- 10 GB -->
-  <description>
-    Only used when the dfs.datanode.fsdataset.volume.choosing.policy is set to
-    org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy.
-    This setting controls how much DN volumes are allowed to differ in terms of
-    bytes of free disk space before they are considered imbalanced. If the free
-    space of all the volumes are within this range of each other, the volumes
-    will be considered balanced and block assignments will be done on a pure
-    round robin basis.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
-  <value>0.75f</value>
-  <description>
-    Only used when the dfs.datanode.fsdataset.volume.choosing.policy is set to
-    org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy.
-    This setting controls what percentage of new block allocations will be sent
-    to volumes with more available disk space than others. This setting should
-    be in the range 0.0 - 1.0, though in practice 0.5 - 1.0, since there should
-    be no reason to prefer that volumes with less available disk space receive
-    more block allocations.
-  </description>
-</property>
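Putting the last two settings together, a hypothetical configuration that actually enables the available-space policy (the numeric values are just the defaults quoted above) would be:

    <property>
      <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
      <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
    </property>
    <property>
      <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
      <!-- 10 GB imbalance threshold -->
      <value>10737418240</value>
    </property>
    <property>
      <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
      <value>0.75f</value>
    </property>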
-
-<property>
-  <name>dfs.namenode.edits.noeditlogchannelflush</name>
-  <value>false</value>
-  <description>
-    Specifies whether to flush edit log file channel. When set, expensive
-    FileChannel#force calls are skipped and synchronous disk writes are
-    enabled instead by opening the edit log file with RandomAccessFile("rws")
-    flags. This can significantly improve the performance of edit log writes
-    on the Windows platform.
-    Note that the behavior of the "rws" flags is platform and hardware specific
-    and might not provide the same level of guarantees as FileChannel#force.
-    For example, the write will skip the disk-cache on SAS and SCSI devices
-    while it might not on SATA devices. This is an expert level setting,
-    change with caution.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.cache.drop.behind.writes</name>
-  <value></value>
-  <description>
-    Just like dfs.datanode.drop.cache.behind.writes, this setting causes the
-    page cache to be dropped behind HDFS writes, potentially freeing up more
-    memory for other uses.  Unlike dfs.datanode.drop.cache.behind.writes, this
-    is a client-side setting rather than a setting for the entire datanode.
-    If present, this setting will override the DataNode default.
-
-    If the native libraries are not available to the DataNode, this
-    configuration has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.cache.drop.behind.reads</name>
-  <value></value>
-  <description>
-    Just like dfs.datanode.drop.cache.behind.reads, this setting causes the
-    page cache to be dropped behind HDFS reads, potentially freeing up more
-    memory for other uses.  Unlike dfs.datanode.drop.cache.behind.reads, this
-    is a client-side setting rather than a setting for the entire datanode.  If
-    present, this setting will override the DataNode default.
-
-    If the native libraries are not available to the DataNode, this
-    configuration has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.cache.readahead</name>
-  <value></value>
-  <description>
-    When using remote reads, this setting causes the datanode to
-    read ahead in the block file using posix_fadvise, potentially decreasing
-    I/O wait times.  Unlike dfs.datanode.readahead.bytes, this is a client-side
-    setting rather than a setting for the entire datanode.  If present, this
-    setting will override the DataNode default.
-
-    When using local reads, this setting determines how much readahead we do in
-    BlockReaderLocal.
-
-    If the native libraries are not available to the DataNode, this
-    configuration has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.enable.retrycache</name>
-  <value>true</value>
-  <description>
-    This enables the retry cache on the namenode. Namenode tracks for
-    non-idempotent requests the corresponding response. If a client retries the
-    request, the response from the retry cache is sent. Such operations
-    are tagged with annotation @AtMostOnce in namenode protocols. It is
-    recommended that this flag be set to true. Setting it to false will result
-    in clients getting failure responses to retried requests. This flag must
-    be enabled in HA setup for transparent fail-overs.
-
-    The entries in the cache have expiration time configurable
-    using dfs.namenode.retrycache.expirytime.millis.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.retrycache.expirytime.millis</name>
-  <value>600000</value>
-  <description>
-    The time for which retry cache entries are retained.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.retrycache.heap.percent</name>
-  <value>0.03f</value>
-  <description>
-    This parameter configures the heap size allocated for retry cache
-    (excluding the cached response). This corresponds to approximately
-    4096 entries for every 64MB of namenode process java heap size.
-    Assuming retry cache entry expiration time (configured using
-    dfs.namenode.retrycache.expirytime.millis) of 10 minutes, this
-    enables retry cache to support 7 operations per second sustained
-    for 10 minutes. As the heap size is increased, the operation rate
-    linearly increases.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.mmap.cache.size</name>
-  <value>1024</value>
-  <description>
-    When zero-copy reads are used, the DFSClient keeps a cache of recently used
-    memory mapped regions.  This parameter controls the maximum number of
-    entries that we will keep in that cache.
-
-    If this is set to 0, we will not allow mmap.
-
-    The larger this number is, the more file descriptors we will potentially
-    use for memory-mapped files.  mmaped files also use virtual address space.
-    You may need to increase your ulimit virtual address space limits before
-    increasing the client mmap cache size.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.mmap.cache.timeout.ms</name>
-  <value>900000</value>
-  <description>
-    The minimum length of time that we will keep an mmap entry in the cache
-    between uses.  If an entry is in the cache longer than this, and nobody
-    uses it, it will be removed by a background thread.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.path.based.cache.block.map.allocation.percent</name>
-  <value>0.25</value>
-  <description>
-    The percentage of the Java heap which we will allocate to the cached blocks
-    map.  The cached blocks map is a hash map which uses chained hashing.
-    Smaller maps may be accessed more slowly if the number of cached blocks is
-    large; larger maps will consume more memory.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.max.locked.memory</name>
-  <value>0</value>
-  <description>
-    The amount of memory in bytes to use for caching of block replicas in
-    memory on the datanode. The datanode's maximum locked memory soft ulimit
-    (RLIMIT_MEMLOCK) must be set to at least this value, else the datanode
-    will abort on startup.
-
-    By default, this parameter is set to 0, which disables in-memory caching.
-
-    If the native libraries are not available to the DataNode, this
-    configuration has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.list.cache.directives.num.responses</name>
-  <value>100</value>
-  <description>
-    This value controls the number of cache directives that the NameNode will
-    send over the wire in response to a listDirectives RPC.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.list.cache.pools.num.responses</name>
-  <value>100</value>
-  <description>
-    This value controls the number of cache pools that the NameNode will
-    send over the wire in response to a listPools RPC.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.path.based.cache.refresh.interval.ms</name>
-  <value>300000</value>
-  <description>
-    The amount of milliseconds between subsequent path cache rescans.  Path
-    cache rescans are when we calculate which blocks should be cached, and on
-    what datanodes.
-
-    By default, this parameter is set to 300000, which is five minutes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.path.based.cache.retry.interval.ms</name>
-  <value>60000</value>
-  <description>
-    When the NameNode needs to uncache something that is cached, or cache
-    something that is not cached, it must direct the DataNodes to do so by
-    sending a DNA_CACHE or DNA_UNCACHE command in response to a DataNode
-    heartbeat.  This parameter controls how frequently the NameNode will
-    resend these commands.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.fsdatasetcache.max.threads.per.volume</name>
-  <value>4</value>
-  <description>
-    The maximum number of threads per volume to use for caching new data
-    on the datanode. These threads consume both I/O and CPU. This can affect
-    normal datanode operations.
-  </description>
-</property>
-
-<property>
-  <name>dfs.cachereport.intervalMsec</name>
-  <value>10000</value>
-  <description>
-    Determines cache reporting interval in milliseconds.  After this amount of
-    time, the DataNode sends a full report of its cache state to the NameNode.
-    The NameNode uses the cache report to update its map of cached blocks to
-    DataNode locations.
-
-    This configuration has no effect if in-memory caching has been disabled by
-    setting dfs.datanode.max.locked.memory to 0 (which is the default).
-
-    If the native libraries are not available to the DataNode, this
-    configuration has no effect.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name>
-  <value>2.0</value>
-  <description>
-    Determines when an active namenode will roll its own edit log.
-    The actual threshold (in number of edits) is determined by multiplying
-    this value by dfs.namenode.checkpoint.txns.
-
-    This prevents extremely large edit files from accumulating on the active
-    namenode, which can cause timeouts during namenode startup and pose an
-    administrative hassle. This behavior is intended as a failsafe for when
-    the standby or secondary namenode fail to roll the edit log by the normal
-    checkpoint threshold.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.edit.log.autoroll.check.interval.ms</name>
-  <value>300000</value>
-  <description>
-    How often an active namenode will check if it needs to roll its edit log,
-    in milliseconds.
-  </description>
-</property>
-
-<property>
-  <name>dfs.webhdfs.user.provider.user.pattern</name>
-  <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
-  <description>
-    Valid pattern for user and group names for webhdfs, it must be a valid java regex.
-  </description>
-</property>
-
-</configuration>
diff --git a/components/camel-hdfs/src/test/resources/hdfs-mac-test.xml b/components/camel-hdfs/src/test/resources/hdfs-mac-test.xml
deleted file mode 100644
index 23439369b67..00000000000
--- a/components/camel-hdfs/src/test/resources/hdfs-mac-test.xml
+++ /dev/null
@@ -1,1607 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
--->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into hdfs-site.xml and change them -->
-<!-- there.  If hdfs-site.xml does not already exist, create it.      -->
-
-<configuration>
-
-<property>
-  <name>hadoop.hdfs.configuration.version</name>
-  <value>1</value>
-  <description>version of this configuration file</description>
-</property>
-
-<property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>
-    The logging level for dfs namenode. Other values are "dir" (trace
-    namespace mutations), "block" (trace block under/over replications
-    and block creations/deletions), or "all".
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.rpc-address</name>
-  <value></value>
-  <description>
-    RPC address that handles all client requests. In the case of HA/Federation where multiple namenodes exist,
-    the name service id is added to the name, e.g. dfs.namenode.rpc-address.ns1 or
-    dfs.namenode.rpc-address.EXAMPLENAMESERVICE.
-    The value of this property will take the form of nn-host1:rpc-port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.rpc-bind-host</name>
-  <value></value>
-  <description>
-    The actual address the server will bind to. If this optional address is
-    set, the RPC server will bind to this address and the port specified in
-    dfs.namenode.rpc-address for the RPC server. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making the name node listen on all interfaces by setting it to 0.0.0.0.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.servicerpc-address</name>
-  <value></value>
-  <description>
-    RPC address for HDFS Services communication. BackupNode, Datanodes and all other services should be
-    connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
-    the name service id is added to the name, e.g. dfs.namenode.servicerpc-address.ns1 or
-    dfs.namenode.rpc-address.EXAMPLENAMESERVICE.
-    The value of this property will take the form of nn-host1:rpc-port.
-    If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.servicerpc-bind-host</name>
-  <value></value>
-  <description>
-    The actual address the server will bind to. If this optional address is
-    set, the service RPC server will bind to this address and the port 
-    specified in dfs.namenode.servicerpc-address. It can also be specified
-    per name node or name service for HA/Federation. This is most useful for
-    making the name node listen on all interfaces by setting it to 0.0.0.0.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.secondary.http-address</name>
-  <value>0.0.0.0:50090</value>
-  <description>
-    The secondary namenode http server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.address</name>
-  <value>0.0.0.0:50010</value>
-  <description>
-    The datanode server address and port for data transfer.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.http.address</name>
-  <value>0.0.0.0:50075</value>
-  <description>
-    The datanode http server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.ipc.address</name>
-  <value>0.0.0.0:50020</value>
-  <description>
-    The datanode ipc server address and port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.handler.count</name>
-  <value>10</value>
-  <description>The number of server threads for the datanode.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.http-address</name>
-  <value>0.0.0.0:50070</value>
-  <description>
-    The address and the base port where the dfs namenode web ui will listen on.
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>
-    Deprecated. Use "dfs.http.policy" instead.
-  </description>
-</property>
-
-<property>
-  <name>dfs.http.policy</name>
-  <value>HTTP_ONLY</value>
-  <description>Decide if HTTPS (SSL) is supported on HDFS.
-    This configures the HTTP endpoint for HDFS daemons:
-      The following values are supported:
-      - HTTP_ONLY : Service is provided only on http
-      - HTTPS_ONLY : Service is provided only on https
-      - HTTP_AND_HTTPS : Service is provided both on http and https
-  </description>
-</property>
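As an example of switching the daemons to TLS only (assuming the SSL keystore resources described further below are already in place), one would set:

    <property>
      <name>dfs.http.policy</name>
      <!-- serve the web endpoints over https only -->
      <value>HTTPS_ONLY</value>
    </property>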
-
-<property>
-  <name>dfs.client.https.need-auth</name>
-  <value>false</value>
-  <description>Whether SSL client certificate authentication is required
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>Resource file from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.https.keystore.resource</name>
-  <value>ssl-client.xml</value>
-  <description>Resource file from which ssl client keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.https.address</name>
-  <value>0.0.0.0:50475</value>
-  <description>The datanode secure http server address and port.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.https-address</name>
-  <value>0.0.0.0:50470</value>
-  <description>The namenode secure http server address and port.</description>
-</property>
-
- <property>
-  <name>dfs.datanode.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </description>
- </property>
- 
-<property>
-  <name>dfs.datanode.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </description>
- </property>
- 
- <property>
-  <name>dfs.namenode.backup.address</name>
-  <value>0.0.0.0:50100</value>
-  <description>
-    The backup node server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
- 
- <property>
-  <name>dfs.namenode.backup.http-address</name>
-  <value>0.0.0.0:50105</value>
-  <description>
-    The backup node http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.considerLoad</name>
-  <value>true</value>
-  <description>Decide if chooseTarget considers the target's load or not
-  </description>
-</property>
-<property>
-  <name>dfs.default.chunk.view.size</name>
-  <value>32768</value>
-  <description>The number of bytes to view for a file on the browser.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.du.reserved</name>
-  <value>0</value>
-  <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.name.dir</name>
-  <value>file://${hadoop.tmp.dir}/dfs/name</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the name table (fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-</property>
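A sketch of a redundant name-directory layout following the comma-delimited convention described above (both paths are hypothetical):

    <property>
      <name>dfs.namenode.name.dir</name>
      <!-- the fsimage is replicated into each listed directory -->
      <value>file:///data1/hdfs/name,file:///data2/hdfs/name</value>
    </property>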
-
-<property>
-  <name>dfs.namenode.name.dir.restore</name>
-  <value>false</value>
-  <description>Set to true to enable NameNode to attempt recovering a
-      previously failed dfs.namenode.name.dir. When enabled, a recovery of any
-      failed directory is attempted during checkpoint.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.max-component-length</name>
-  <value>0</value>
-  <description>Defines the maximum number of characters in each component
-      of a path.  A value of 0 will disable the check.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.max-directory-items</name>
-  <value>0</value>
-  <description>Defines the maximum number of items that a directory may
-      contain.  A value of 0 will disable the check.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.fs-limits.min-block-size</name>
-  <value>1048576</value>
-  <description>Minimum block size in bytes, enforced by the Namenode at create
-      time. This prevents the accidental creation of files with tiny block
-      sizes (and thus many blocks), which can degrade
-      performance.</description>
-</property>
-
-<property>
-    <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-    <value>1048576</value>
-    <description>Maximum number of blocks per file, enforced by the Namenode on
-        write. This prevents the creation of extremely large files which can
-        degrade performance.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.edits.dir</name>
-  <value>${dfs.namenode.name.dir}</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. The default value is the same as dfs.namenode.name.dir.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.shared.edits.dir</name>
-  <value></value>
-  <description>A directory on shared storage between the multiple namenodes
-  in an HA cluster. This directory will be written by the active and read
-  by the standby in order to keep the namespaces synchronized. This directory
-  does not need to be listed in dfs.namenode.edits.dir above. It should be
-  left empty in a non-HA cluster.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.edits.journal-plugin.qjournal</name>
-  <value>org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager</value>
-</property>
-
-<property>
-  <name>dfs.permissions.enabled</name>
-  <value>true</value>
-  <description>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner or group of files or directories.
-  </description>
-</property>
-
-<property>
-  <name>dfs.permissions.superusergroup</name>
-  <value>supergroup</value>
-  <description>The name of the group of super-users.</description>
-</property>
-<!--
-<property>
-   <name>dfs.cluster.administrators</name>
-   <value>ACL for the admins</value>
-   <description>This configuration is used to control who can access the
-                default servlets in the namenode, etc.
-   </description>
-</property>
--->
-
-<property>
-  <name>dfs.block.access.token.enable</name>
-  <value>false</value>
-  <description>
-    If "true", access tokens are used as capabilities for accessing datanodes.
-    If "false", no access tokens are checked on accessing datanodes.
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.access.key.update.interval</name>
-  <value>600</value>
-  <description>
-    Interval in minutes at which namenode updates its access keys.
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.access.token.lifetime</name>
-  <value>600</value>
-  <description>The lifetime of access tokens in minutes.</description>
-</property>
-
-<property>
-  <name>dfs.datanode.data.dir</name>
-  <value>file://${hadoop.tmp.dir}/dfs/data</value>
-  <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.data.dir.perm</name>
-  <value>700</value>
-  <description>Permissions for the directories on the local filesystem where
-  the DFS data node stores its blocks. The permissions can either be octal or
-  symbolic.</description>
-</property>
-
-<property>
-  <name>dfs.replication</name>
-  <value>2</value>
-  <description>Default block replication. 
-  The actual number of replications can be specified when the file is created.
-  The default is used if replication is not specified at create time.
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.max</name>
-  <value>512</value>
-  <description>Maximal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.replication.min</name>
-  <value>1</value>
-  <description>Minimal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.blocksize</name>
-  <value>134217728</value>
-  <description>
-      The default block size for new files, in bytes.
-      You can use the following suffix (case insensitive):
-      k(kilo), m(mega), g(giga), t(tera), p(peta), e(exa) to specify the size (such as 128k, 512m, 1g, etc.),
-      Or provide complete size in bytes (such as 134217728 for 128 MB).
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.retries</name>
-  <value>3</value>
-  <description>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
-  <value>false</value>
-  <description>
-    If there is a datanode/network failure in the write pipeline,
-    DFSClient will try to remove the failed datanode from the pipeline
-    and then continue writing with the remaining datanodes. As a result,
-    the number of datanodes in the pipeline is decreased.  The feature is
-    to add new datanodes to the pipeline.
-
-    This is a site-wide property to enable/disable the feature.
-
-    When the cluster size is extremely small, e.g. 3 nodes or less, cluster
-    administrators may want to set the policy to NEVER in the default
-    configuration file or disable this feature.  Otherwise, users may
-    experience an unusually high rate of pipeline failures since it is
-    impossible to find new datanodes for replacement.
-
-    See also dfs.client.block.write.replace-datanode-on-failure.policy
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
-  <value>NEVER</value>
-  <description>
-    This property is used only if the value of
-    dfs.client.block.write.replace-datanode-on-failure.enable is true.
... 7403 lines suppressed ...