Posted to commits@camel.apache.org by gn...@apache.org on 2018/10/12 11:26:28 UTC

[camel] 24/44: Add a protected logger on ServiceSupport and use it instead of a static one

This is an automated email from the ASF dual-hosted git repository.

gnodet pushed a commit to branch sandbox/camel-3.x
in repository https://gitbox.apache.org/repos/asf/camel.git

commit feae39aa6dcaf509f7d034219108b1343fff0351
Author: Guillaume Nodet <gn...@gmail.com>
AuthorDate: Fri Sep 28 17:33:23 2018 +0200

    Add a protected logger on ServiceSupport and use it instead of a static one
    
    # Conflicts:
    #	camel-core/src/main/java/org/apache/camel/processor/Resequencer.java
---
 .../apache/camel/component/bean/BeanComponent.java |   5 +-
 .../camel/component/direct/DirectProducer.java     |   6 +-
 .../directvm/DirectVmBlockingProducer.java         |   6 +-
 .../component/directvm/DirectVmProcessor.java      |   7 +-
 .../camel/component/file/GenericFileConsumer.java  |   2 +-
 .../component/file/GenericFilePollingConsumer.java |  30 +++---
 ...dempotentChangedRepositoryReadLockStrategy.java |  18 ++--
 ...IdempotentRenameRepositoryReadLockStrategy.java |   6 +-
 .../FileIdempotentRepositoryReadLockStrategy.java  |  14 ++-
 .../GenericFileProcessStrategySupport.java         |   1 -
 .../apache/camel/component/log/LogComponent.java   |   5 +-
 .../apache/camel/component/mock/MockEndpoint.java  |  26 ++---
 .../component/properties/PropertiesComponent.java  |  13 ++-
 .../apache/camel/component/rest/RestEndpoint.java  |  10 +-
 .../apache/camel/component/seda/SedaConsumer.java  |  33 +++---
 .../apache/camel/component/seda/SedaEndpoint.java  |  14 +--
 .../apache/camel/component/test/TestEndpoint.java  |   7 +-
 .../camel/component/timer/TimerConsumer.java       |  12 +--
 .../apache/camel/component/xslt/XsltComponent.java |   4 +-
 .../apache/camel/component/xslt/XsltEndpoint.java  |  12 +--
 .../java/org/apache/camel/impl/ConsumerCache.java  |  11 +-
 .../impl/DefaultAsyncProcessorAwaitManager.java    |  22 ++--
 .../org/apache/camel/impl/DefaultCamelContext.java |   4 +-
 .../org/apache/camel/impl/DefaultComponent.java    |  14 +--
 .../org/apache/camel/impl/DefaultConsumer.java     |   4 +-
 .../apache/camel/impl/DefaultConsumerTemplate.java |   5 +-
 .../org/apache/camel/impl/DefaultEndpoint.java     |   5 +-
 .../camel/impl/DefaultInflightRepository.java      |   5 +-
 .../org/apache/camel/impl/DefaultProducer.java     |   2 +-
 .../DefaultScheduledPollConsumerScheduler.java     |  14 +--
 .../apache/camel/impl/DefaultShutdownStrategy.java |  77 +++++++-------
 .../camel/impl/DefaultStreamCachingStrategy.java   |  43 ++++----
 .../camel/impl/EventDrivenPollingConsumer.java     |   8 +-
 .../org/apache/camel/impl/FileStateRepository.java |  24 ++---
 .../impl/InterceptSendToEndpointProcessor.java     |  12 +--
 .../apache/camel/impl/PollingConsumerSupport.java  |   2 +-
 .../java/org/apache/camel/impl/ProducerCache.java  |  12 +--
 .../java/org/apache/camel/impl/RouteService.java   |  12 +--
 .../camel/impl/ScheduledBatchPollingConsumer.java  |   4 +-
 .../apache/camel/impl/ScheduledPollConsumer.java   |  53 +++++-----
 .../camel/impl/ThrottlingExceptionRoutePolicy.java |  27 +++--
 .../impl/cluster/AbstractCamelClusterService.java  |  19 ++--
 .../camel/impl/cluster/ClusteredRoutePolicy.java   |  17 ++-
 .../DefaultManagementLifecycleStrategy.java        |  65 ++++++------
 .../DefaultManagementMBeanAssembler.java           |   8 +-
 .../camel/processor/CamelInternalProcessor.java    |  34 +++---
 .../org/apache/camel/processor/CatchProcessor.java |   9 +-
 .../apache/camel/processor/ChoiceProcessor.java    |   4 +-
 .../camel/processor/ClaimCheckProcessor.java       |  13 ++-
 .../camel/processor/DelayProcessorSupport.java     |   2 +-
 .../java/org/apache/camel/processor/Enricher.java  |  11 +-
 .../camel/processor/ErrorHandlerSupport.java       |   2 -
 .../camel/processor/FatalFallbackErrorHandler.java |  12 +--
 .../apache/camel/processor/FilterProcessor.java    |   4 +-
 .../apache/camel/processor/FinallyProcessor.java   |   5 +-
 .../org/apache/camel/processor/LogProcessor.java   |   7 +-
 .../org/apache/camel/processor/LoopProcessor.java  |  17 ++-
 .../apache/camel/processor/MulticastProcessor.java |  74 +++++++------
 .../camel/processor/OnCompletionProcessor.java     |  15 ++-
 .../java/org/apache/camel/processor/Pipeline.java  |  21 ++--
 .../org/apache/camel/processor/PollEnricher.java   |  17 ++-
 .../org/apache/camel/processor/RecipientList.java  |   3 +-
 .../camel/processor/RecipientListProcessor.java    |  16 +--
 .../org/apache/camel/processor/RoutingSlip.java    |   2 +-
 .../apache/camel/processor/SamplingThrottler.java  |   9 +-
 .../camel/processor/SendDynamicProcessor.java      |  28 +++--
 .../org/apache/camel/processor/SendProcessor.java  |  15 +--
 .../apache/camel/processor/StreamResequencer.java  |   4 +-
 .../apache/camel/processor/ThreadsProcessor.java   |   9 +-
 .../java/org/apache/camel/processor/Throttler.java |   1 -
 .../apache/camel/processor/ThroughputLogger.java   |  29 +++---
 .../org/apache/camel/processor/TryProcessor.java   |  15 ++-
 .../apache/camel/processor/WireTapProcessor.java   |   6 +-
 .../processor/aggregate/AggregateProcessor.java    | 114 ++++++++++-----------
 .../idempotent/FileIdempotentRepository.java       |  34 +++---
 .../processor/idempotent/IdempotentConsumer.java   |   4 +-
 .../processor/interceptor/BacklogDebugger.java     |   4 +-
 .../camel/processor/interceptor/BacklogTracer.java |   5 +-
 .../processor/interceptor/DefaultChannel.java      |   6 +-
 .../apache/camel/support/EventNotifierSupport.java |   4 +-
 .../apache/camel/support/RoutePolicySupport.java   |   1 -
 .../org/apache/camel/support/ServiceSupport.java   |  13 ++-
 .../camel/util/component/AbstractApiConsumer.java  |   3 -
 .../camel/component/log/LogCustomLoggerTest.java   |   2 +-
 .../org/apache/camel/impl/CustomIdFactoryTest.java |   3 +-
 .../impl/transformer/TransformerRouteTest.java     |   8 +-
 .../camel/impl/validator/ValidatorRouteTest.java   |   4 +-
 .../processor/LogEipPropagateExceptionTest.java    |   2 +-
 .../apache/camel/component/ahc/ws/WsEndpoint.java  |  21 ++--
 .../apache/camel/component/ahc/AhcComponent.java   |   6 +-
 .../apache/camel/component/as2/AS2Component.java   |   4 +-
 .../camel/component/asterisk/AsteriskProducer.java |   5 +-
 .../camel/component/atmos/AtmosEndpoint.java       |  12 +--
 .../consumer/AtmosScheduledPollConsumer.java       |   2 +-
 .../client/AbstractAtomixClientProducer.java       |   4 +-
 .../atomix/client/map/AtomixMapConsumer.java       |   5 +-
 .../client/messaging/AtomixMessagingConsumer.java  |   3 +-
 .../atomix/client/queue/AtomixQueueConsumer.java   |   3 +-
 .../atomix/client/set/AtomixSetConsumer.java       |   3 +-
 .../atomix/client/value/AtomixValueConsumer.java   |   3 +-
 .../atomix/cluster/AtomixClusterService.java       |  11 +-
 .../atomix/cluster/AtomixClusterView.java          |  21 ++--
 .../camel/component/aws/xray/XRayTracer.java       |  34 +++---
 .../camel/component/aws/ddb/DdbEndpoint.java       |  20 ++--
 .../component/aws/ddbstream/DdbStreamConsumer.java |   9 +-
 .../camel/component/aws/ec2/EC2Producer.java       |  44 ++++----
 .../camel/component/aws/iam/IAMProducer.java       |  20 ++--
 .../component/aws/kinesis/KinesisConsumer.java     |  15 ++-
 .../camel/component/aws/kms/KMSProducer.java       |  16 ++-
 .../camel/component/aws/lambda/LambdaProducer.java |  16 ++-
 .../apache/camel/component/aws/mq/MQProducer.java  |  16 ++-
 .../apache/camel/component/aws/s3/S3Consumer.java  |  33 +++---
 .../camel/component/aws/sns/SnsEndpoint.java       |  14 +--
 .../camel/component/aws/sns/SnsProducer.java       |  10 +-
 .../camel/component/aws/sqs/SqsConsumer.java       |  41 ++++----
 .../camel/component/aws/sqs/SqsEndpoint.java       |  18 ++--
 .../camel/component/aws/sqs/SqsProducer.java       |  16 ++-
 .../component/azure/blob/BlobServiceConsumer.java  |   7 +-
 .../component/azure/blob/BlobServiceEndpoint.java  |   8 +-
 .../component/azure/blob/BlobServiceProducer.java  |  28 +++--
 .../azure/queue/QueueServiceConsumer.java          |   7 +-
 .../azure/queue/QueueServiceEndpoint.java          |   8 +-
 .../azure/queue/QueueServiceProducer.java          |  14 +--
 .../dataformat/barcode/BarcodeDataFormat.java      |  17 +--
 .../component/beanstalk/BeanstalkConsumer.java     |  16 ++-
 ...indySimpleCsvMandatoryFieldsUnmarshallTest.java |   4 +-
 .../bindy/csv/BindySimpleCsvUnmarshallTest.java    |   2 +-
 .../camel/blueprint/BlueprintCamelContext.java     |  38 +++----
 .../aggregate/CaffeineAggregationRepository.java   |  15 ++-
 .../component/cassandra/CassandraProducer.java     |   1 -
 .../org/apache/camel/component/cm/CMComponent.java |   6 +-
 .../apache/camel/component/cmis/CMISConsumer.java  |   6 +-
 .../apache/camel/component/cmis/CMISProducer.java  |  10 +-
 .../component/crypto/cms/CryptoCmsComponent.java   |   8 +-
 .../component/crypto/cms/CryptoCmsProducer.java    |   2 +-
 .../camel/converter/crypto/PGPDataFormat.java      |   2 +-
 .../camel/component/cxf/CxfBlueprintEndpoint.java  |   6 +-
 .../apache/camel/component/cxf/CxfComponent.java   |   2 -
 .../apache/camel/component/cxf/CxfConsumer.java    |  22 ++--
 .../apache/camel/component/cxf/CxfEndpoint.java    |  26 ++---
 .../apache/camel/component/cxf/CxfProducer.java    |  32 +++---
 .../disruptor/SedaDisruptorCompareTest.java        |   4 +-
 .../camel/component/docker/DockerComponent.java    |   1 -
 .../docker/consumer/DockerEventsConsumer.java      |   9 +-
 .../camel/component/dozer/DozerComponent.java      |   2 -
 .../camel/component/dozer/DozerEndpoint.java       |   5 +-
 .../camel/component/dozer/DozerProducer.java       |  12 +--
 .../aggregate/EhcacheAggregationRepository.java    |  15 ++-
 .../elasticsearch/ElasticsearchProducer.java       |  10 +-
 .../camel/component/elsql/ElsqlEndpoint.java       |   7 +-
 .../camel/component/elsql/ElsqlProducer.java       |   5 +-
 .../component/eventadmin/EventAdminConsumer.java   |   5 +-
 .../camel/component/facebook/FacebookConsumer.java |  10 +-
 .../camel/component/facebook/FacebookEndpoint.java |   8 +-
 .../git/consumer/AbstractGitConsumer.java          |   6 +-
 .../camel/component/git/producer/GitProducer.java  |  52 +++++-----
 .../stream/GoogleCalendarStreamConsumer.java       |   5 +-
 .../stream/GoogleCalendarStreamEndpoint.java       |   2 -
 .../apache/camel/component/gora/GoraConsumer.java  |   9 +-
 .../apache/camel/component/gora/GoraProducer.java  |   5 -
 .../apache/camel/component/grpc/GrpcConsumer.java  |  13 +--
 .../apache/camel/component/grpc/GrpcProducer.java  |  11 +-
 .../hazelcast/queue/HazelcastQueueConsumer.java    |   7 +-
 .../hazelcast/seda/HazelcastSedaConsumer.java      |   9 +-
 .../camel/component/hbase/HBaseConsumer.java       |   8 +-
 .../idempotent/HBaseIdempotentRepository.java      |  12 +--
 .../camel/component/hipchat/HipchatComponent.java  |   6 +-
 .../camel/component/hipchat/HipchatConsumer.java   |   9 +-
 .../camel/component/hipchat/HipchatProducer.java   |  12 +--
 .../camel/component/http4/HttpComponent.java       |  10 +-
 .../apache/camel/component/http4/HttpEndpoint.java |   8 +-
 .../apache/camel/component/http4/HttpProducer.java |  18 ++--
 .../hystrix/metrics/HystrixEventStreamService.java |  11 +-
 .../hystrix/processor/HystrixProcessor.java        |   1 -
 .../component/iec60870/AbstractIecComponent.java   |  16 ++-
 .../component/iec60870/client/ClientComponent.java |   6 +-
 .../component/iec60870/client/ClientConsumer.java  |   6 +-
 .../component/iec60870/server/ServerComponent.java |   6 +-
 .../component/iec60870/server/ServerConsumer.java  |   6 +-
 .../cache/IgniteCacheContinuousQueryConsumer.java  |  16 ++-
 .../ignite/events/IgniteEventsConsumer.java        |  14 +--
 .../ignite/events/IgniteEventsEndpoint.java        |  10 +-
 .../ignite/idgen/IgniteIdGenEndpoint.java          |   6 +-
 .../ignite/messaging/IgniteMessagingConsumer.java  |  14 +--
 .../component/influxdb/InfluxDbComponent.java      |   2 -
 .../camel/component/influxdb/InfluxDbEndpoint.java |   6 +-
 .../camel/component/influxdb/InfluxDbProducer.java |  10 +-
 .../apache/camel/component/irc/IrcComponent.java   |  24 ++---
 .../apache/camel/component/irc/IrcConsumer.java    |   9 +-
 .../apache/camel/component/irc/IrcEndpoint.java    |  15 ++-
 .../apache/camel/component/irc/IrcProducer.java    |  12 +--
 .../camel/component/irc/CodehausIrcChat.java       |   4 +-
 .../camel/component/ironmq/IronMQConsumer.java     |  23 ++---
 .../camel/component/ironmq/IronMQEndpoint.java     |   5 +-
 .../camel/component/ironmq/IronMQProducer.java     |   9 +-
 .../camel/component/jackson/JacksonDataFormat.java |  20 ++--
 .../component/jacksonxml/JacksonXMLDataFormat.java |  14 +--
 .../converter/jaxb/FallbackTypeConverter.java      |  15 ++-
 .../camel/converter/jaxb/JaxbDataFormat.java       |  22 ++--
 .../aggregate/JCacheAggregationRepository.java     |  21 ++--
 .../jclouds/JcloudsBlobStoreConsumer.java          |   5 +-
 .../jclouds/JcloudsBlobStoreProducer.java          |   8 +-
 .../apache/camel/component/jcr/JcrConsumer.java    |  26 ++---
 .../apache/camel/component/jdbc/JdbcComponent.java |   5 +-
 .../apache/camel/component/jdbc/JdbcProducer.java  |  18 ++--
 .../camel/component/jetty/JettyHttpComponent.java  |  25 ++---
 .../camel/component/jetty/JettyHttpProducer.java   |  16 ++-
 .../component/jetty/async/MyAsyncProducer.java     |  13 +--
 .../camel/component/jgroups/JGroupsEndpoint.java   |  10 +-
 .../apache/camel/component/jms/JmsComponent.java   |   4 +-
 .../apache/camel/component/jms/JmsEndpoint.java    |   2 +-
 .../apache/camel/component/jms/JmsProducer.java    |  46 ++++-----
 .../reply/SharedQueueMessageListenerContainer.java |   6 +-
 .../camel/component/jms/async/MyAsyncProducer.java |  13 +--
 .../apache/camel/component/jmx/JMXConsumer.java    |  22 ++--
 .../camel/component/johnzon/JohnzonDataFormat.java |   2 -
 .../apache/camel/component/jpa/JpaComponent.java   |  25 ++---
 .../apache/camel/component/jpa/JpaConsumer.java    |  30 +++---
 .../camel/component/jpa/JpaPollingConsumer.java    |   9 +-
 .../apache/camel/component/jpa/JpaProducer.java    |   8 +-
 .../idempotent/jpa/JpaMessageIdRepository.java     |  20 ++--
 .../apache/camel/component/scp/ScpComponent.java   |  20 ++--
 .../jsonvalidator/JsonValidatorEndpoint.java       |   8 +-
 .../camel/component/kafka/KafkaEndpoint.java       |   5 +-
 .../kubernetes/AbstractKubernetesEndpoint.java     |   2 -
 .../config_maps/KubernetesConfigMapsEndpoint.java  |   2 -
 .../config_maps/KubernetesConfigMapsProducer.java  |  16 ++-
 .../deployments/KubernetesDeploymentsConsumer.java |   8 +-
 .../deployments/KubernetesDeploymentsEndpoint.java |   2 -
 .../deployments/KubernetesDeploymentsProducer.java |  22 ++--
 .../kubernetes/hpa/KubernetesHPAConsumer.java      |   8 +-
 .../kubernetes/hpa/KubernetesHPAEndpoint.java      |   2 -
 .../kubernetes/hpa/KubernetesHPAProducer.java      |  20 ++--
 .../kubernetes/job/KubernetesJobEndpoint.java      |   2 -
 .../kubernetes/job/KubernetesJobProducer.java      |  20 ++--
 .../namespaces/KubernetesNamespacesConsumer.java   |   8 +-
 .../namespaces/KubernetesNamespacesEndpoint.java   |   2 -
 .../kubernetes/nodes/KubernetesNodesConsumer.java  |   8 +-
 .../kubernetes/nodes/KubernetesNodesEndpoint.java  |   2 -
 .../kubernetes/nodes/KubernetesNodesProducer.java  |   6 +-
 .../KubernetesPersistentVolumesEndpoint.java       |   2 -
 .../KubernetesPersistentVolumesClaimsEndpoint.java |   2 -
 .../kubernetes/pods/KubernetesPodsConsumer.java    |   8 +-
 .../kubernetes/pods/KubernetesPodsEndpoint.java    |   2 -
 .../KubernetesReplicationControllersConsumer.java  |   8 +-
 .../KubernetesReplicationControllersEndpoint.java  |   2 -
 .../KubernetesResourcesQuotaEndpoint.java          |   2 -
 .../secrets/KubernetesSecretsEndpoint.java         |   2 -
 .../KubernetesServiceAccountsEndpoint.java         |   2 -
 .../services/KubernetesServicesConsumer.java       |   9 +-
 .../services/KubernetesServicesEndpoint.java       |   2 -
 .../OpenshiftBuildConfigsEndpoint.java             |   2 -
 .../OpenshiftBuildConfigsProducer.java             |   8 +-
 .../openshift/builds/OpenshiftBuildsEndpoint.java  |   2 -
 .../openshift/builds/OpenshiftBuildsProducer.java  |   8 +-
 .../leveldb/LevelDBAggregationRepository.java      |  45 ++++----
 .../component/linkedin/LinkedInComponent.java      |   6 +-
 .../apache/camel/component/mail/MailConsumer.java  |  77 +++++++-------
 .../apache/camel/component/mail/MailProducer.java  |   8 +-
 .../component/metrics/AbstractMetricsProducer.java |   1 -
 .../camel/component/metrics/GaugeProducer.java     |   8 +-
 .../camel/component/metrics/HistogramProducer.java |   6 +-
 .../camel/component/metrics/MetricsComponent.java  |  14 +--
 .../camel/component/metrics/TimerProducer.java     |  10 +-
 .../component/micrometer/MicrometerComponent.java  |   6 +-
 .../camel/component/micrometer/TimerProducer.java  |   9 +-
 .../component/milo/client/MiloClientComponent.java |   8 +-
 .../component/milo/client/MiloClientConsumer.java  |   8 +-
 .../component/milo/client/MiloClientProducer.java  |   6 +-
 .../component/milo/server/MiloServerComponent.java |  14 +--
 .../component/milo/server/MiloServerProducer.java  |   6 +-
 .../apache/camel/component/mllp/MllpEndpoint.java  |   8 +-
 .../component/mongodb/gridfs/GridFsEndpoint.java   |  10 +-
 .../camel/component/mongodb3/MongoDbComponent.java |   2 -
 .../camel/component/mongodb3/MongoDbEndpoint.java  |  14 +--
 .../camel/component/mongodb3/MongoDbProducer.java  |  13 +--
 .../apache/camel/component/mqtt/MQTTEndpoint.java  |  37 +++----
 .../component/mybatis/MyBatisBeanProducer.java     |  17 ++-
 .../camel/component/mybatis/MyBatisConsumer.java   |  10 +-
 .../apache/camel/component/nats/NatsConsumer.java  |  16 ++-
 .../apache/camel/component/nats/NatsProducer.java  |  16 ++-
 .../component/netty4/http/NettyHttpComponent.java  |   8 +-
 .../component/netty4/http/NettyHttpEndpoint.java   |   9 +-
 .../camel/component/netty4/NettyConsumer.java      |  12 +--
 .../camel/component/netty4/NettyProducer.java      |  74 +++++++------
 .../camel/opentracing/OpenTracingTracer.java       |  34 +++---
 .../apache/camel/component/paho/PahoConsumer.java  |  12 +--
 .../apache/camel/component/paho/PahoEndpoint.java  |   8 +-
 .../apache/camel/component/paho/PahoProducer.java  |   6 +-
 .../component/paxlogging/PaxLoggingConsumer.java   |   7 +-
 .../apache/camel/component/pdf/PdfProducer.java    |   9 +-
 .../camel/component/pgevent/PgEventConsumer.java   |   8 +-
 .../camel/component/pgevent/PgEventEndpoint.java   |  13 +--
 .../camel/component/pubnub/PubNubConsumer.java     |   8 +-
 .../camel/component/pubnub/PubNubProducer.java     |  22 ++--
 .../camel/component/quartz2/QuartzComponent.java   |  36 +++----
 .../camel/component/quartz2/QuartzEndpoint.java    |  28 +++--
 .../QuartzScheduledPollConsumerScheduler.java      |  21 ++--
 .../component/quickfixj/QuickfixjComponent.java    |   8 +-
 .../component/quickfixj/QuickfixjEndpoint.java     |   8 +-
 .../camel/component/quickfixj/QuickfixjEngine.java |  12 +--
 .../component/rabbitmq/RabbitMQComponent.java      |  12 +--
 .../reactive/streams/ReactiveStreamsConsumer.java  |   5 +-
 .../rest/swagger/RestSwaggerEndpoint.java          |   2 -
 .../camel/component/restlet/RestletComponent.java  |  22 ++--
 .../camel/component/restlet/RestletConsumer.java   |   6 +-
 .../camel/component/restlet/RestletProducer.java   |  27 +++--
 .../component/salesforce/SalesforceComponent.java  |  14 +--
 .../component/salesforce/SalesforceEndpoint.java   |  12 +--
 .../internal/client/DefaultCompositeApiClient.java |   4 +-
 .../camel/component/xquery/XQueryEndpoint.java     |   6 +-
 .../component/schematron/SchematronEndpoint.java   |  14 +--
 .../camel/component/servlet/ServletComponent.java  |   6 +-
 .../camel/component/sjms/jms/JmsBinding.java       |  10 +-
 .../camel/component/slack/SlackConsumer.java       |   6 +-
 .../apache/camel/component/smpp/SmppConsumer.java  |  22 ++--
 .../apache/camel/component/smpp/SmppProducer.java  |  22 ++--
 .../apache/camel/component/snmp/SnmpEndpoint.java  |   6 +-
 .../apache/camel/component/snmp/SnmpOIDPoller.java |  26 ++---
 .../apache/camel/component/snmp/SnmpProducer.java  |  12 +--
 .../camel/component/snmp/SnmpTrapConsumer.java     |  32 +++---
 .../camel/component/snmp/SnmpTrapProducer.java     |  14 +--
 .../camel/dataformat/soap/SoapJaxbDataFormat.java  |  12 +--
 .../apache/camel/component/solr/SolrComponent.java |  11 +-
 .../camel/component/splunk/SplunkConsumer.java     |  12 +--
 .../camel/component/splunk/SplunkEndpoint.java     |   5 +-
 .../apache/camel/spring/boot/RoutesCollector.java  |   4 +-
 .../spring/ws/SpringWebserviceComponent.java       |   7 +-
 .../spring/ws/SpringWebserviceProducer.java        |   9 +-
 .../spring/spi/SpringManagementMBeanAssembler.java |   9 +-
 .../apache/camel/component/sql/SqlComponent.java   |   7 +-
 .../camel/component/stream/StreamConsumer.java     |  19 ++--
 .../camel/component/stream/StreamEndpoint.java     |   5 +-
 .../camel/component/stream/StreamProducer.java     |  21 ++--
 .../camel/component/thrift/ThriftConsumer.java     |  19 ++--
 .../camel/component/thrift/ThriftProducer.java     |  19 ++--
 .../component/undertow/UndertowComponent.java      |   1 -
 .../camel/component/undertow/UndertowConsumer.java |   5 +-
 .../camel/component/undertow/UndertowEndpoint.java |   5 +-
 .../camel/component/undertow/UndertowProducer.java |  11 +-
 .../undertow/handlers/CamelWebSocketHandler.java   |   2 +-
 .../camel/component/vertx/VertxComponent.java      |  19 ++--
 .../camel/component/vertx/VertxConsumer.java       |  18 ++--
 .../camel/component/vertx/VertxProducer.java       |  10 +-
 .../camel/component/weather/WeatherConsumer.java   |  10 +-
 .../apache/camel/component/xmpp/XmppComponent.java |   7 +-
 .../apache/camel/component/xmpp/XmppConsumer.java  |  28 +++--
 .../apache/camel/component/xmpp/XmppEndpoint.java  |  17 ++-
 348 files changed, 1789 insertions(+), 2659 deletions(-)
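
For readers skimming the hunks below, the gist of the change is a single pattern repeated across all of these files: each class drops its own static SLF4J logger field and instead uses a protected instance logger inherited from ServiceSupport. The sketch below is only an illustration of that pattern, assuming SLF4J and the field name "log" seen throughout the hunks; the actual ServiceSupport hunk is listed in the diffstat above but is not reproduced in this excerpt, so treat this as an approximation rather than the committed source.

    // Illustrative sketch only -- not the literal patch to ServiceSupport.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public abstract class ServiceSupport /* implements Service, ... */ {

        // Protected instance logger resolved against the concrete subclass,
        // so log categories still match the class that emits the message.
        protected final Logger log = LoggerFactory.getLogger(getClass());
    }

    // A subclass such as DirectProducer then removes its own
    //   private static final Logger LOG = LoggerFactory.getLogger(DirectProducer.class);
    // and simply calls the inherited field:
    //   log.debug("message ignored, no consumers available on endpoint: {}", endpoint);

The trade-off of this design is that the logger becomes per-instance rather than per-class, which is what the hunks below reflect: static LOG fields and their LoggerFactory imports are deleted, and call sites switch from LOG.x(...) to log.x(...).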

diff --git a/camel-core/src/main/java/org/apache/camel/component/bean/BeanComponent.java b/camel-core/src/main/java/org/apache/camel/component/bean/BeanComponent.java
index f2fda69..28a81f0 100644
--- a/camel-core/src/main/java/org/apache/camel/component/bean/BeanComponent.java
+++ b/camel-core/src/main/java/org/apache/camel/component/bean/BeanComponent.java
@@ -32,7 +32,6 @@ import org.slf4j.LoggerFactory;
  */
 public class BeanComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(BeanComponent.class);
     // use an internal soft cache for BeanInfo as they are costly to introspect
     // for example the bean language using OGNL expression runs much faster reusing the BeanInfo from this cache
     @SuppressWarnings("unchecked")
@@ -69,9 +68,9 @@ public class BeanComponent extends DefaultComponent {
 
     @Override
     protected void doShutdown() throws Exception {
-        if (LOG.isDebugEnabled() && beanInfoCache instanceof LRUCache) {
+        if (log.isDebugEnabled() && beanInfoCache instanceof LRUCache) {
             LRUCache cache = (LRUCache) this.beanInfoCache;
-            LOG.debug("Clearing BeanInfo cache[size={}, hits={}, misses={}, evicted={}]", cache.size(), cache.getHits(), cache.getMisses(), cache.getEvicted());
+            log.debug("Clearing BeanInfo cache[size={}, hits={}, misses={}, evicted={}]", cache.size(), cache.getHits(), cache.getMisses(), cache.getEvicted());
         }
         beanInfoCache.clear();
     }
diff --git a/camel-core/src/main/java/org/apache/camel/component/direct/DirectProducer.java b/camel-core/src/main/java/org/apache/camel/component/direct/DirectProducer.java
index 126ea9c..bd8b267 100644
--- a/camel-core/src/main/java/org/apache/camel/component/direct/DirectProducer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/direct/DirectProducer.java
@@ -28,7 +28,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class DirectProducer extends DefaultAsyncProducer {
-    private static final transient Logger LOG = LoggerFactory.getLogger(DirectProducer.class);
+
     private final DirectEndpoint endpoint;
 
     public DirectProducer(DirectEndpoint endpoint) {
@@ -54,7 +54,7 @@ public class DirectProducer extends DefaultAsyncProducer {
             if (endpoint.isFailIfNoConsumers()) {
                 throw new DirectConsumerNotAvailableException("No consumers available on endpoint: " + endpoint, exchange);
             } else {
-                LOG.debug("message ignored, no consumers available on endpoint: {}", endpoint);
+                log.debug("message ignored, no consumers available on endpoint: {}", endpoint);
             }
         } else {
             consumer.getProcessor().process(exchange);
@@ -68,7 +68,7 @@ public class DirectProducer extends DefaultAsyncProducer {
                 if (endpoint.isFailIfNoConsumers()) {
                     exchange.setException(new DirectConsumerNotAvailableException("No consumers available on endpoint: " + endpoint, exchange));
                 } else {
-                    LOG.debug("message ignored, no consumers available on endpoint: {}", endpoint);
+                    log.debug("message ignored, no consumers available on endpoint: {}", endpoint);
                 }
                 callback.done(true);
                 return true;
diff --git a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmBlockingProducer.java b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmBlockingProducer.java
index 22d3f63..a2e14ed 100644
--- a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmBlockingProducer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmBlockingProducer.java
@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
  * consumer is available, but actual consumer execution will happen concurrently.
  */
 public class DirectVmBlockingProducer extends DefaultAsyncProducer {
-    private static final Logger LOG = LoggerFactory.getLogger(DirectVmBlockingProducer.class);
+
     private final DirectVmEndpoint endpoint;
 
     public DirectVmBlockingProducer(DirectVmEndpoint endpoint) {
@@ -79,8 +79,8 @@ public class DirectVmBlockingProducer extends DefaultAsyncProducer {
         while (!done) {
             // sleep a bit to give chance for the consumer to be ready
             Thread.sleep(500);
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Waited {} for consumer to be ready", watch.taken());
+            if (log.isDebugEnabled()) {
+                log.debug("Waited {} for consumer to be ready", watch.taken());
             }
 
             answer = endpoint.getConsumer();
diff --git a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmProcessor.java b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmProcessor.java
index 2844f6a..c74c3e5 100644
--- a/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/component/directvm/DirectVmProcessor.java
@@ -29,7 +29,6 @@ import org.slf4j.LoggerFactory;
 */
 public final class DirectVmProcessor extends DelegateAsyncProcessor {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DirectVmProcessor.class);
     private final DirectVmEndpoint endpoint;
 
     public DirectVmProcessor(Processor processor, DirectVmEndpoint endpoint) {
@@ -48,7 +47,7 @@ public final class DirectVmProcessor extends DelegateAsyncProcessor {
             // set TCCL to application context class loader if given
             ClassLoader appClassLoader = endpoint.getCamelContext().getApplicationContextClassLoader();
             if (appClassLoader != null) {
-                LOG.trace("Setting Thread ContextClassLoader to {}", appClassLoader);
+                log.trace("Setting Thread ContextClassLoader to {}", appClassLoader);
                 Thread.currentThread().setContextClassLoader(appClassLoader);
                 changed = true;
             }
@@ -60,7 +59,7 @@ public final class DirectVmProcessor extends DelegateAsyncProcessor {
                     try {
                         // restore TCCL if it was changed during processing
                         if (chgd) {
-                            LOG.trace("Restoring Thread ContextClassLoader to {}", current);
+                            log.trace("Restoring Thread ContextClassLoader to {}", current);
                             Thread.currentThread().setContextClassLoader(current);
                         }
                         // make sure to copy results back
@@ -74,7 +73,7 @@ public final class DirectVmProcessor extends DelegateAsyncProcessor {
         } finally {
             // restore TCCL if it was changed during processing
             if (changed) {
-                LOG.trace("Restoring Thread ContextClassLoader to {}", current);
+                log.trace("Restoring Thread ContextClassLoader to {}", current);
                 Thread.currentThread().setContextClassLoader(current);
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/GenericFileConsumer.java b/camel-core/src/main/java/org/apache/camel/component/file/GenericFileConsumer.java
index ff01940..4ffd8e1 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/GenericFileConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/GenericFileConsumer.java
@@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
  * Base class for file consumers.
  */
 public abstract class GenericFileConsumer<T> extends ScheduledBatchPollingConsumer {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     protected GenericFileEndpoint<T> endpoint;
     protected GenericFileOperations<T> operations;
     protected GenericFileProcessStrategy<T> processStrategy;
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/GenericFilePollingConsumer.java b/camel-core/src/main/java/org/apache/camel/component/file/GenericFilePollingConsumer.java
index c5f4001..91ec5fa 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/GenericFilePollingConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/GenericFilePollingConsumer.java
@@ -29,8 +29,6 @@ import org.slf4j.LoggerFactory;
 
 public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(GenericFilePollingConsumer.class);
-
     private final long delay;
 
     public GenericFilePollingConsumer(GenericFileEndpoint endpoint) throws Exception {
@@ -76,8 +74,8 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
 
     @Override
     public Exchange receiveNoWait() {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("receiveNoWait polling file: {}", getConsumer().getEndpoint());
+        if (log.isTraceEnabled()) {
+            log.trace("receiveNoWait polling file: {}", getConsumer().getEndpoint());
         }
         int polled = doReceive(0);
         if (polled > 0) {
@@ -89,8 +87,8 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
 
     @Override
     public Exchange receive() {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("receive polling file: {}", getConsumer().getEndpoint());
+        if (log.isTraceEnabled()) {
+            log.trace("receive polling file: {}", getConsumer().getEndpoint());
         }
         int polled = doReceive(Long.MAX_VALUE);
         if (polled > 0) {
@@ -102,8 +100,8 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
 
     @Override
     public Exchange receive(long timeout) {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("receive({}) polling file: {}", timeout, getConsumer().getEndpoint());
+        if (log.isTraceEnabled()) {
+            log.trace("receive({}) polling file: {}", timeout, getConsumer().getEndpoint());
         }
         int polled = doReceive(timeout);
         if (polled > 0) {
@@ -130,9 +128,9 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
                 if (isRunAllowed()) {
 
                     if (retryCounter == -1) {
-                        LOG.trace("Starting to poll: {}", this.getEndpoint());
+                        log.trace("Starting to poll: {}", this.getEndpoint());
                     } else {
-                        LOG.debug("Retrying attempt {} to poll: {}", retryCounter, this.getEndpoint());
+                        log.debug("Retrying attempt {} to poll: {}", retryCounter, this.getEndpoint());
                     }
 
                     // mark we are polling which should also include the begin/poll/commit
@@ -140,7 +138,7 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
                     if (begin) {
                         retryCounter++;
                         polledMessages = getConsumer().poll();
-                        LOG.trace("Polled {} messages", polledMessages);
+                        log.trace("Polled {} messages", polledMessages);
 
                         if (polledMessages == 0 && sendEmptyMessageWhenIdle) {
                             // send an "empty" exchange
@@ -152,11 +150,11 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
 
                         pollStrategy.commit(getConsumer(), getEndpoint(), polledMessages);
                     } else {
-                        LOG.debug("Cannot begin polling as pollStrategy returned false: {}", pollStrategy);
+                        log.debug("Cannot begin polling as pollStrategy returned false: {}", pollStrategy);
                     }
                 }
 
-                LOG.trace("Finished polling: {}", this.getEndpoint());
+                log.trace("Finished polling: {}", this.getEndpoint());
             } catch (Exception e) {
                 try {
                     boolean retry = pollStrategy.rollback(getConsumer(), getEndpoint(), retryCounter, e);
@@ -205,7 +203,7 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
     public void process(Exchange exchange) throws Exception {
         Object name = exchange.getIn().getHeader(Exchange.FILE_NAME);
         if (name != null) {
-            LOG.debug("Received file: {}", name);
+            log.debug("Received file: {}", name);
         }
         super.process(exchange);
     }
@@ -221,11 +219,11 @@ public class GenericFilePollingConsumer extends EventDrivenPollingConsumer {
         process(exchange);
     }
 
-    private static void sleep(long delay) throws InterruptedException {
+    private void sleep(long delay) throws InterruptedException {
         if (delay <= 0) {
             return;
         }
-        LOG.trace("Sleeping for: {} millis", delay);
+        log.trace("Sleeping for: {} millis", delay);
         Thread.sleep(delay);
     }
 
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentChangedRepositoryReadLockStrategy.java b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentChangedRepositoryReadLockStrategy.java
index 7ae8ba2..b24edd2 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentChangedRepositoryReadLockStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentChangedRepositoryReadLockStrategy.java
@@ -42,8 +42,6 @@ import org.slf4j.LoggerFactory;
  */
 public class FileIdempotentChangedRepositoryReadLockStrategy extends ServiceSupport implements GenericFileExclusiveReadLockStrategy<File>, CamelContextAware {
 
-    private static final transient Logger LOG = LoggerFactory.getLogger(FileIdempotentChangedRepositoryReadLockStrategy.class);
-
     private final FileChangedExclusiveReadLockStrategy changed;
     private GenericFileEndpoint<File> endpoint;
     private LoggingLevel readLockLoggingLevel = LoggingLevel.DEBUG;
@@ -67,7 +65,7 @@ public class FileIdempotentChangedRepositoryReadLockStrategy extends ServiceSupp
     @Override
     public void prepareOnStartup(GenericFileOperations<File> operations, GenericFileEndpoint<File> endpoint) throws Exception {
         this.endpoint = endpoint;
-        LOG.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
+        log.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
 
         changed.prepareOnStartup(operations, endpoint);
     }
@@ -85,7 +83,7 @@ public class FileIdempotentChangedRepositoryReadLockStrategy extends ServiceSupp
         boolean answer = idempotentRepository.add(key);
         if (!answer) {
             // another node is processing the file so skip
-            CamelLogger.log(LOG, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
+            CamelLogger.log(log, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
         }
 
         if (answer) {
@@ -118,15 +116,15 @@ public class FileIdempotentChangedRepositoryReadLockStrategy extends ServiceSupp
             try {
                 changed.releaseExclusiveReadLockOnRollback(operations, file, exchange);
             } catch (Exception e) {
-                LOG.warn("Error during releasing exclusive readlock on rollback. This exception is ignored.", e);
+                log.warn("Error during releasing exclusive readlock on rollback. This exception is ignored.", e);
             }
         };
 
         if (readLockIdempotentReleaseDelay > 0 && readLockIdempotentReleaseExecutorService != null) {
-            LOG.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
             readLockIdempotentReleaseExecutorService.schedule(r, readLockIdempotentReleaseDelay, TimeUnit.MILLISECONDS);
         } else if (readLockIdempotentReleaseDelay > 0) {
-            LOG.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
             Thread.sleep(readLockIdempotentReleaseDelay);
             r.run();
         } else {
@@ -148,15 +146,15 @@ public class FileIdempotentChangedRepositoryReadLockStrategy extends ServiceSupp
             try {
                 changed.releaseExclusiveReadLockOnCommit(operations, file, exchange);
             } catch (Exception e) {
-                LOG.warn("Error during releasing exclusive readlock on rollback. This exception is ignored.", e);
+                log.warn("Error during releasing exclusive readlock on rollback. This exception is ignored.", e);
             }
         };
 
         if (readLockIdempotentReleaseDelay > 0 && readLockIdempotentReleaseExecutorService != null) {
-            LOG.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
             readLockIdempotentReleaseExecutorService.schedule(r, readLockIdempotentReleaseDelay, TimeUnit.MILLISECONDS);
         } else if (readLockIdempotentReleaseDelay > 0) {
-            LOG.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
             Thread.sleep(readLockIdempotentReleaseDelay);
             r.run();
         } else {
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRenameRepositoryReadLockStrategy.java b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRenameRepositoryReadLockStrategy.java
index 790f6d0..d6878e5 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRenameRepositoryReadLockStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRenameRepositoryReadLockStrategy.java
@@ -40,8 +40,6 @@ import org.slf4j.LoggerFactory;
  */
 public class FileIdempotentRenameRepositoryReadLockStrategy extends ServiceSupport implements GenericFileExclusiveReadLockStrategy<File>, CamelContextAware {
 
-    private static final transient Logger LOG = LoggerFactory.getLogger(FileIdempotentRenameRepositoryReadLockStrategy.class);
-
     private final FileRenameExclusiveReadLockStrategy rename;
     private GenericFileEndpoint<File> endpoint;
     private LoggingLevel readLockLoggingLevel = LoggingLevel.DEBUG;
@@ -60,7 +58,7 @@ public class FileIdempotentRenameRepositoryReadLockStrategy extends ServiceSuppo
     @Override
     public void prepareOnStartup(GenericFileOperations<File> operations, GenericFileEndpoint<File> endpoint) throws Exception {
         this.endpoint = endpoint;
-        LOG.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
+        log.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
 
         rename.prepareOnStartup(operations, endpoint);
     }
@@ -78,7 +76,7 @@ public class FileIdempotentRenameRepositoryReadLockStrategy extends ServiceSuppo
         boolean answer = idempotentRepository.add(key);
         if (!answer) {
             // another node is processing the file so skip
-            CamelLogger.log(LOG, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
+            CamelLogger.log(log, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
         }
 
         if (answer) {
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRepositoryReadLockStrategy.java b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRepositoryReadLockStrategy.java
index d9a1f06..75b0e84 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRepositoryReadLockStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/strategy/FileIdempotentRepositoryReadLockStrategy.java
@@ -42,8 +42,6 @@ import org.slf4j.LoggerFactory;
  */
 public class FileIdempotentRepositoryReadLockStrategy extends ServiceSupport implements GenericFileExclusiveReadLockStrategy<File>, CamelContextAware {
 
-    private static final transient Logger LOG = LoggerFactory.getLogger(FileIdempotentRepositoryReadLockStrategy.class);
-
     private GenericFileEndpoint<File> endpoint;
     private LoggingLevel readLockLoggingLevel = LoggingLevel.DEBUG;
     private CamelContext camelContext;
@@ -59,7 +57,7 @@ public class FileIdempotentRepositoryReadLockStrategy extends ServiceSupport imp
     @Override
     public void prepareOnStartup(GenericFileOperations<File> operations, GenericFileEndpoint<File> endpoint) throws Exception {
         this.endpoint = endpoint;
-        LOG.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
+        log.info("Using FileIdempotentRepositoryReadLockStrategy: {} on endpoint: {}", idempotentRepository, endpoint);
     }
 
     @Override
@@ -75,7 +73,7 @@ public class FileIdempotentRepositoryReadLockStrategy extends ServiceSupport imp
         boolean answer = idempotentRepository.add(key);
         if (!answer) {
             // another node is processing the file so skip
-            CamelLogger.log(LOG, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
+            CamelLogger.log(log, readLockLoggingLevel, "Cannot acquire read lock. Will skip the file: " + file);
         }
         return answer;
     }
@@ -98,10 +96,10 @@ public class FileIdempotentRepositoryReadLockStrategy extends ServiceSupport imp
         };
 
         if (readLockIdempotentReleaseDelay > 0 && readLockIdempotentReleaseExecutorService != null) {
-            LOG.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
             readLockIdempotentReleaseExecutorService.schedule(r, readLockIdempotentReleaseDelay, TimeUnit.MILLISECONDS);
         } else if (readLockIdempotentReleaseDelay > 0) {
-            LOG.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
             Thread.sleep(readLockIdempotentReleaseDelay);
             r.run();
         } else {
@@ -122,10 +120,10 @@ public class FileIdempotentRepositoryReadLockStrategy extends ServiceSupport imp
         };
 
         if (readLockIdempotentReleaseDelay > 0 && readLockIdempotentReleaseExecutorService != null) {
-            LOG.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Scheduling readlock release task to run asynchronous delayed after {} millis", readLockIdempotentReleaseDelay);
             readLockIdempotentReleaseExecutorService.schedule(r, readLockIdempotentReleaseDelay, TimeUnit.MILLISECONDS);
         } else if (readLockIdempotentReleaseDelay > 0) {
-            LOG.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
+            log.debug("Delaying readlock release task {} millis", readLockIdempotentReleaseDelay);
             Thread.sleep(readLockIdempotentReleaseDelay);
             r.run();
         } else {
diff --git a/camel-core/src/main/java/org/apache/camel/component/file/strategy/GenericFileProcessStrategySupport.java b/camel-core/src/main/java/org/apache/camel/component/file/strategy/GenericFileProcessStrategySupport.java
index ce067e0..5c145ec 100644
--- a/camel-core/src/main/java/org/apache/camel/component/file/strategy/GenericFileProcessStrategySupport.java
+++ b/camel-core/src/main/java/org/apache/camel/component/file/strategy/GenericFileProcessStrategySupport.java
@@ -38,7 +38,6 @@ import org.slf4j.LoggerFactory;
  * Base class for implementations of {@link GenericFileProcessStrategy}.
  */
 public abstract class GenericFileProcessStrategySupport<T> extends ServiceSupport implements GenericFileProcessStrategy<T>, CamelContextAware {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
     protected GenericFileExclusiveReadLockStrategy<T> exclusiveReadLockStrategy;
     protected CamelContext camelContext;
 
diff --git a/camel-core/src/main/java/org/apache/camel/component/log/LogComponent.java b/camel-core/src/main/java/org/apache/camel/component/log/LogComponent.java
index fac80e6..160649f 100644
--- a/camel-core/src/main/java/org/apache/camel/component/log/LogComponent.java
+++ b/camel-core/src/main/java/org/apache/camel/component/log/LogComponent.java
@@ -36,7 +36,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class LogComponent extends DefaultComponent {
-    private static final Logger LOG = LoggerFactory.getLogger(LogComponent.class);
 
     @Metadata(label = "advanced")
     private ExchangeFormatter exchangeFormatter;
@@ -53,9 +52,9 @@ public class LogComponent extends DefaultComponent {
             Map<String, Logger> availableLoggers = getCamelContext().getRegistry().findByTypeWithName(Logger.class);
             if (availableLoggers.size() == 1) {
                 providedLogger = availableLoggers.values().iterator().next();
-                LOG.info("Using custom Logger: {}", providedLogger);
+                log.info("Using custom Logger: {}", providedLogger);
             } else if (availableLoggers.size() > 1) {
-                LOG.info("More than one {} instance found in the registry. Falling back to creating logger from URI {}.", Logger.class.getName(), uri);
+                log.info("More than one {} instance found in the registry. Falling back to creating logger from URI {}.", Logger.class.getName(), uri);
             }
         }
         
diff --git a/camel-core/src/main/java/org/apache/camel/component/mock/MockEndpoint.java b/camel-core/src/main/java/org/apache/camel/component/mock/MockEndpoint.java
index 9c0663e..fcdb07d 100644
--- a/camel-core/src/main/java/org/apache/camel/component/mock/MockEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/component/mock/MockEndpoint.java
@@ -94,7 +94,7 @@ import org.slf4j.LoggerFactory;
  */
 @UriEndpoint(firstVersion = "1.0.0", scheme = "mock", title = "Mock", syntax = "mock:name", producerOnly = true, label = "core,testing", lenientProperties = true)
 public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
-    private static final Logger LOG = LoggerFactory.getLogger(MockEndpoint.class);
+
     // must be volatile so changes is visible between the thread which performs the assertions
     // and the threads which process the exchanges when routing messages in Camel
     protected volatile Processor reporter;
@@ -373,12 +373,12 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
      *                should wait for the test to be true
      */
     public void assertIsSatisfied(long timeoutForEmptyEndpoints) throws InterruptedException {
-        LOG.info("Asserting: {} is satisfied", this);
+        log.info("Asserting: {} is satisfied", this);
         doAssertIsSatisfied(timeoutForEmptyEndpoints);
         if (assertPeriod > 0) {
             // if an assert period was set then re-assert again to ensure the assertion is still valid
             Thread.sleep(assertPeriod);
-            LOG.info("Re-asserting: {} is satisfied after {} millis", this, assertPeriod);
+            log.info("Re-asserting: {} is satisfied after {} millis", this, assertPeriod);
             // do not use timeout when we re-assert
             doAssertIsSatisfied(0);
         }
@@ -387,7 +387,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
     protected void doAssertIsSatisfied(long timeoutForEmptyEndpoints) throws InterruptedException {
         if (expectedCount == 0) {
             if (timeoutForEmptyEndpoints > 0) {
-                LOG.debug("Sleeping for: {} millis to check there really are no messages received", timeoutForEmptyEndpoints);
+                log.debug("Sleeping for: {} millis to check there really are no messages received", timeoutForEmptyEndpoints);
                 Thread.sleep(timeoutForEmptyEndpoints);
             }
             assertEquals("Received message count", expectedCount, getReceivedCounter());
@@ -411,7 +411,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
 
         for (Throwable failure : failures) {
             if (failure != null) {
-                LOG.error("Caught on " + getEndpointUri() + " Exception: " + failure, failure);
+                log.error("Caught on " + getEndpointUri() + " Exception: " + failure, failure);
                 fail("Failed due to caught exception: " + failure);
             }
         }
@@ -427,7 +427,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
             // did not throw expected error... fail!
             failed = true;
         } catch (AssertionError e) {
-            LOG.info("Caught expected failure: {}", e);
+            log.info("Caught expected failure: {}", e);
         }
         if (failed) {
             // fail() throws the AssertionError to indicate the test failed. 
@@ -448,7 +448,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
             // did not throw expected error... fail!
             failed = true;
         } catch (AssertionError e) {
-            LOG.info("Caught expected failure: {}", e);
+            log.info("Caught expected failure: {}", e);
         }
         if (failed) { 
             // fail() throws the AssertionError to indicate the test failed. 
@@ -1368,12 +1368,12 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
         }
 
         // let counter be 0 index-based in the logs
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
             String msg = getEndpointUri() + " >>>> " + counter + " : " + copy + " with body: " + actualBody;
             if (copy.getIn().hasHeaders()) {
                 msg += " and headers:" + copy.getIn().getHeaders();
             }
-            LOG.debug(msg);
+            log.debug(msg);
         }
 
         // record timestamp when exchange was received
@@ -1439,7 +1439,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
         StopWatch watch = new StopWatch();
         waitForCompleteLatch(resultWaitTime);
         long delta = watch.taken();
-        LOG.debug("Took {} millis to complete latch", delta);
+        log.debug("Took {} millis to complete latch", delta);
 
         if (resultMinimumWaitTime > 0 && delta < resultMinimumWaitTime) {
             fail("Expected minimum " + resultMinimumWaitTime
@@ -1452,7 +1452,7 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
         long waitTime = timeout == 0 ? 10000L : timeout;
 
         // now let's wait for the results
-        LOG.debug("Waiting on the latch for: {} millis", timeout);
+        log.debug("Waiting on the latch for: {} millis", timeout);
         latch.await(waitTime, TimeUnit.MILLISECONDS);
     }
 
@@ -1469,11 +1469,11 @@ public class MockEndpoint extends DefaultEndpoint implements BrowsableEndpoint {
     }
 
     protected void fail(Object message) {
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
             List<Exchange> list = getReceivedExchanges();
             int index = 0;
             for (Exchange exchange : list) {
-                LOG.debug("{} failed and received[{}]: {}", getEndpointUri(), ++index, exchange);
+                log.debug("{} failed and received[{}]: {}", getEndpointUri(), ++index, exchange);
             }
         }
         throw new AssertionError(getEndpointUri() + " " + message);
diff --git a/camel-core/src/main/java/org/apache/camel/component/properties/PropertiesComponent.java b/camel-core/src/main/java/org/apache/camel/component/properties/PropertiesComponent.java
index 690df1c..873a675 100644
--- a/camel-core/src/main/java/org/apache/camel/component/properties/PropertiesComponent.java
+++ b/camel-core/src/main/java/org/apache/camel/component/properties/PropertiesComponent.java
@@ -75,7 +75,6 @@ public class PropertiesComponent extends DefaultComponent {
      */
     public static final String OVERRIDE_PROPERTIES = PropertiesComponent.class.getName() + ".OverrideProperties";
 
-    private static final Logger LOG = LoggerFactory.getLogger(PropertiesComponent.class);
     @SuppressWarnings("unchecked")
     private final Map<CacheKey, Properties> cacheMap = LRUCacheFactory.newLRUSoftCache(1000);
     private final Map<String, PropertiesFunction> functions = new HashMap<>();
@@ -146,12 +145,12 @@ public class PropertiesComponent extends DefaultComponent {
         // override default locations
         String locations = getAndRemoveParameter(parameters, "locations", String.class);
         if (locations != null) {
-            LOG.trace("Overriding default locations with location: {}", locations);
+            log.trace("Overriding default locations with location: {}", locations);
             paths = Arrays.stream(locations.split(",")).map(PropertiesLocation::new).collect(Collectors.toList());
         }
 
         String endpointUri = parseUri(remaining, paths);
-        LOG.debug("Endpoint uri parsed as: {}", endpointUri);
+        log.debug("Endpoint uri parsed as: {}", endpointUri);
 
         Endpoint delegate = getCamelContext().getEndpoint(endpointUri);
         PropertiesEndpoint answer = new PropertiesEndpoint(uri, delegate, this);
@@ -215,7 +214,7 @@ public class PropertiesComponent extends DefaultComponent {
             uri = uri + suffixToken;
         }
 
-        LOG.trace("Parsing uri {} with properties: {}", uri, prop);
+        log.trace("Parsing uri {} with properties: {}", uri, prop);
         
         if (propertiesParser instanceof AugmentedPropertyNameAwarePropertiesParser) {
             return ((AugmentedPropertyNameAwarePropertiesParser) propertiesParser).parseUri(
@@ -529,11 +528,11 @@ public class PropertiesComponent extends DefaultComponent {
         List<PropertiesLocation> answer = new ArrayList<>();
 
         for (PropertiesLocation location : locations) {
-            LOG.trace("Parsing location: {}", location);
+            log.trace("Parsing location: {}", location);
 
             try {
                 String path = FilePathResolver.resolvePath(location.getPath());
-                LOG.debug("Parsed location: {}", path);
+                log.debug("Parsed location: {}", path);
                 if (ObjectHelper.isNotEmpty(path)) {
                     answer.add(new PropertiesLocation(
                         location.getResolver(),
@@ -545,7 +544,7 @@ public class PropertiesComponent extends DefaultComponent {
                 if (!ignoreMissingLocation && !location.isOptional()) {
                     throw e;
                 } else {
-                    LOG.debug("Ignored missing location: {}", location);
+                    log.debug("Ignored missing location: {}", location);
                 }
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/component/rest/RestEndpoint.java b/camel-core/src/main/java/org/apache/camel/component/rest/RestEndpoint.java
index 7e63034..4a11e45 100644
--- a/camel-core/src/main/java/org/apache/camel/component/rest/RestEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/component/rest/RestEndpoint.java
@@ -54,8 +54,6 @@ public class RestEndpoint extends DefaultEndpoint {
     public static final String DEFAULT_API_COMPONENT_NAME = "swagger";
     public static final String RESOURCE_PATH = "META-INF/services/org/apache/camel/rest/";
 
-    private static final Logger LOG = LoggerFactory.getLogger(RestEndpoint.class);
-
     @UriPath(label = "common", enums = "get,post,put,delete,patch,head,trace,connect,options") @Metadata(required = "true")
     private String method;
     @UriPath(label = "common") @Metadata(required = "true")
@@ -282,7 +280,7 @@ public class RestEndpoint extends DefaultEndpoint {
         RestProducerFactory factory = null;
 
         if (apiDoc != null) {
-            LOG.debug("Discovering camel-swagger-java on classpath for using api-doc: {}", apiDoc);
+            log.debug("Discovering camel-swagger-java on classpath for using api-doc: {}", apiDoc);
             // lookup on classpath using factory finder to automatically find it (just add camel-swagger-java to classpath etc)
             try {
                 FactoryFinder finder = getCamelContext().getFactoryFinder(RESOURCE_PATH);
@@ -358,13 +356,13 @@ public class RestEndpoint extends DefaultEndpoint {
                 }
             }
             if (found != null) {
-                LOG.debug("Auto discovered {} as RestProducerFactory", foundName);
+                log.debug("Auto discovered {} as RestProducerFactory", foundName);
                 factory = found;
             }
         }
 
         if (factory != null) {
-            LOG.debug("Using RestProducerFactory: {}", factory);
+            log.debug("Using RestProducerFactory: {}", factory);
             
             RestConfiguration config = getCamelContext().getRestConfiguration(cname, true);
 
@@ -450,7 +448,7 @@ public class RestEndpoint extends DefaultEndpoint {
                 }
             }
             if (found != null) {
-                LOG.debug("Auto discovered {} as RestConsumerFactory", foundName);
+                log.debug("Auto discovered {} as RestConsumerFactory", foundName);
                 factory = found;
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/component/seda/SedaConsumer.java b/camel-core/src/main/java/org/apache/camel/component/seda/SedaConsumer.java
index bae2d20..2bd21f3 100644
--- a/camel-core/src/main/java/org/apache/camel/component/seda/SedaConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/seda/SedaConsumer.java
@@ -54,7 +54,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class SedaConsumer extends ServiceSupport implements Consumer, Runnable, ShutdownAware, Suspendable {
-    private static final Logger LOG = LoggerFactory.getLogger(SedaConsumer.class);
 
     private final AtomicInteger taskCount = new AtomicInteger();
     private volatile CountDownLatch latch;
@@ -114,7 +113,7 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
         // if we are suspending then we want to keep the thread running but just not route the exchange
         // this logic is only when we stop or shutdown the consumer
         if (suspendOnly) {
-            LOG.debug("Skip preparing to shutdown as consumer is being suspended");
+            log.debug("Skip preparing to shutdown as consumer is being suspended");
             return;
         }
 
@@ -123,7 +122,7 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
         forceShutdown = forced;
 
         if (latch != null) {
-            LOG.debug("Preparing to shutdown, waiting for {} consumer threads to complete.", latch.getCount());
+            log.debug("Preparing to shutdown, waiting for {} consumer threads to complete.", latch.getCount());
 
             // wait for all threads to end
             try {
@@ -156,7 +155,7 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
         } finally {
             taskCount.decrementAndGet();
             latch.countDown();
-            LOG.debug("Ending this polling consumer thread, there are still {} consumer threads left.", latch.getCount());
+            log.debug("Ending this polling consumer thread, there are still {} consumer threads left.", latch.getCount());
         }
     }
 
@@ -167,12 +166,12 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
 
             // do not poll while CamelContext is starting, as we should only poll when CamelContext is fully started
             if (getEndpoint().getCamelContext().getStatus().isStarting()) {
-                LOG.trace("CamelContext is starting so skip polling");
+                log.trace("CamelContext is starting so skip polling");
                 try {
                     // sleep at most 1 sec
                     Thread.sleep(Math.min(pollTimeout, 1000));
                 } catch (InterruptedException e) {
-                    LOG.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
+                    log.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
                 }
                 continue;
             }
@@ -180,16 +179,16 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
             // do not poll if we are suspended or starting again after resuming
             if (isSuspending() || isSuspended() || isStarting()) {
                 if (shutdownPending && queue.isEmpty()) {
-                    LOG.trace("Consumer is suspended and shutdown is pending, so this consumer thread is breaking out because the task queue is empty.");
+                    log.trace("Consumer is suspended and shutdown is pending, so this consumer thread is breaking out because the task queue is empty.");
                     // we want to shut down, so break out if the queue is empty
                     break;
                 } else {
-                    LOG.trace("Consumer is suspended so skip polling");
+                    log.trace("Consumer is suspended so skip polling");
                     try {
                         // sleep at most 1 sec
                         Thread.sleep(Math.min(pollTimeout, 1000));
                     } catch (InterruptedException e) {
-                        LOG.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
+                        log.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
                     }
                     continue;
                 }
@@ -199,8 +198,8 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
             try {
                 // use the end user configured poll timeout
                 exchange = queue.poll(pollTimeout, TimeUnit.MILLISECONDS);
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Polled queue {} with timeout {} ms. -> {}", ObjectHelper.getIdentityHashCode(queue), pollTimeout, exchange);
+                if (log.isTraceEnabled()) {
+                    log.trace("Polled queue {} with timeout {} ms. -> {}", ObjectHelper.getIdentityHashCode(queue), pollTimeout, exchange);
                 }
                 if (exchange != null) {
                     try {
@@ -223,12 +222,12 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
                         getExceptionHandler().handleException("Error processing exchange", exchange, e);
                     }
                 } else if (shutdownPending && queue.isEmpty()) {
-                    LOG.trace("Shutdown is pending, so this consumer thread is breaking out because the task queue is empty.");
+                    log.trace("Shutdown is pending, so this consumer thread is breaking out because the task queue is empty.");
                     // we want to shut down, so break out if the queue is empty
                     break;
                 }
             } catch (InterruptedException e) {
-                LOG.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
+                log.debug("Sleep interrupted, are we stopping? {}", isStopping() || isStopped());
                 continue;
             } catch (Throwable e) {
                 if (exchange != null) {
@@ -275,8 +274,8 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
         // if there are multiple consumers then multicast to them
         if (endpoint.isMultipleConsumersSupported()) {
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Multicasting to {} consumers for Exchange: {}", size, exchange);
+            if (log.isTraceEnabled()) {
+                log.trace("Multicasting to {} consumers for Exchange: {}", size, exchange);
             }
 
             // handover completions, as we need to done this when the multicast is done
@@ -290,7 +289,7 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
             mp.process(exchange, new AsyncCallback() {
                 public void done(boolean doneSync) {
                     // done the uow on the completions
-                    UnitOfWorkHelper.doneSynchronizations(exchange, completions, LOG);
+                    UnitOfWorkHelper.doneSynchronizations(exchange, completions, log);
                 }
             });
         } else {
@@ -354,7 +353,7 @@ public class SedaConsumer extends ServiceSupport implements Consumer, Runnable,
 
         // submit needed number of tasks
         int tasks = poolSize - taskCount.get();
-        LOG.debug("Creating {} consumer tasks with poll timeout {} ms.", tasks, pollTimeout);
+        log.debug("Creating {} consumer tasks with poll timeout {} ms.", tasks, pollTimeout);
         for (int i = 0; i < tasks; i++) {
             executor.execute(this);
         }
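
Because the inherited logger is built from getClass() (assuming the declaration sketched after the MockEndpoint hunks), log records are attributed to the concrete runtime class: a subclass of SedaConsumer now logs under its own category rather than under SedaConsumer, which is the main observable difference compared to the old static LOG. A tiny SLF4J-only illustration of that naming behaviour (hypothetical classes, not Camel code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerCategoryDemo {
        static class Base {
            protected final Logger log = LoggerFactory.getLogger(getClass());
        }

        static class Child extends Base {
        }

        public static void main(String[] args) {
            // typically prints LoggerCategoryDemo$Base
            System.out.println(new Base().log.getName());
            // typically prints LoggerCategoryDemo$Child -- the runtime class, not the declaring class
            System.out.println(new Child().log.getName());
        }
    }
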
diff --git a/camel-core/src/main/java/org/apache/camel/component/seda/SedaEndpoint.java b/camel-core/src/main/java/org/apache/camel/component/seda/SedaEndpoint.java
index dd15e80..bfa1c98 100644
--- a/camel-core/src/main/java/org/apache/camel/component/seda/SedaEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/component/seda/SedaEndpoint.java
@@ -55,7 +55,7 @@ import org.slf4j.LoggerFactory;
 @ManagedResource(description = "Managed SedaEndpoint")
 @UriEndpoint(firstVersion = "1.1.0", scheme = "seda", title = "SEDA", syntax = "seda:name", consumerClass = SedaConsumer.class, label = "core,endpoint")
 public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, BrowsableEndpoint, MultipleConsumersSupport {
-    private static final Logger LOG = LoggerFactory.getLogger(SedaEndpoint.class);
+
     private final Set<SedaProducer> producers = new CopyOnWriteArraySet<>();
     private final Set<SedaConsumer> consumers = new CopyOnWriteArraySet<>();
     private volatile MulticastProcessor consumerMulticastProcessor;
@@ -171,7 +171,7 @@ public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, Brow
                 QueueReference ref = getComponent().getOrCreateQueue(this, size, isMultipleConsumers(), queueFactory);
                 queue = ref.getQueue();
                 String key = getComponent().getQueueKey(getEndpointUri());
-                LOG.info("Endpoint {} is using shared queue: {} with size: {}", this, key, ref.getSize() !=  null ? ref.getSize() : Integer.MAX_VALUE);
+                log.info("Endpoint {} is using shared queue: {} with size: {}", this, key, ref.getSize() !=  null ? ref.getSize() : Integer.MAX_VALUE);
                 // and set the size we are using
                 if (ref.getSize() != null) {
                     setSize(ref.getSize());
@@ -179,7 +179,7 @@ public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, Brow
             } else {
                 // fallback and create queue (as this endpoint has no component)
                 queue = createQueue();
-                LOG.info("Endpoint {} is using queue: {} with size: {}", this, getEndpointUri(), getSize());
+                log.info("Endpoint {} is using queue: {} with size: {}", this, getEndpointUri(), getSize());
             }
         }
         return queue;
@@ -440,7 +440,7 @@ public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, Brow
      */
     @ManagedOperation(description = "Purges the seda queue")
     public void purgeQueue() {
-        LOG.debug("Purging queue with {} exchanges", queue.size());
+        log.debug("Purging queue with {} exchanges", queue.size());
         queue.clear();
     }
 
@@ -504,14 +504,14 @@ public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, Brow
         if (getConsumers().isEmpty()) {
             super.stop();
         } else {
-            LOG.debug("There is still active consumers.");
+            log.debug("There are still active consumers.");
         }
     }
 
     @Override
     public void shutdown() throws Exception {
         if (shutdown.get()) {
-            LOG.trace("Service already shut down");
+            log.trace("Service already shut down");
             return;
         }
 
@@ -523,7 +523,7 @@ public class SedaEndpoint extends DefaultEndpoint implements AsyncEndpoint, Brow
         if (getConsumers().isEmpty()) {
             super.shutdown();
         } else {
-            LOG.debug("There is still active consumers.");
+            log.debug("There are still active consumers.");
         }
     }
 
diff --git a/camel-core/src/main/java/org/apache/camel/component/test/TestEndpoint.java b/camel-core/src/main/java/org/apache/camel/component/test/TestEndpoint.java
index b30c46f..1f7d51b 100644
--- a/camel-core/src/main/java/org/apache/camel/component/test/TestEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/component/test/TestEndpoint.java
@@ -46,7 +46,6 @@ import org.slf4j.LoggerFactory;
  */
 @UriEndpoint(firstVersion = "1.3.0", scheme = "test", title = "Test", syntax = "test:name", producerOnly = true, label = "core,testing", lenientProperties = true)
 public class TestEndpoint extends MockEndpoint {
-    private static final Logger LOG = LoggerFactory.getLogger(TestEndpoint.class);
 
     private Endpoint expectedMessageEndpoint;
 
@@ -71,7 +70,7 @@ public class TestEndpoint extends MockEndpoint {
 
     @Override
     protected void doStart() throws Exception {
-        LOG.debug("Consuming expected messages from: {}", expectedMessageEndpoint);
+        log.debug("Consuming expected messages from: {}", expectedMessageEndpoint);
 
         final List<Object> expectedBodies = new ArrayList<>();
         EndpointHelper.pollEndpoint(expectedMessageEndpoint, new Processor() {
@@ -86,7 +85,7 @@ public class TestEndpoint extends MockEndpoint {
                     Iterator it = ObjectHelper.createIterator(body, delimiter, false, true);
                     while (it.hasNext()) {
                         Object line = it.next();
-                        LOG.trace("Received message body {}", line);
+                        log.trace("Received message body {}", line);
                         expectedBodies.add(line);
                     }
                 } else {
@@ -95,7 +94,7 @@ public class TestEndpoint extends MockEndpoint {
             }
         }, timeout);
 
-        LOG.info("Received: {} expected message(s) from: {}", expectedBodies.size(), expectedMessageEndpoint);
+        log.info("Received: {} expected message(s) from: {}", expectedBodies.size(), expectedMessageEndpoint);
         if (anyOrder) {
             expectedBodiesReceivedInAnyOrder(expectedBodies);
         } else {
diff --git a/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java b/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java
index 1dc6e88..0b99d79 100644
--- a/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/component/timer/TimerConsumer.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class TimerConsumer extends DefaultConsumer implements StartupListener, Suspendable {
-    private static final Logger LOG = LoggerFactory.getLogger(TimerConsumer.class);
+
     private final TimerEndpoint endpoint;
     private volatile TimerTask task;
     private volatile boolean configured;
@@ -67,7 +67,7 @@ public class TimerConsumer extends DefaultConsumer implements StartupListener, S
                 public void run() {
                     if (!isTaskRunAllowed()) {
                         // do not run timer task as it was not allowed
-                        LOG.debug("Run not allowed for timer: {}", endpoint);
+                        log.debug("Run not allowed for timer: {}", endpoint);
                         return;
                     }
 
@@ -80,13 +80,13 @@ public class TimerConsumer extends DefaultConsumer implements StartupListener, S
                         } else {
                             // no need to fire anymore as we exceeded repeat
                             // count
-                            LOG.debug("Cancelling {} timer as repeat count limit reached after {} counts.", endpoint.getTimerName(), endpoint.getRepeatCount());
+                            log.debug("Cancelling {} timer as repeat count limit reached after {} counts.", endpoint.getTimerName(), endpoint.getRepeatCount());
                             cancel();
                         }
                     } catch (Throwable e) {
                         // catch all to avoid the JVM closing the thread and not
                         // firing again
-                        LOG.warn("Error processing exchange. This exception will be ignored, to let the timer be able to trigger again.", e);
+                        log.warn("Error processing exchange. This exception will be ignored, to let the timer be able to trigger again.", e);
                     }
                 }
             };
@@ -189,8 +189,8 @@ public class TimerConsumer extends DefaultConsumer implements StartupListener, S
         // also set 'now' in a header with the same key as quartz to be consistent
         exchange.getIn().setHeader("firedTime", now);
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Timer {} is firing #{} count", endpoint.getTimerName(), counter);
+        if (log.isTraceEnabled()) {
+            log.trace("Timer {} is firing #{} count", endpoint.getTimerName(), counter);
         }
 
         if (!endpoint.isSynchronous()) {
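
The catch-all around the timer task body above exists because the component schedules a java.util.TimerTask: if the task lets an exception escape, the Timer's background thread dies and nothing fires again. A small JDK-only illustration of why the task logs and swallows Throwable (none of this is Camel code):

    import java.util.Timer;
    import java.util.TimerTask;

    public class TimerCatchAllDemo {
        public static void main(String[] args) {
            Timer timer = new Timer("demo");
            timer.scheduleAtFixedRate(new TimerTask() {
                @Override
                public void run() {
                    try {
                        if (Math.random() < 0.5) {
                            throw new IllegalStateException("boom");
                        }
                        System.out.println("fired");
                    } catch (Throwable e) {
                        // swallow so the timer keeps firing on the next period
                        System.err.println("ignored: " + e);
                    }
                }
            }, 0, 1000);
        }
    }
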
diff --git a/camel-core/src/main/java/org/apache/camel/component/xslt/XsltComponent.java b/camel-core/src/main/java/org/apache/camel/component/xslt/XsltComponent.java
index a2a705b..aa57b52 100644
--- a/camel-core/src/main/java/org/apache/camel/component/xslt/XsltComponent.java
+++ b/camel-core/src/main/java/org/apache/camel/component/xslt/XsltComponent.java
@@ -36,8 +36,6 @@ import org.slf4j.LoggerFactory;
  */
 public class XsltComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(XsltComponent.class);
-
     @Metadata(label = "advanced")
     private XmlConverter xmlConverter;
     @Metadata(label = "advanced")
@@ -202,7 +200,7 @@ public class XsltComponent extends DefaultComponent {
             // if it's an http uri, then append additional parameters as they are part of the uri
             resourceUri = ResourceHelper.appendParameters(resourceUri, parameters);
         }
-        LOG.debug("{} using schema resource: {}", this, resourceUri);
+        log.debug("{} using schema resource: {}", this, resourceUri);
         endpoint.setResourceUri(resourceUri);
 
         if (!parameters.isEmpty()) {
diff --git a/camel-core/src/main/java/org/apache/camel/component/xslt/XsltEndpoint.java b/camel-core/src/main/java/org/apache/camel/component/xslt/XsltEndpoint.java
index 96d5ac6..bd6c581 100644
--- a/camel-core/src/main/java/org/apache/camel/component/xslt/XsltEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/component/xslt/XsltEndpoint.java
@@ -59,8 +59,6 @@ import org.slf4j.LoggerFactory;
 public class XsltEndpoint extends ProcessorEndpoint {
     public static final String SAXON_TRANSFORMER_FACTORY_CLASS_NAME = "net.sf.saxon.TransformerFactoryImpl";
 
-    private static final Logger LOG = LoggerFactory.getLogger(XsltEndpoint.class);
-
     private volatile boolean cacheCleared;
     private volatile XsltBuilder xslt;
     private Map<String, Object> parameters;
@@ -118,7 +116,7 @@ public class XsltEndpoint extends ProcessorEndpoint {
 
     public XsltEndpoint findOrCreateEndpoint(String uri, String newResourceUri) {
         String newUri = uri.replace(resourceUri, newResourceUri);
-        LOG.trace("Getting endpoint with URI: {}", newUri);
+        log.trace("Getting endpoint with URI: {}", newUri);
         return getCamelContext().getEndpoint(newUri, XsltEndpoint.class);
     }
 
@@ -403,7 +401,7 @@ public class XsltEndpoint extends ProcessorEndpoint {
      * @throws IOException is thrown if error loading resource
      */
     protected void loadResource(String resourceUri) throws TransformerException, IOException {
-        LOG.trace("{} loading schema resource: {}", this, resourceUri);
+        log.trace("{} loading schema resource: {}", this, resourceUri);
         Source source = xslt.getUriResolver().resolve(resourceUri, null);
         if (source == null) {
             throw new IOException("Cannot load schema resource " + resourceUri);
@@ -422,7 +420,7 @@ public class XsltEndpoint extends ProcessorEndpoint {
         final ClassResolver resolver = ctx.getClassResolver();
         final Injector injector = ctx.getInjector();
 
-        LOG.debug("{} using schema resource: {}", this, resourceUri);
+        log.debug("{} using schema resource: {}", this, resourceUri);
 
         this.xslt = injector.newInstance(XsltBuilder.class);
         if (converter != null) {
@@ -439,7 +437,7 @@ public class XsltEndpoint extends ProcessorEndpoint {
         if (factory == null && transformerFactoryClass != null) {
             // provide the class loader of this component to work in OSGi environments
             Class<TransformerFactory> factoryClass = resolver.resolveMandatoryClass(transformerFactoryClass, TransformerFactory.class, XsltComponent.class.getClassLoader());
-            LOG.debug("Using TransformerFactoryClass {}", factoryClass);
+            log.debug("Using TransformerFactoryClass {}", factoryClass);
             factory = injector.newInstance(factoryClass);
 
             if (useSaxon) {
@@ -450,7 +448,7 @@ public class XsltEndpoint extends ProcessorEndpoint {
         }
 
         if (factory != null) {
-            LOG.debug("Using TransformerFactory {}", factory);
+            log.debug("Using TransformerFactory {}", factory);
             xslt.getConverter().setTransformerFactory(factory);
         }
         if (resultHandlerFactory != null) {
diff --git a/camel-core/src/main/java/org/apache/camel/impl/ConsumerCache.java b/camel-core/src/main/java/org/apache/camel/impl/ConsumerCache.java
index 351d0d0..02cf4c8 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/ConsumerCache.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/ConsumerCache.java
@@ -34,7 +34,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class ConsumerCache extends ServiceSupport {
-    private static final Logger LOG = LoggerFactory.getLogger(ConsumerCache.class);
 
     private final CamelContext camelContext;
     private final ServicePool<PollingConsumer> consumers;
@@ -98,7 +97,7 @@ public class ConsumerCache extends ServiceSupport {
     }
  
     public Exchange receive(Endpoint endpoint) {
-        LOG.debug("<<<< {}", endpoint);
+        log.debug("<<<< {}", endpoint);
         PollingConsumer consumer = null;
         try {
             consumer = acquirePollingConsumer(endpoint);
@@ -111,7 +110,7 @@ public class ConsumerCache extends ServiceSupport {
     }
 
     public Exchange receive(Endpoint endpoint, long timeout) {
-        LOG.debug("<<<< {}", endpoint);
+        log.debug("<<<< {}", endpoint);
         PollingConsumer consumer = null;
         try {
             consumer = acquirePollingConsumer(endpoint);
@@ -124,7 +123,7 @@ public class ConsumerCache extends ServiceSupport {
     }
 
     public Exchange receiveNoWait(Endpoint endpoint) {
-        LOG.debug("<<<< {}", endpoint);
+        log.debug("<<<< {}", endpoint);
         PollingConsumer consumer = null;
         try {
             consumer = acquirePollingConsumer(endpoint);
@@ -156,7 +155,7 @@ public class ConsumerCache extends ServiceSupport {
      */
     public int size() {
         int size = consumers.size();
-        LOG.trace("size = {}", size);
+        log.trace("size = {}", size);
         return size;
     }
 
@@ -222,7 +221,7 @@ public class ConsumerCache extends ServiceSupport {
             consumers.stop();
             consumers.start();
         } catch (Exception e) {
-            LOG.debug("Error restarting consumer pool", e);
+            log.debug("Error restarting consumer pool", e);
         }
         if (statistics != null) {
             statistics.clear();
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultAsyncProcessorAwaitManager.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultAsyncProcessorAwaitManager.java
index f0891a5..d2aa223 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultAsyncProcessorAwaitManager.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultAsyncProcessorAwaitManager.java
@@ -39,8 +39,6 @@ import org.slf4j.LoggerFactory;
 
 public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements AsyncProcessorAwaitManager {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultAsyncProcessorAwaitManager.class);
-
     private final AsyncProcessorAwaitManager.Statistics statistics = new UtilizationStatistics();
     private final AtomicLong blockedCounter = new AtomicLong();
     private final AtomicLong interruptedCounter = new AtomicLong();
@@ -65,7 +63,7 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
 
     @Override
     public void await(Exchange exchange, CountDownLatch latch) {
-        LOG.trace("Waiting for asynchronous callback before continuing for exchangeId: {} -> {}",
+        log.trace("Waiting for asynchronous callback before continuing for exchangeId: {} -> {}",
                 exchange.getExchangeId(), exchange);
         try {
             if (statistics.isStatisticsEnabled()) {
@@ -73,11 +71,11 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
             }
             inflight.put(exchange, new AwaitThreadEntry(Thread.currentThread(), exchange, latch));
             latch.await();
-            LOG.trace("Asynchronous callback received, will continue routing exchangeId: {} -> {}",
+            log.trace("Asynchronous callback received, will continue routing exchangeId: {} -> {}",
                     exchange.getExchangeId(), exchange);
 
         } catch (InterruptedException e) {
-            LOG.trace("Interrupted while waiting for callback, will continue routing exchangeId: {} -> {}",
+            log.trace("Interrupted while waiting for callback, will continue routing exchangeId: {} -> {}",
                     exchange.getExchangeId(), exchange);
             exchange.setException(e);
         } finally {
@@ -104,7 +102,7 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
 
     @Override
     public void countDown(Exchange exchange, CountDownLatch latch) {
-        LOG.trace("Asynchronous callback received for exchangeId: {}", exchange.getExchangeId());
+        log.trace("Asynchronous callback received for exchangeId: {}", exchange.getExchangeId());
         latch.countDown();
     }
 
@@ -152,7 +150,7 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
                 if (routeStackTrace != null) {
                     sb.append(routeStackTrace);
                 }
-                LOG.warn(sb.toString());
+                log.warn(sb.toString());
 
             } catch (Exception e) {
                 throw ObjectHelper.wrapRuntimeCamelException(e);
@@ -189,7 +187,7 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
         Collection<AwaitThread> threads = browse();
         int count = threads.size();
         if (count > 0) {
-            LOG.warn("Shutting down while there are still {} inflight threads currently blocked.", count);
+            log.warn("Shutting down while there are still {} inflight threads currently blocked.", count);
 
             StringBuilder sb = new StringBuilder();
             for (AwaitThread entry : threads) {
@@ -197,19 +195,19 @@ public class DefaultAsyncProcessorAwaitManager extends ServiceSupport implements
             }
 
             if (isInterruptThreadsWhileStopping()) {
-                LOG.warn("The following threads are blocked and will be interrupted so the threads are released:\n{}", sb);
+                log.warn("The following threads are blocked and will be interrupted so the threads are released:\n{}", sb);
                 for (AwaitThread entry : threads) {
                     try {
                         interrupt(entry.getExchange());
                     } catch (Throwable e) {
-                        LOG.warn("Error while interrupting thread: " + entry.getBlockedThread().getName() + ". This exception is ignored.", e);
+                        log.warn("Error while interrupting thread: " + entry.getBlockedThread().getName() + ". This exception is ignored.", e);
                     }
                 }
             } else {
-                LOG.warn("The following threads are blocked, and may reside in the JVM:\n{}", sb);
+                log.warn("The following threads are blocked, and may reside in the JVM:\n{}", sb);
             }
         } else {
-            LOG.debug("Shutting down with no inflight threads.");
+            log.debug("Shutting down with no inflight threads.");
         }
 
         inflight.clear();
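
The await/countDown pair above is the standard CountDownLatch handshake: the routing thread parks on the latch and the asynchronous callback releases it. A bare-bones JDK-only sketch of that handshake (names are illustrative, not Camel's):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class AwaitDemo {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(1);
            ExecutorService pool = Executors.newSingleThreadExecutor();

            pool.submit(() -> {
                // simulate the asynchronous callback arriving later
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                latch.countDown();   // the countDown(exchange, latch) side
            });

            latch.await();           // the await(exchange, latch) side: blocks until released
            System.out.println("callback received, continue routing");
            pool.shutdown();
        }
    }
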
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultCamelContext.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultCamelContext.java
index 4bfdb90..daf04a1 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultCamelContext.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultCamelContext.java
@@ -191,8 +191,6 @@ import org.apache.camel.util.TimeUtils;
 import org.apache.camel.util.URISupport;
 import org.apache.camel.util.function.ThrowingRunnable;
 import org.apache.camel.util.jsse.SSLContextParameters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.slf4j.MDC;
 
 import static org.apache.camel.impl.MDCUnitOfWork.MDC_CAMEL_CONTEXT_ID;
@@ -204,7 +202,7 @@ import static org.apache.camel.impl.MDCUnitOfWork.MDC_CAMEL_CONTEXT_ID;
  */
 @SuppressWarnings("deprecation")
 public class DefaultCamelContext extends ServiceSupport implements ModelCamelContext, ManagedCamelContext, Suspendable {
-    private final Logger log = LoggerFactory.getLogger(getClass());
+
     private String version;
     private final AtomicBoolean vetoStated = new AtomicBoolean();
     private JAXBContext jaxbContext;
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultComponent.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultComponent.java
index d059fab..af4c1f6 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultComponent.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultComponent.java
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
  * Default component to use as a base for component implementations.
  */
 public abstract class DefaultComponent extends ServiceSupport implements Component {
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultComponent.class);
+
     private static final Pattern RAW_PATTERN = Pattern.compile("RAW(.*&&.*)");
 
     private final List<Supplier<ComponentExtension>> extensions = new ArrayList<>();
@@ -113,12 +113,12 @@ public abstract class DefaultComponent extends ServiceSupport implements Compone
         uri = useRawUri() ? uri : encodedUri;
 
         validateURI(uri, path, parameters);
-        if (LOG.isTraceEnabled()) {
+        if (log.isTraceEnabled()) {
             // at trace level it's okay to log the parameters, even though they may contain passwords
-            LOG.trace("Creating endpoint uri=[{}], path=[{}], parameters=[{}]", URISupport.sanitizeUri(uri), URISupport.sanitizePath(path), parameters);
-        } else if (LOG.isDebugEnabled()) {
+            log.trace("Creating endpoint uri=[{}], path=[{}], parameters=[{}]", URISupport.sanitizeUri(uri), URISupport.sanitizePath(path), parameters);
+        } else if (log.isDebugEnabled()) {
             // but at debug level only output sanitized uris
-            LOG.debug("Creating endpoint uri=[{}], path=[{}]", URISupport.sanitizeUri(uri), URISupport.sanitizePath(path));
+            log.debug("Creating endpoint uri=[{}], path=[{}]", URISupport.sanitizeUri(uri), URISupport.sanitizePath(path));
         }
         Endpoint endpoint = createEndpoint(uri, path, parameters);
         if (endpoint == null) {
@@ -245,10 +245,10 @@ public abstract class DefaultComponent extends ServiceSupport implements Compone
             // only resolve property placeholders if it's in use
             Component existing = CamelContextHelper.lookupPropertiesComponent(camelContext, false);
             if (existing != null) {
-                LOG.debug("Resolving property placeholders on component: {}", this);
+                log.debug("Resolving property placeholders on component: {}", this);
                 CamelContextHelper.resolvePropertyPlaceholders(camelContext, this);
             } else {
-                LOG.debug("Cannot resolve property placeholders on component: {} as PropertiesComponent is not in use", this);
+                log.debug("Cannot resolve property placeholders on component: {} as PropertiesComponent is not in use", this);
             }
         }
     }
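
The trace/debug split above is deliberate: at trace level the full parameter map is logged (and may contain passwords), while at debug level only sanitized uris are written. A short sketch of that level-dependent detail with a plain SLF4J logger (the sanitize helper is a hypothetical stand-in, not Camel's URISupport):

    import java.util.Map;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LevelDependentLoggingDemo {
        private final Logger log = LoggerFactory.getLogger(getClass());

        String sanitize(String uri) {
            // hypothetical stand-in for URISupport.sanitizeUri
            return uri.replaceAll("password=[^&]*", "password=xxxxxx");
        }

        void logEndpointCreation(String uri, Map<String, Object> parameters) {
            if (log.isTraceEnabled()) {
                // full detail, acceptable only at trace level
                log.trace("Creating endpoint uri=[{}], parameters=[{}]", sanitize(uri), parameters);
            } else if (log.isDebugEnabled()) {
                log.debug("Creating endpoint uri=[{}]", sanitize(uri));
            }
        }
    }
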
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumer.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumer.java
index c24701c..9ab5a3d 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumer.java
@@ -31,8 +31,6 @@ import org.apache.camel.util.AsyncProcessorConverterHelper;
 import org.apache.camel.util.ServiceHelper;
 import org.apache.camel.util.URISupport;
 import org.apache.camel.util.UnitOfWorkHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A default consumer useful for implementation inheritance.
@@ -40,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class DefaultConsumer extends ServiceSupport implements Consumer, RouteAware {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     private transient String consumerToString;
     private final Endpoint endpoint;
     private final Processor processor;
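
DefaultConsumer (and DefaultProducer further down) previously declared the protected `log` field itself; removing it stays source-compatible for subclasses because the same name now resolves to the field inherited via ServiceSupport. A hypothetical subclass keeps compiling and logging unchanged (illustrative, not part of Camel):

    import org.apache.camel.Endpoint;
    import org.apache.camel.Processor;
    import org.apache.camel.impl.DefaultConsumer;

    public class MyConsumer extends DefaultConsumer {

        public MyConsumer(Endpoint endpoint, Processor processor) {
            super(endpoint, processor);
        }

        @Override
        protected void doStart() throws Exception {
            super.doStart();
            // 'log' used to be declared on DefaultConsumer; after this change the
            // reference binds to the protected logger on the ServiceSupport side
            log.debug("Starting consumer for {}", getEndpoint());
        }
    }
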
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumerTemplate.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumerTemplate.java
index 0d72921..175e6d5 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumerTemplate.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultConsumerTemplate.java
@@ -41,7 +41,6 @@ import static org.apache.camel.util.ObjectHelper.wrapRuntimeCamelException;
  */
 public class DefaultConsumerTemplate extends ServiceSupport implements ConsumerTemplate {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultConsumerTemplate.class);
     private final CamelContext camelContext;
     private ConsumerCache consumerCache;
     private int maximumCacheSize;
@@ -207,13 +206,13 @@ public class DefaultConsumerTemplate extends ServiceSupport implements ConsumerT
             if (exchange.getUnitOfWork() == null) {
                 // handover completions and done them manually to ensure they are being executed
                 List<Synchronization> synchronizations = exchange.handoverCompletions();
-                UnitOfWorkHelper.doneSynchronizations(exchange, synchronizations, LOG);
+                UnitOfWorkHelper.doneSynchronizations(exchange, synchronizations, log);
             } else {
                 // done the unit of work
                 exchange.getUnitOfWork().done(exchange);
             }
         } catch (Throwable e) {
-            LOG.warn("Exception occurred during done UnitOfWork for Exchange: " + exchange
+            log.warn("Exception occurred during done UnitOfWork for Exchange: " + exchange
                     + ". This exception will be ignored.", e);
         }
     }
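
For context on where the logging above surfaces at runtime: DefaultConsumerTemplate backs the ConsumerTemplate API, which lets application code poll an endpoint directly and delegates the actual receive calls to the ConsumerCache shown earlier. A rough usage sketch, assuming plain camel-core and a hypothetical seda endpoint name:

    import org.apache.camel.CamelContext;
    import org.apache.camel.ConsumerTemplate;
    import org.apache.camel.ProducerTemplate;
    import org.apache.camel.impl.DefaultCamelContext;

    public class ConsumerTemplateDemo {
        public static void main(String[] args) throws Exception {
            CamelContext context = new DefaultCamelContext();
            context.start();

            ProducerTemplate producer = context.createProducerTemplate();
            ConsumerTemplate consumer = context.createConsumerTemplate();

            producer.sendBody("seda:demo", "hello");
            // blocks for up to 1000 ms waiting for a message on the seda queue
            String body = consumer.receiveBody("seda:demo", 1000, String.class);
            System.out.println(body);

            context.stop();
        }
    }
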
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultEndpoint.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultEndpoint.java
index 50695b5..d7d7632 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultEndpoint.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultEndpoint.java
@@ -57,7 +57,6 @@ import org.slf4j.LoggerFactory;
  */
 public abstract class DefaultEndpoint extends ServiceSupport implements Endpoint, HasId, CamelContextAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultEndpoint.class);
     private final String id = EndpointHelper.createEndpointId();
     private transient String endpointUriToString;
     private String endpointUri;
@@ -206,8 +205,8 @@ public abstract class DefaultEndpoint extends ServiceSupport implements Endpoint
 
     public PollingConsumer createPollingConsumer() throws Exception {
         // should not call configurePollingConsumer when it's an EventDrivenPollingConsumer
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Creating EventDrivenPollingConsumer with queueSize: {} blockWhenFull: {} blockTimeout: {}",
+        if (log.isDebugEnabled()) {
+            log.debug("Creating EventDrivenPollingConsumer with queueSize: {} blockWhenFull: {} blockTimeout: {}",
                     new Object[]{getPollingConsumerQueueSize(), isPollingConsumerBlockWhenFull(), getPollingConsumerBlockTimeout()});
         }
         EventDrivenPollingConsumer consumer = new EventDrivenPollingConsumer(this, getPollingConsumerQueueSize());
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultInflightRepository.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultInflightRepository.java
index f63569d..102c358 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultInflightRepository.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultInflightRepository.java
@@ -42,7 +42,6 @@ import org.slf4j.LoggerFactory;
  */
 public class DefaultInflightRepository extends ServiceSupport implements InflightRepository {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultInflightRepository.class);
     private final ConcurrentMap<String, Exchange> inflight = new ConcurrentHashMap<>();
     private final ConcurrentMap<String, AtomicInteger> routeCount = new ConcurrentHashMap<>();
 
@@ -172,9 +171,9 @@ public class DefaultInflightRepository extends ServiceSupport implements Infligh
     protected void doStop() throws Exception {
         int count = size();
         if (count > 0) {
-            LOG.warn("Shutting down while there are still {} inflight exchanges.", count);
+            log.warn("Shutting down while there are still {} inflight exchanges.", count);
         } else {
-            LOG.debug("Shutting down with no inflight exchanges.");
+            log.debug("Shutting down with no inflight exchanges.");
         }
         routeCount.clear();
     }
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultProducer.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultProducer.java
index 3885e64..019b748 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultProducer.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultProducer.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public abstract class DefaultProducer extends ServiceSupport implements Producer {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     private transient String producerToString;
     private final Endpoint endpoint;
 
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultScheduledPollConsumerScheduler.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultScheduledPollConsumerScheduler.java
index f2b2093..20be481 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultScheduledPollConsumerScheduler.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultScheduledPollConsumerScheduler.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.camel.CamelContext;
 import org.apache.camel.Consumer;
 import org.apache.camel.spi.ScheduledPollConsumerScheduler;
+import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.ObjectHelper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -33,9 +34,8 @@ import org.slf4j.LoggerFactory;
 /**
  * Default {@link org.apache.camel.spi.ScheduledPollConsumerScheduler}.
  */
-public class DefaultScheduledPollConsumerScheduler extends org.apache.camel.support.ServiceSupport implements ScheduledPollConsumerScheduler {
+public class DefaultScheduledPollConsumerScheduler extends ServiceSupport implements ScheduledPollConsumerScheduler {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultScheduledPollConsumerScheduler.class);
     private CamelContext camelContext;
     private Consumer consumer;
     private ScheduledExecutorService scheduledExecutorService;
@@ -137,16 +137,16 @@ public class DefaultScheduledPollConsumerScheduler extends org.apache.camel.supp
         // only schedule task if we have not already done that
         if (futures.size() == 0) {
             if (isUseFixedDelay()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scheduling poll (fixed delay) with initialDelay: {}, delay: {} ({}) for: {}",
+                if (log.isDebugEnabled()) {
+                    log.debug("Scheduling poll (fixed delay) with initialDelay: {}, delay: {} ({}) for: {}",
                             new Object[]{getInitialDelay(), getDelay(), getTimeUnit().name().toLowerCase(Locale.ENGLISH), consumer.getEndpoint()});
                 }
                 for (int i = 0; i < concurrentTasks; i++) {
                     futures.add(scheduledExecutorService.scheduleWithFixedDelay(task, getInitialDelay(), getDelay(), getTimeUnit()));
                 }
             } else {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scheduling poll (fixed rate) with initialDelay: {}, delay: {} ({}) for: {}",
+                if (log.isDebugEnabled()) {
+                    log.debug("Scheduling poll (fixed rate) with initialDelay: {}, delay: {} ({}) for: {}",
                             new Object[]{getInitialDelay(), getDelay(), getTimeUnit().name().toLowerCase(Locale.ENGLISH), consumer.getEndpoint()});
                 }
                 for (int i = 0; i < concurrentTasks; i++) {
@@ -180,7 +180,7 @@ public class DefaultScheduledPollConsumerScheduler extends org.apache.camel.supp
     @Override
     protected void doStop() throws Exception {
         if (isSchedulerStarted()) {
-            LOG.debug("This consumer is stopping, so cancelling scheduled task: {}", futures);
+            log.debug("This consumer is stopping, so cancelling scheduled task: {}", futures);
             for (ScheduledFuture<?> future : futures) {
                 future.cancel(true);
             }
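
The scheduler whose logging changes above is a thin wrapper over the JDK's ScheduledExecutorService: with useFixedDelay enabled it calls scheduleWithFixedDelay (visible in the hunk), otherwise it takes the fixed-rate branch. The difference between the two scheduling modes in a JDK-only sketch:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class SchedulingDemo {
        public static void main(String[] args) {
            ScheduledExecutorService ses = Executors.newScheduledThreadPool(2);
            Runnable poll = () -> System.out.println("poll at " + System.currentTimeMillis());

            // fixed delay: the next run starts 500 ms after the previous run finished,
            // so a slow poll pushes the whole schedule back
            ses.scheduleWithFixedDelay(poll, 0, 500, TimeUnit.MILLISECONDS);

            // fixed rate: runs are attempted every 500 ms regardless of how long a poll takes
            ses.scheduleAtFixedRate(poll, 250, 500, TimeUnit.MILLISECONDS);
        }
    }
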
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultShutdownStrategy.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultShutdownStrategy.java
index 91a7b73..7d70ff6 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultShutdownStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultShutdownStrategy.java
@@ -114,7 +114,6 @@ import org.slf4j.LoggerFactory;
  * @version
  */
 public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownStrategy, CamelContextAware {
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultShutdownStrategy.class);
 
     private CamelContext camelContext;
     private ExecutorService executor;
@@ -186,9 +185,9 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
         routesOrdered.sort(comparator);
 
         if (suspendOnly) {
-            LOG.info("Starting to graceful suspend {} routes (timeout {} {})", routesOrdered.size(), timeout, timeUnit.toString().toLowerCase(Locale.ENGLISH));
+            log.info("Starting to graceful suspend {} routes (timeout {} {})", routesOrdered.size(), timeout, timeUnit.toString().toLowerCase(Locale.ENGLISH));
         } else {
-            LOG.info("Starting to graceful shutdown {} routes (timeout {} {})", routesOrdered.size(), timeout, timeUnit.toString().toLowerCase(Locale.ENGLISH));
+            log.info("Starting to graceful shutdown {} routes (timeout {} {})", routesOrdered.size(), timeout, timeUnit.toString().toLowerCase(Locale.ENGLISH));
         }
 
         // use another thread to perform the shutdowns so we can support timeout
@@ -215,7 +214,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
 
             // if set, stop processing and return false to indicate that the shutdown is aborting
             if (!forceShutdown && abortAfterTimeout) {
-                LOG.warn("Timeout occurred during graceful shutdown. Aborting the shutdown now."
+                log.warn("Timeout occurred during graceful shutdown. Aborting the shutdown now."
                         + " Notice: some resources may still be running as graceful shutdown did not complete successfully.");
 
                 // we attempt to force shutdown so lets log the current inflight exchanges which are affected
@@ -224,7 +223,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                 return false;
             } else {
                 if (forceShutdown || shutdownNowOnTimeout) {
-                    LOG.warn("Timeout occurred during graceful shutdown. Forcing the routes to be shutdown now."
+                    log.warn("Timeout occurred during graceful shutdown. Forcing the routes to be shutdown now."
                             + " Notice: some resources may still be running as graceful shutdown did not complete successfully.");
 
                     // we attempt to force shutdown so lets log the current inflight exchanges which are affected
@@ -240,7 +239,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                         }
                     }
                 } else {
-                    LOG.warn("Timeout occurred during graceful shutdown. Will ignore shutting down the remainder routes."
+                    log.warn("Timeout occurred during graceful shutdown. Will ignore shutting down the remainder routes."
                             + " Notice: some resources may still be running as graceful shutdown did not complete successfully.");
 
                     logInflightExchanges(context, routes, isLogInflightExchangesOnTimeout());
@@ -253,7 +252,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
         // convert to seconds as it's easier to read than a big milliseconds number
         long seconds = TimeUnit.SECONDS.convert(watch.taken(), TimeUnit.MILLISECONDS);
 
-        LOG.info("Graceful shutdown of {} routes completed in {} seconds", routesOrdered.size(), seconds);
+        log.info("Graceful shutdown of {} routes completed in {} seconds", routesOrdered.size(), seconds);
         return true;
     }
 
@@ -342,7 +341,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
             // it has completed its current task
             ShutdownRunningTask current = order.getRoute().getRouteContext().getShutdownRunningTask();
             if (current != ShutdownRunningTask.CompleteCurrentTaskOnly) {
-                LOG.debug("Changing shutdownRunningTask from {} to " +  ShutdownRunningTask.CompleteCurrentTaskOnly
+                log.debug("Changing shutdownRunningTask from {} to " +  ShutdownRunningTask.CompleteCurrentTaskOnly
                     + " on route {} to shutdown faster", current, order.getRoute().getId());
                 order.getRoute().getRouteContext().setShutdownRunningTask(ShutdownRunningTask.CompleteCurrentTaskOnly);
             }
@@ -369,19 +368,19 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
      *
      * @param consumer the consumer to shutdown
      */
-    protected static void shutdownNow(Consumer consumer) {
-        LOG.trace("Shutting down: {}", consumer);
+    protected void shutdownNow(Consumer consumer) {
+        log.trace("Shutting down: {}", consumer);
 
         // allow us to do custom work before delegating to service helper
         try {
             ServiceHelper.stopService(consumer);
         } catch (Throwable e) {
-            LOG.warn("Error occurred while shutting down route: " + consumer + ". This exception will be ignored.", e);
+            log.warn("Error occurred while shutting down route: " + consumer + ". This exception will be ignored.", e);
             // fire event
             EventHelper.notifyServiceStopFailure(consumer.getEndpoint().getCamelContext(), consumer, e);
         }
 
-        LOG.trace("Shutdown complete for: {}", consumer);
+        log.trace("Shutdown complete for: {}", consumer);
     }
 
     /**
@@ -389,19 +388,19 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
      *
      * @param consumer the consumer to suspend
      */
-    protected static void suspendNow(Consumer consumer) {
-        LOG.trace("Suspending: {}", consumer);
+    protected void suspendNow(Consumer consumer) {
+        log.trace("Suspending: {}", consumer);
 
         // allow us to do custom work before delegating to service helper
         try {
             ServiceHelper.suspendService(consumer);
         } catch (Throwable e) {
-            LOG.warn("Error occurred while suspending route: " + consumer + ". This exception will be ignored.", e);
+            log.warn("Error occurred while suspending route: " + consumer + ". This exception will be ignored.", e);
             // fire event
             EventHelper.notifyServiceStopFailure(consumer.getEndpoint().getCamelContext(), consumer, e);
         }
 
-        LOG.trace("Suspend complete for: {}", consumer);
+        log.trace("Suspend complete for: {}", consumer);
     }
 
     private ExecutorService getExecutorService() {
@@ -443,7 +442,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
      * @param forced  whether to force shutdown
      * @param includeChildren whether to prepare the children of the service as well
      */
-    private static void prepareShutdown(Service service, boolean suspendOnly, boolean forced, boolean includeChildren, boolean suppressLogging) {
+    private void prepareShutdown(Service service, boolean suspendOnly, boolean forced, boolean includeChildren, boolean suppressLogging) {
         Set<Service> list;
         if (includeChildren) {
             // include error handlers as we want to prepare them for shutdown as well
@@ -456,13 +455,13 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
         for (Service child : list) {
             if (child instanceof ShutdownPrepared) {
                 try {
-                    LOG.trace("Preparing {} shutdown on {}", forced ? "forced" : "", child);
+                    log.trace("Preparing {} shutdown on {}", forced ? "forced" : "", child);
                     ((ShutdownPrepared) child).prepareShutdown(suspendOnly, forced);
                 } catch (Exception e) {
                     if (suppressLogging) {
-                        LOG.trace("Error during prepare shutdown on " + child + ". This exception will be ignored.", e);
+                        log.trace("Error during prepare shutdown on " + child + ". This exception will be ignored.", e);
                     } else {
-                        LOG.warn("Error during prepare shutdown on " + child + ". This exception will be ignored.", e);
+                        log.warn("Error during prepare shutdown on " + child + ". This exception will be ignored.", e);
                     }
                 }
             }
@@ -493,7 +492,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
     /**
      * Shutdown task which shuts down all the routes in a graceful manner.
      */
-    static class ShutdownTask implements Runnable {
+    class ShutdownTask implements Runnable {
 
         private final CamelContext context;
         private final List<RouteStartupOrder> routes;
@@ -524,7 +523,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
             // 2) wait until all inflight and pending exchanges have been completed
             // 3) shutdown the deferred routes
 
-            LOG.debug("There are {} routes to {}", routes.size(), suspendOnly ? "suspend" : "shutdown");
+            log.debug("There are {} routes to {}", routes.size(), suspendOnly ? "suspend" : "shutdown");
 
             // list of deferred consumers to shut down when all exchanges have been completely routed
             // and thus there are no more inflight exchanges, so they can be safely shut down at that time
@@ -534,8 +533,8 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                 ShutdownRoute shutdownRoute = order.getRoute().getRouteContext().getShutdownRoute();
                 ShutdownRunningTask shutdownRunningTask = order.getRoute().getRouteContext().getShutdownRunningTask();
 
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("{}{} with options [{},{}]",
+                if (log.isTraceEnabled()) {
+                    log.trace("{}{} with options [{},{}]",
                             suspendOnly ? "Suspending route: " : "Shutting down route: ",
                             order.getRoute().getId(), shutdownRoute, shutdownRunningTask);
                 }
@@ -568,15 +567,15 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                         suspendNow(consumer);
                         // add it to the deferred list so the route will be shutdown later
                         deferredConsumers.add(new ShutdownDeferredConsumer(order.getRoute(), consumer));
-                        LOG.debug("Route: {} suspended and shutdown deferred, was consuming from: {}", order.getRoute().getId(), order.getRoute().getEndpoint());
+                        log.debug("Route: {} suspended and shutdown deferred, was consuming from: {}", order.getRoute().getId(), order.getRoute().getEndpoint());
                     } else if (shutdown) {
                         shutdownNow(consumer);
-                        LOG.info("Route: {} shutdown complete, was consuming from: {}", order.getRoute().getId(), order.getRoute().getEndpoint());
+                        log.info("Route: {} shutdown complete, was consuming from: {}", order.getRoute().getId(), order.getRoute().getEndpoint());
                     } else {
                         // we will stop it later, but for now it must run to be able to help all inflight messages
                         // be safely completed
                         deferredConsumers.add(new ShutdownDeferredConsumer(order.getRoute(), consumer));
-                        LOG.debug("Route: " + order.getRoute().getId() + (suspendOnly ? " shutdown deferred." : " suspension deferred."));
+                        log.debug("Route: " + order.getRoute().getId() + (suspendOnly ? " shutdown deferred." : " suspension deferred."));
                     }
                 }
             }
@@ -608,7 +607,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                         String routeId = order.getRoute().getId();
                         routeInflight.put(routeId, inflight);
                         size += inflight;
-                        LOG.trace("{} inflight and pending exchanges for route: {}", inflight, routeId);
+                        log.trace("{} inflight and pending exchanges for route: {}", inflight, routeId);
                     }
                 }
                 if (size > 0) {
@@ -624,7 +623,7 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                                 + (TimeUnit.SECONDS.convert(timeout, timeUnit) - (loopCount++ * loopDelaySeconds)) + " seconds.";
                         msg += " Inflights per route: [" + csb.toString() + "]";
 
-                        LOG.info(msg);
+                        log.info(msg);
 
                         // log verbose if DEBUG logging is enabled
                         logInflightExchanges(context, routes, logInflightExchangesOnTimeout);
@@ -632,10 +631,10 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                         Thread.sleep(loopDelaySeconds * 1000);
                     } catch (InterruptedException e) {
                         if (abortAfterTimeout) {
-                            LOG.warn("Interrupted while waiting during graceful shutdown, will abort.");
+                            log.warn("Interrupted while waiting during graceful shutdown, will abort.");
                             return;
                         } else {
-                            LOG.warn("Interrupted while waiting during graceful shutdown, will force shutdown now.");
+                            log.warn("Interrupted while waiting during graceful shutdown, will force shutdown now.");
                             break;
                         }
                     }
@@ -648,11 +647,11 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
             for (ShutdownDeferredConsumer deferred : deferredConsumers) {
                 Consumer consumer = deferred.getConsumer();
                 if (consumer instanceof ShutdownAware) {
-                    LOG.trace("Route: {} preparing to shutdown.", deferred.getRoute().getId());
+                    log.trace("Route: {} preparing to shutdown.", deferred.getRoute().getId());
                     boolean forced = context.getShutdownStrategy().forceShutdown(consumer);
                     boolean suppress = context.getShutdownStrategy().isSuppressLoggingOnTimeout();
                     prepareShutdown(consumer, suspendOnly, forced, false, suppress);
-                    LOG.debug("Route: {} preparing to shutdown complete.", deferred.getRoute().getId());
+                    log.debug("Route: {} preparing to shutdown complete.", deferred.getRoute().getId());
                 }
             }
 
@@ -661,10 +660,10 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
                 Consumer consumer = deferred.getConsumer();
                 if (suspendOnly) {
                     suspendNow(consumer);
-                    LOG.info("Route: {} suspend complete, was consuming from: {}", deferred.getRoute().getId(), deferred.getConsumer().getEndpoint());
+                    log.info("Route: {} suspend complete, was consuming from: {}", deferred.getRoute().getId(), deferred.getConsumer().getEndpoint());
                 } else {
                     shutdownNow(consumer);
-                    LOG.info("Route: {} shutdown complete, was consuming from: {}", deferred.getRoute().getId(), deferred.getConsumer().getEndpoint());
+                    log.info("Route: {} shutdown complete, was consuming from: {}", deferred.getRoute().getId(), deferred.getConsumer().getEndpoint());
                 }
             }
 
@@ -709,9 +708,9 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
      *
      * @param infoLevel <tt>true</tt> to log at INFO level, <tt>false</tt> to log at DEBUG level
      */
-    protected static void logInflightExchanges(CamelContext camelContext, List<RouteStartupOrder> routes, boolean infoLevel) {
+    protected void logInflightExchanges(CamelContext camelContext, List<RouteStartupOrder> routes, boolean infoLevel) {
         // check if we need to log
-        if (!infoLevel && !LOG.isDebugEnabled()) {
+        if (!infoLevel && !log.isDebugEnabled()) {
             return;
         }
 
@@ -751,9 +750,9 @@ public class DefaultShutdownStrategy extends ServiceSupport implements ShutdownS
         }
 
         if (infoLevel) {
-            LOG.info(sb.toString());
+            log.info(sb.toString());
         } else {
-            LOG.debug(sb.toString());
+            log.debug(sb.toString());
         }
     }
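
The hunks above all follow one pattern: the per-class static LOG field goes away and every logging call resolves to an inherited instance field named log. That is also why prepareShutdown(...) and the nested ShutdownTask lose their static modifier in this file: a static member cannot reference an instance field. A minimal sketch of the resulting shape, assuming the shared field is a protected SLF4J Logger declared on ServiceSupport (MyShutdownAwareService is a hypothetical subclass, not part of this commit):

    import org.apache.camel.support.ServiceSupport;

    public class MyShutdownAwareService extends ServiceSupport {
        // before: private static final Logger LOG = LoggerFactory.getLogger(MyShutdownAwareService.class);
        // after:  no per-class logger field at all; the calls below use the inherited "log"

        @Override
        protected void doStart() throws Exception {
            log.debug("Starting {}", this);
        }

        @Override
        protected void doStop() throws Exception {
            log.debug("Stopping {}", this);
        }
    }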
 
diff --git a/camel-core/src/main/java/org/apache/camel/impl/DefaultStreamCachingStrategy.java b/camel-core/src/main/java/org/apache/camel/impl/DefaultStreamCachingStrategy.java
index e974d74..c6bbea1 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/DefaultStreamCachingStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/DefaultStreamCachingStrategy.java
@@ -29,6 +29,7 @@ import org.apache.camel.Exchange;
 import org.apache.camel.Message;
 import org.apache.camel.StreamCache;
 import org.apache.camel.spi.StreamCachingStrategy;
+import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.FilePathResolver;
 import org.apache.camel.util.FileUtil;
 import org.apache.camel.util.IOHelper;
@@ -38,9 +39,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Default implementation of {@link StreamCachingStrategy}
  */
-public class DefaultStreamCachingStrategy extends org.apache.camel.support.ServiceSupport implements CamelContextAware, StreamCachingStrategy {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultStreamCachingStrategy.class);
+public class DefaultStreamCachingStrategy extends ServiceSupport implements CamelContextAware, StreamCachingStrategy {
 
     private CamelContext camelContext;
     private boolean enabled;
@@ -169,7 +168,7 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
         }
 
         boolean answer = anySpoolRules ? any : all;
-        LOG.debug("Should spool cache {} -> {}", length, answer);
+        log.debug("Should spool cache {} -> {}", length, answer);
         return answer;
     }
 
@@ -181,8 +180,8 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
         Message message = exchange.hasOut() ? exchange.getOut() : exchange.getIn();
         StreamCache cache = message.getBody(StreamCache.class);
         if (cache != null) {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Cached stream to {} -> {}", cache.inMemory() ? "memory" : "spool", cache);
+            if (log.isTraceEnabled()) {
+                log.trace("Cached stream to {} -> {}", cache.inMemory() ? "memory" : "spool", cache);
             }
             if (statistics.isStatisticsEnabled()) {
                 try {
@@ -192,7 +191,7 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
                         statistics.updateSpool(cache.length());
                     }
                 } catch (Exception e) {
-                    LOG.debug("Error updating cache statistics. This exception is ignored.", e);
+                    log.debug("Error updating cache statistics. This exception is ignored.", e);
                 }
             }
         }
@@ -222,7 +221,7 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
     @Override
     protected void doStart() throws Exception {
         if (!enabled) {
-            LOG.debug("StreamCaching is not enabled");
+            log.debug("StreamCaching is not enabled");
             return;
         }
 
@@ -249,16 +248,16 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
 
             if (spoolDirectory.exists()) {
                 if (spoolDirectory.isDirectory()) {
-                    LOG.debug("Using spool directory: {}", spoolDirectory);
+                    log.debug("Using spool directory: {}", spoolDirectory);
                 } else {
-                    LOG.warn("Spool directory: {} is not a directory. This may cause problems spooling to disk for the stream caching!", spoolDirectory);
+                    log.warn("Spool directory: {} is not a directory. This may cause problems spooling to disk for the stream caching!", spoolDirectory);
                 }
             } else {
                 boolean created = spoolDirectory.mkdirs();
                 if (!created) {
-                    LOG.warn("Cannot create spool directory: {}. This may cause problems spooling to disk for the stream caching!", spoolDirectory);
+                    log.warn("Cannot create spool directory: {}. This may cause problems spooling to disk for the stream caching!", spoolDirectory);
                 } else {
-                    LOG.debug("Created spool directory: {}", spoolDirectory);
+                    log.debug("Created spool directory: {}", spoolDirectory);
                 }
 
             }
@@ -275,24 +274,24 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
             }
         }
 
-        LOG.debug("StreamCaching configuration {}", this);
+        log.debug("StreamCaching configuration {}", this);
 
         if (spoolDirectory != null) {
-            LOG.info("StreamCaching in use with spool directory: {} and rules: {}", spoolDirectory.getPath(), spoolRules);
+            log.info("StreamCaching in use with spool directory: {} and rules: {}", spoolDirectory.getPath(), spoolRules);
         } else {
-            LOG.info("StreamCaching in use with rules: {}", spoolRules);
+            log.info("StreamCaching in use with rules: {}", spoolRules);
         }
     }
 
     @Override
     protected void doStop() throws Exception {
         if (spoolThreshold > 0 & spoolDirectory != null  && isRemoveSpoolDirectoryWhenStopping()) {
-            LOG.debug("Removing spool directory: {}", spoolDirectory);
+            log.debug("Removing spool directory: {}", spoolDirectory);
             FileUtil.removeDir(spoolDirectory);
         }
 
-        if (LOG.isDebugEnabled() && statistics.isStatisticsEnabled()) {
-            LOG.debug("Stopping StreamCachingStrategy with statistics: {}", statistics);
+        if (log.isDebugEnabled() && statistics.isStatisticsEnabled()) {
+            log.debug("Stopping StreamCachingStrategy with statistics: {}", statistics);
         }
 
         statistics.reset();
@@ -313,7 +312,7 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
 
         public boolean shouldSpoolCache(long length) {
             if (spoolThreshold > 0 && length > spoolThreshold) {
-                LOG.trace("Should spool cache fixed threshold {} > {} -> true", length, spoolThreshold);
+                log.trace("Should spool cache fixed threshold {} > {} -> true", length, spoolThreshold);
                 return true;
             }
             return false;
@@ -347,15 +346,15 @@ public class DefaultStreamCachingStrategy extends org.apache.camel.support.Servi
                 double calc = (used / upper) * 100;
                 int percentage = (int) calc;
 
-                if (LOG.isTraceEnabled()) {
+                if (log.isTraceEnabled()) {
                     long u = heapUsage.getHeapMemoryUsage().getUsed();
                     long c = heapUsage.getHeapMemoryUsage().getCommitted();
                     long m = heapUsage.getHeapMemoryUsage().getMax();
-                    LOG.trace("Heap memory: [used={}M ({}%), committed={}M, max={}M]", u >> 20, percentage, c >> 20, m >> 20);
+                    log.trace("Heap memory: [used={}M ({}%), committed={}M, max={}M]", u >> 20, percentage, c >> 20, m >> 20);
                 }
 
                 if (percentage > spoolUsedHeapMemoryThreshold) {
-                    LOG.trace("Should spool cache heap memory threshold {} > {} -> true", percentage, spoolUsedHeapMemoryThreshold);
+                    log.trace("Should spool cache heap memory threshold {} > {} -> true", percentage, spoolUsedHeapMemoryThreshold);
                     return true;
                 }
             }
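
The UsedHeapMemorySpoolRule hunk above compares the used-heap percentage against spoolUsedHeapMemoryThreshold. A standalone sketch of that arithmetic follows; the excerpt does not show how the upper bound is chosen, so this assumes the max heap size, and the threshold value 70 is only an example:

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;

    public class HeapThresholdCheck {
        public static void main(String[] args) {
            MemoryMXBean heapUsage = ManagementFactory.getMemoryMXBean();
            double used = heapUsage.getHeapMemoryUsage().getUsed();
            double upper = heapUsage.getHeapMemoryUsage().getMax();   // may be -1 if the max is undefined
            int percentage = (int) ((used / upper) * 100);
            int spoolUsedHeapMemoryThreshold = 70;                    // example threshold, in percent
            System.out.println("Heap used: " + percentage + "% -> spool? "
                    + (percentage > spoolUsedHeapMemoryThreshold));
        }
    }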
diff --git a/camel-core/src/main/java/org/apache/camel/impl/EventDrivenPollingConsumer.java b/camel-core/src/main/java/org/apache/camel/impl/EventDrivenPollingConsumer.java
index b94063c..1fac509 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/EventDrivenPollingConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/EventDrivenPollingConsumer.java
@@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class EventDrivenPollingConsumer extends PollingConsumerSupport implements Processor, IsSingleton {
-    private static final Logger LOG = LoggerFactory.getLogger(EventDrivenPollingConsumer.class);
+
     private final BlockingQueue<Exchange> queue;
     private ExceptionHandler interruptedExceptionHandler;
     private Consumer consumer;
@@ -127,7 +127,7 @@ public class EventDrivenPollingConsumer extends PollingConsumerSupport implement
                 }
             }
         }
-        LOG.trace("Consumer is not running, so returning null");
+        log.trace("Consumer is not running, so returning null");
         return null;
     }
 
@@ -194,7 +194,7 @@ public class EventDrivenPollingConsumer extends PollingConsumerSupport implement
             try {
                 timeout = strategy.beforePoll(timeout);
             } catch (Exception e) {
-                LOG.debug("Error occurred before polling " + consumer + ". This exception will be ignored.", e);
+                log.debug("Error occurred before polling " + consumer + ". This exception will be ignored.", e);
             }
         }
         return timeout;
@@ -206,7 +206,7 @@ public class EventDrivenPollingConsumer extends PollingConsumerSupport implement
             try {
                 strategy.afterPoll();
             } catch (Exception e) {
-                LOG.debug("Error occurred after polling " + consumer + ". This exception will be ignored.", e);
+                log.debug("Error occurred after polling " + consumer + ". This exception will be ignored.", e);
             }
         }
     }
diff --git a/camel-core/src/main/java/org/apache/camel/impl/FileStateRepository.java b/camel-core/src/main/java/org/apache/camel/impl/FileStateRepository.java
index 398d7ca..4890689 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/FileStateRepository.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/FileStateRepository.java
@@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory;
  */
 @ManagedResource(description = "File based state repository")
 public class FileStateRepository extends ServiceSupport implements StateRepository<String, String> {
-    private static final Logger LOG = LoggerFactory.getLogger(FileStateRepository.class);
+
     private static final String STORE_DELIMITER = "\n";
     private static final String KEY_VALUE_DELIMITER = "=";
     private final AtomicBoolean init = new AtomicBoolean();
@@ -143,19 +143,19 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
      * @param key the state key
      */
     private void appendToStore(String key, String value) {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Appending {}={} to state filestore: {}", key, value, fileStore);
+        if (log.isDebugEnabled()) {
+            log.debug("Appending {}={} to state filestore: {}", key, value, fileStore);
         }
         FileOutputStream fos = null;
         try {
             // create store parent directory if missing
             File storeParentDirectory = fileStore.getParentFile();
             if (storeParentDirectory != null && !storeParentDirectory.exists()) {
-                LOG.info("Parent directory of file store {} doesn't exist. Creating.", fileStore);
+                log.info("Parent directory of file store {} doesn't exist. Creating.", fileStore);
                 if (fileStore.getParentFile().mkdirs()) {
-                    LOG.info("Parent directory of file store {} successfully created.", fileStore);
+                    log.info("Parent directory of file store {} successfully created.", fileStore);
                 } else {
-                    LOG.warn("Parent directory of file store {} cannot be created.", fileStore);
+                    log.warn("Parent directory of file store {} cannot be created.", fileStore);
                 }
             }
             // create store if missing
@@ -171,7 +171,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
         } catch (IOException e) {
             throw ObjectHelper.wrapRuntimeCamelException(e);
         } finally {
-            IOHelper.close(fos, "Appending to file state repository", LOG);
+            IOHelper.close(fos, "Appending to file state repository", log);
         }
     }
 
@@ -180,7 +180,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
      * to the file store.
      */
     protected void trunkStore() {
-        LOG.info("Trunking state filestore: {}", fileStore);
+        log.info("Trunking state filestore: {}", fileStore);
         FileOutputStream fos = null;
         try {
             fos = new FileOutputStream(fileStore);
@@ -193,7 +193,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
         } catch (IOException e) {
             throw ObjectHelper.wrapRuntimeCamelException(e);
         } finally {
-            IOHelper.close(fos, "Trunking file state repository", LOG);
+            IOHelper.close(fos, "Trunking file state repository", log);
         }
     }
 
@@ -203,7 +203,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
     protected void loadStore() throws IOException {
         // auto create starting directory if needed
         if (!fileStore.exists()) {
-            LOG.debug("Creating filestore: {}", fileStore);
+            log.debug("Creating filestore: {}", fileStore);
             File parent = fileStore.getParentFile();
             if (parent != null) {
                 parent.mkdirs();
@@ -214,7 +214,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
             }
         }
 
-        LOG.trace("Loading to 1st level cache from state filestore: {}", fileStore);
+        log.trace("Loading to 1st level cache from state filestore: {}", fileStore);
 
         cache.clear();
         try (Scanner scanner = new Scanner(fileStore, null, STORE_DELIMITER)) {
@@ -229,7 +229,7 @@ public class FileStateRepository extends ServiceSupport implements StateReposito
             throw ObjectHelper.wrapRuntimeCamelException(e);
         }
 
-        LOG.debug("Loaded {} to the 1st level cache from state filestore: {}", cache.size(), fileStore);
+        log.debug("Loaded {} to the 1st level cache from state filestore: {}", cache.size(), fileStore);
     }
 
     @Override
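
In appendToStore and trunkStore the third argument to IOHelper.close changes from the removed static LOG to the inherited log, so a failure while closing the stream is still logged rather than thrown, now through the instance logger. A simplified, self-contained sketch of that append-and-close pattern; the class name, append mode and the local logger are illustrative only, since this sketch does not extend ServiceSupport:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import org.apache.camel.util.IOHelper;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class AppendSketch {
        private static final Logger LOG = LoggerFactory.getLogger(AppendSketch.class);

        public static void append(File store, String key, String value) throws IOException {
            FileOutputStream fos = null;
            try {
                fos = new FileOutputStream(store, true);              // append mode
                fos.write((key + "=" + value + "\n").getBytes());     // one key=value entry per line
            } finally {
                // close failures are logged against the supplied logger instead of propagating
                IOHelper.close(fos, "Appending to file state repository", LOG);
            }
        }
    }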
diff --git a/camel-core/src/main/java/org/apache/camel/impl/InterceptSendToEndpointProcessor.java b/camel-core/src/main/java/org/apache/camel/impl/InterceptSendToEndpointProcessor.java
index d8cc101..5ed03e5 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/InterceptSendToEndpointProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/InterceptSendToEndpointProcessor.java
@@ -20,7 +20,6 @@ import org.apache.camel.AsyncCallback;
 import org.apache.camel.AsyncProcessor;
 import org.apache.camel.Endpoint;
 import org.apache.camel.Exchange;
-import org.apache.camel.ExchangePattern;
 import org.apache.camel.Producer;
 import org.apache.camel.util.ServiceHelper;
 import org.slf4j.Logger;
@@ -34,7 +33,6 @@ import static org.apache.camel.processor.PipelineHelper.continueProcessing;
  */
 public class InterceptSendToEndpointProcessor extends DefaultAsyncProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(InterceptSendToEndpointProcessor.class);
     private final InterceptSendToEndpoint endpoint;
     private final Endpoint delegate;
     private final Producer producer;
@@ -55,8 +53,8 @@ public class InterceptSendToEndpointProcessor extends DefaultAsyncProducer {
     @Override
     public boolean process(Exchange exchange, AsyncCallback callback) {
         // process the detour so we do the detour routing
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Sending to endpoint: {} is intercepted and detoured to: {} for exchange: {}", getEndpoint(), endpoint.getDetour(), exchange);
+        if (log.isDebugEnabled()) {
+            log.debug("Sending to endpoint: {} is intercepted and detoured to: {} for exchange: {}", getEndpoint(), endpoint.getDetour(), exchange);
         }
         // add header with the real endpoint uri
         exchange.getIn().setHeader(Exchange.INTERCEPTED_ENDPOINT, delegate.getEndpointUri());
@@ -72,7 +70,7 @@ public class InterceptSendToEndpointProcessor extends DefaultAsyncProducer {
 
         // Decide whether to continue or not; similar logic to the Pipeline
         // check for error if so we should break out
-        if (!continueProcessing(exchange, "skip sending to original intended destination: " + getEndpoint(), LOG)) {
+        if (!continueProcessing(exchange, "skip sending to original intended destination: " + getEndpoint(), log)) {
             callback.done(true);
             return true;
         }
@@ -107,8 +105,8 @@ public class InterceptSendToEndpointProcessor extends DefaultAsyncProducer {
                 return true;
             }
         } else {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Stop() means skip sending exchange to original intended destination: {} for exchange: {}", getEndpoint(), exchange);
+            if (log.isDebugEnabled()) {
+                log.debug("Stop() means skip sending exchange to original intended destination: {} for exchange: {}", getEndpoint(), exchange);
             }
             callback.done(true);
             return true;
diff --git a/camel-core/src/main/java/org/apache/camel/impl/PollingConsumerSupport.java b/camel-core/src/main/java/org/apache/camel/impl/PollingConsumerSupport.java
index 1890d3c..14d0f06 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/PollingConsumerSupport.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/PollingConsumerSupport.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public abstract class PollingConsumerSupport extends ServiceSupport implements PollingConsumer {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     private final Endpoint endpoint;
     private ExceptionHandler exceptionHandler;
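
The field removed above, protected final Logger log = LoggerFactory.getLogger(getClass()), is exactly what subclasses such as EventDrivenPollingConsumer keep referring to, so the same declaration must now live further up the hierarchy, presumably on ServiceSupport, which the other hunks in this diff rely on. Because the logger is created from getClass(), each concrete subclass still logs under its own category even though the field is shared. A tiny demo of that, with illustrative class names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerNameDemo {
        abstract static class Base {
            protected final Logger log = LoggerFactory.getLogger(getClass());
        }

        static class Child extends Base {
        }

        public static void main(String[] args) {
            // prints a name ending in "LoggerNameDemo$Child", not "Base"
            System.out.println(new Child().log.getName());
        }
    }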
 
diff --git a/camel-core/src/main/java/org/apache/camel/impl/ProducerCache.java b/camel-core/src/main/java/org/apache/camel/impl/ProducerCache.java
index d6bdadb..2caecb6 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/ProducerCache.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/ProducerCache.java
@@ -46,8 +46,6 @@ import org.slf4j.LoggerFactory;
  */
 public class ProducerCache extends ServiceSupport {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ProducerCache.class);
-
     private final CamelContext camelContext;
     private final ServicePool<AsyncProducer> producers;
     private final Object source;
@@ -152,7 +150,7 @@ public class ProducerCache extends ServiceSupport {
         AsyncProducer producer = acquireProducer(endpoint);
         try {
             // now lets dispatch
-            LOG.debug(">>>> {} {}", endpoint, exchange);
+            log.debug(">>>> {} {}", endpoint, exchange);
 
             // set property which endpoint we send to
             exchange.setProperty(Exchange.TO_ENDPOINT, endpoint.getEndpointUri());
@@ -287,7 +285,7 @@ public class ProducerCache extends ServiceSupport {
 
             if (producer == null) {
                 if (isStopped()) {
-                    LOG.warn("Ignoring exchange sent after processor is stopped: {}", exchange);
+                    log.warn("Ignoring exchange sent after processor is stopped: {}", exchange);
                     callback.done(true);
                     return true;
                 } else {
@@ -342,7 +340,7 @@ public class ProducerCache extends ServiceSupport {
     protected boolean asyncDispatchExchange(Endpoint endpoint, AsyncProducer producer,
                                             Processor resultProcessor, Exchange exchange, AsyncCallback callback) {
         // now lets dispatch
-        LOG.debug(">>>> {} {}", endpoint, exchange);
+        log.debug(">>>> {} {}", endpoint, exchange);
 
         // set property which endpoint we send to
         exchange.setProperty(Exchange.TO_ENDPOINT, endpoint.getEndpointUri());
@@ -392,7 +390,7 @@ public class ProducerCache extends ServiceSupport {
     public int size() {
         int size = producers.size();
 
-        LOG.trace("size = {}", size);
+        log.trace("size = {}", size);
         return size;
     }
 
@@ -456,7 +454,7 @@ public class ProducerCache extends ServiceSupport {
             producers.stop();
             producers.start();
         } catch (Exception e) {
-            LOG.debug("Error restarting producers", e);
+            log.debug("Error restarting producers", e);
         }
         if (statistics != null) {
             statistics.clear();
diff --git a/camel-core/src/main/java/org/apache/camel/impl/RouteService.java b/camel-core/src/main/java/org/apache/camel/impl/RouteService.java
index e224127..8baaa55 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/RouteService.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/RouteService.java
@@ -61,8 +61,6 @@ import static org.apache.camel.impl.MDCUnitOfWork.MDC_ROUTE_ID;
  */
 public class RouteService extends ChildServiceSupport {
 
-    private static final Logger LOG = LoggerFactory.getLogger(RouteService.class);
-
     private final DefaultCamelContext camelContext;
     private final RouteDefinition routeDefinition;
     private final List<RouteContext> routeContexts;
@@ -165,7 +163,7 @@ public class RouteService extends ChildServiceSupport {
                     // warm up the route first
                     route.warmUp();
 
-                    LOG.debug("Starting services on route: {}", route.getId());
+                    log.debug("Starting services on route: {}", route.getId());
                     List<Service> services = route.getServices();
 
                     // callback that we are staring these services
@@ -250,7 +248,7 @@ public class RouteService extends ChildServiceSupport {
         
         for (Route route : routes) {
             try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
-                LOG.debug("Stopping services on route: {}", route.getId());
+                log.debug("Stopping services on route: {}", route.getId());
 
                 // gather list of services to stop as we need to start child services as well
                 Set<Service> services = gatherChildServices(route, true);
@@ -286,7 +284,7 @@ public class RouteService extends ChildServiceSupport {
     protected void doShutdown() throws Exception {
         for (Route route : routes) {
             try (MDCHelper mdcHelper = new MDCHelper(route.getId())) {
-                LOG.debug("Shutting down services on route: {}", route.getId());
+                log.debug("Shutting down services on route: {}", route.getId());
 
                 // gather list of services to stop as we need to start child services as well
                 Set<Service> services = gatherChildServices(route, true);
@@ -363,7 +361,7 @@ public class RouteService extends ChildServiceSupport {
 
     protected void startChildService(Route route, List<Service> services) throws Exception {
         for (Service service : services) {
-            LOG.debug("Starting child service on route: {} -> {}", route.getId(), service);
+            log.debug("Starting child service on route: {} -> {}", route.getId(), service);
             for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
                 strategy.onServiceAdd(camelContext, service, route);
             }
@@ -374,7 +372,7 @@ public class RouteService extends ChildServiceSupport {
 
     protected void stopChildService(Route route, Set<Service> services, boolean shutdown) throws Exception {
         for (Service service : services) {
-            LOG.debug("{} child service on route: {} -> {}", shutdown ? "Shutting down" : "Stopping", route.getId(), service);
+            log.debug("{} child service on route: {} -> {}", shutdown ? "Shutting down" : "Stopping", route.getId(), service);
             if (service instanceof ErrorHandler) {
                 // special for error handlers
                 for (LifecycleStrategy strategy : camelContext.getLifecycleStrategies()) {
diff --git a/camel-core/src/main/java/org/apache/camel/impl/ScheduledBatchPollingConsumer.java b/camel-core/src/main/java/org/apache/camel/impl/ScheduledBatchPollingConsumer.java
index 661eb7a..927f849 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/ScheduledBatchPollingConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/ScheduledBatchPollingConsumer.java
@@ -31,7 +31,7 @@ import org.slf4j.LoggerFactory;
  * A useful base class for any consumer which is polling batch based
  */
 public abstract class ScheduledBatchPollingConsumer extends ScheduledPollConsumer implements BatchConsumer, ShutdownAware {
-    private static final Logger LOG = LoggerFactory.getLogger(ScheduledBatchPollingConsumer.class);
+
     protected volatile ShutdownRunningTask shutdownRunningTask;
     protected volatile int pendingExchanges;
     protected int maxMessagesPerPoll;
@@ -67,7 +67,7 @@ public abstract class ScheduledBatchPollingConsumer extends ScheduledPollConsume
             // in the processBatch method and until an exchange gets enlisted as in-flight
             // which happens later, so we need to signal back to the shutdown strategy that
             // there is a pending exchange. When we are no longer polling, then we will return 0
-            LOG.trace("Currently polling so returning 1 as pending exchanges");
+            log.trace("Currently polling so returning 1 as pending exchanges");
             answer = 1;
         }
 
diff --git a/camel-core/src/main/java/org/apache/camel/impl/ScheduledPollConsumer.java b/camel-core/src/main/java/org/apache/camel/impl/ScheduledPollConsumer.java
index b19f707..1f59d0d 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/ScheduledPollConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/ScheduledPollConsumer.java
@@ -40,7 +40,6 @@ import org.slf4j.LoggerFactory;
  * A useful base class for any consumer which is polling based
  */
 public abstract class ScheduledPollConsumer extends DefaultConsumer implements Runnable, Suspendable, PollingConsumerPollingStrategy {
-    private static final Logger LOG = LoggerFactory.getLogger(ScheduledPollConsumer.class);
 
     private ScheduledPollConsumerScheduler scheduler;
     private ScheduledExecutorService scheduledExecutorService;
@@ -86,15 +85,15 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
         try {
             // log starting
             if (LoggingLevel.ERROR == runLoggingLevel) {
-                LOG.error("Scheduled task started on:   {}", this.getEndpoint());
+                log.error("Scheduled task started on:   {}", this.getEndpoint());
             } else if (LoggingLevel.WARN == runLoggingLevel) {
-                LOG.warn("Scheduled task started on:   {}", this.getEndpoint());
+                log.warn("Scheduled task started on:   {}", this.getEndpoint());
             } else if (LoggingLevel.INFO == runLoggingLevel) {
-                LOG.info("Scheduled task started on:   {}", this.getEndpoint());
+                log.info("Scheduled task started on:   {}", this.getEndpoint());
             } else if (LoggingLevel.DEBUG == runLoggingLevel) {
-                LOG.debug("Scheduled task started on:   {}", this.getEndpoint());
+                log.debug("Scheduled task started on:   {}", this.getEndpoint());
             } else {
-                LOG.trace("Scheduled task started on:   {}", this.getEndpoint());
+                log.trace("Scheduled task started on:   {}", this.getEndpoint());
             }
 
             // execute scheduled task
@@ -102,26 +101,26 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
 
             // log completed
             if (LoggingLevel.ERROR == runLoggingLevel) {
-                LOG.error("Scheduled task completed on: {}", this.getEndpoint());
+                log.error("Scheduled task completed on: {}", this.getEndpoint());
             } else if (LoggingLevel.WARN == runLoggingLevel) {
-                LOG.warn("Scheduled task completed on: {}", this.getEndpoint());
+                log.warn("Scheduled task completed on: {}", this.getEndpoint());
             } else if (LoggingLevel.INFO == runLoggingLevel) {
-                LOG.info("Scheduled task completed on: {}", this.getEndpoint());
+                log.info("Scheduled task completed on: {}", this.getEndpoint());
             } else if (LoggingLevel.DEBUG == runLoggingLevel) {
-                LOG.debug("Scheduled task completed on: {}", this.getEndpoint());
+                log.debug("Scheduled task completed on: {}", this.getEndpoint());
             } else {
-                LOG.trace("Scheduled task completed on: {}", this.getEndpoint());
+                log.trace("Scheduled task completed on: {}", this.getEndpoint());
             }
 
         } catch (Error e) {
             // must catch Error, to ensure the task is re-scheduled
-            LOG.error("Error occurred during running scheduled task on: " + this.getEndpoint() + ", due: " + e.getMessage(), e);
+            log.error("Error occurred during running scheduled task on: " + this.getEndpoint() + ", due: " + e.getMessage(), e);
         }
     }
 
     private void doRun() {
         if (isSuspended()) {
-            LOG.trace("Cannot start to poll: {} as its suspended", this.getEndpoint());
+            log.trace("Cannot start to poll: {} as its suspended", this.getEndpoint());
             return;
         }
 
@@ -133,9 +132,9 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
             if (backoffCounter++ < backoffMultiplier) {
                 // yes we should backoff
                 if (idleCounter > 0) {
-                    LOG.debug("doRun() backoff due subsequent {} idles (backoff at {}/{})", idleCounter, backoffCounter, backoffMultiplier);
+                    log.debug("doRun() backoff due subsequent {} idles (backoff at {}/{})", idleCounter, backoffCounter, backoffMultiplier);
                 } else {
-                    LOG.debug("doRun() backoff due subsequent {} errors (backoff at {}/{})", errorCounter, backoffCounter, backoffMultiplier);
+                    log.debug("doRun() backoff due subsequent {} errors (backoff at {}/{})", errorCounter, backoffCounter, backoffMultiplier);
                 }
                 return;
             } else {
@@ -143,7 +142,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
                 idleCounter = 0;
                 errorCounter = 0;
                 backoffCounter = 0;
-                LOG.trace("doRun() backoff finished, resetting counters.");
+                log.trace("doRun() backoff finished, resetting counters.");
             }
         }
 
@@ -160,9 +159,9 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
                 if (isPollAllowed()) {
 
                     if (retryCounter == -1) {
-                        LOG.trace("Starting to poll: {}", this.getEndpoint());
+                        log.trace("Starting to poll: {}", this.getEndpoint());
                     } else {
-                        LOG.debug("Retrying attempt {} to poll: {}", retryCounter, this.getEndpoint());
+                        log.debug("Retrying attempt {} to poll: {}", retryCounter, this.getEndpoint());
                     }
 
                     // mark we are polling which should also include the begin/poll/commit
@@ -172,7 +171,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
                         if (begin) {
                             retryCounter++;
                             polledMessages = poll();
-                            LOG.trace("Polled {} messages", polledMessages);
+                            log.trace("Polled {} messages", polledMessages);
 
                             if (polledMessages == 0 && isSendEmptyMessageWhenIdle()) {
                                 // send an "empty" exchange
@@ -184,17 +183,17 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
                             if (polledMessages > 0 && isGreedy()) {
                                 done = false;
                                 retryCounter = -1;
-                                LOG.trace("Greedy polling after processing {} messages", polledMessages);
+                                log.trace("Greedy polling after processing {} messages", polledMessages);
                             }
                         } else {
-                            LOG.debug("Cannot begin polling as pollStrategy returned false: {}", pollStrategy);
+                            log.debug("Cannot begin polling as pollStrategy returned false: {}", pollStrategy);
                         }
                     } finally {
                         polling = false;
                     }
                 }
 
-                LOG.trace("Finished polling: {}", this.getEndpoint());
+                log.trace("Finished polling: {}", this.getEndpoint());
             } catch (Exception e) {
                 try {
                     boolean retry = pollStrategy.rollback(this, getEndpoint(), retryCounter, e);
@@ -221,7 +220,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
                     getExceptionHandler().handleException("Consumer " + this + " failed polling endpoint: " + getEndpoint()
                             + ". Will try again at next poll", cause);
                 } catch (Throwable e) {
-                    LOG.warn("Error handling exception. This exception will be ignored.", e);
+                    log.warn("Error handling exception. This exception will be ignored.", e);
                 }
             }
         }
@@ -233,7 +232,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
             idleCounter = polledMessages == 0 ? ++idleCounter : 0;
             errorCounter = 0;
         }
-        LOG.trace("doRun() done with idleCounter={}, errorCounter={}", idleCounter, errorCounter);
+        log.trace("doRun() done with idleCounter={}, errorCounter={}", idleCounter, errorCounter);
 
         // avoid this thread to throw exceptions because the thread pool wont re-schedule a new thread
     }
@@ -411,7 +410,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
             if (backoffIdleThreshold <= 0 && backoffErrorThreshold <= 0) {
                 throw new IllegalArgumentException("backoffIdleThreshold and/or backoffErrorThreshold must be configured to a positive value when using backoffMultiplier");
             }
-            LOG.debug("Using backoff[multiplier={}, idleThreshold={}, errorThreshold={}] on {}", backoffMultiplier, backoffIdleThreshold, backoffErrorThreshold, getEndpoint());
+            log.debug("Using backoff[multiplier={}, idleThreshold={}, errorThreshold={}] on {}", backoffMultiplier, backoffIdleThreshold, backoffErrorThreshold, getEndpoint());
         }
 
         if (scheduler == null) {
@@ -490,7 +489,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
 
     @Override
     public long beforePoll(long timeout) throws Exception {
-        LOG.trace("Before poll {}", getEndpoint());
+        log.trace("Before poll {}", getEndpoint());
         // resume or start our self
         if (!ServiceHelper.resumeService(this)) {
             ServiceHelper.startService(this);
@@ -502,7 +501,7 @@ public abstract class ScheduledPollConsumer extends DefaultConsumer implements R
 
     @Override
     public void afterPoll() throws Exception {
-        LOG.trace("After poll {}", getEndpoint());
+        log.trace("After poll {}", getEndpoint());
         // suspend or stop our self
         if (!ServiceHelper.suspendService(this)) {
             ServiceHelper.stopService(this);
diff --git a/camel-core/src/main/java/org/apache/camel/impl/ThrottlingExceptionRoutePolicy.java b/camel-core/src/main/java/org/apache/camel/impl/ThrottlingExceptionRoutePolicy.java
index cf9b7d4..042559a 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/ThrottlingExceptionRoutePolicy.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/ThrottlingExceptionRoutePolicy.java
@@ -50,7 +50,6 @@ import org.slf4j.LoggerFactory;
  * to determine if the processes that cause the route to be open are now available
  */
 public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implements CamelContextAware {
-    private static final Logger LOG = LoggerFactory.getLogger(ThrottlingExceptionRoutePolicy.class);
 
     private static final int STATE_CLOSED = 0;
     private static final int STATE_HALF_OPEN = 1;
@@ -102,7 +101,7 @@ public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implement
 
     @Override
     public void onInit(Route route) {
-        LOG.debug("Initializing ThrottlingExceptionRoutePolicy route policy...");
+        log.debug("Initializing ThrottlingExceptionRoutePolicy route policy...");
         logState();
     }
 
@@ -118,7 +117,7 @@ public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implement
     public void onExchangeDone(Route route, Exchange exchange) {
         if (keepOpen.get()) {
             if (state.get() != STATE_OPEN) {
-                LOG.debug("opening circuit b/c keepOpen is on");
+                log.debug("opening circuit b/c keepOpen is on");
                 openCircuit(route);
             }
         } else {
@@ -161,9 +160,9 @@ public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implement
             }
         }
 
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
             String exceptionName = exchange.getException() == null ? "none" : exchange.getException().getClass().getSimpleName();
-            LOG.debug("hasFailed ({}) with Throttled Exception: {} for exchangeId: {}", answer, exceptionName, exchange.getExchangeId());
+            log.debug("hasFailed ({}) with Throttled Exception: {} for exchangeId: {}", answer, exceptionName, exchange.getExchangeId());
         }
         return answer;
     }
@@ -175,32 +174,32 @@ public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implement
 
         if (state.get() == STATE_CLOSED) {
             if (failureLimitReached) {
-                LOG.debug("Opening circuit...");
+                log.debug("Opening circuit...");
                 openCircuit(route);
             }
         } else if (state.get() == STATE_HALF_OPEN) {
             if (failureLimitReached) {
-                LOG.debug("Opening circuit...");
+                log.debug("Opening circuit...");
                 openCircuit(route);
             } else {
-                LOG.debug("Closing circuit...");
+                log.debug("Closing circuit...");
                 closeCircuit(route);
             }
         } else if (state.get() == STATE_OPEN) {
             if (!keepOpen.get()) {
                 long elapsedTimeSinceOpened = System.currentTimeMillis() - openedAt;
                 if (halfOpenAfter <= elapsedTimeSinceOpened) {
-                    LOG.debug("Checking an open circuit...");
+                    log.debug("Checking an open circuit...");
                     if (halfOpenHandler != null) {
                         if (halfOpenHandler.isReadyToBeClosed()) {
-                            LOG.debug("Closing circuit...");
+                            log.debug("Closing circuit...");
                             closeCircuit(route);
                         } else {
-                            LOG.debug("Opening circuit...");
+                            log.debug("Opening circuit...");
                             openCircuit(route);
                         }
                     } else {
-                        LOG.debug("Half opening circuit...");
+                        log.debug("Half opening circuit...");
                         halfOpenCircuit(route);
                     }
                 } else {
@@ -276,8 +275,8 @@ public class ThrottlingExceptionRoutePolicy extends RoutePolicySupport implement
     }
 
     private void logState() {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug(dumpState());
+        if (log.isDebugEnabled()) {
+            log.debug(dumpState());
         }
     }
 
diff --git a/camel-core/src/main/java/org/apache/camel/impl/cluster/AbstractCamelClusterService.java b/camel-core/src/main/java/org/apache/camel/impl/cluster/AbstractCamelClusterService.java
index 9652d08..b38fe2c 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/cluster/AbstractCamelClusterService.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/cluster/AbstractCamelClusterService.java
@@ -35,7 +35,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public abstract class AbstractCamelClusterService<T extends CamelClusterView> extends ServiceSupport implements CamelClusterService {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractCamelClusterService.class);
 
     private final Map<String, ViewHolder<T>> views;
     private final Map<String, Object> attributes;
@@ -193,10 +192,10 @@ public abstract class AbstractCamelClusterService<T extends CamelClusterView> ex
                 ViewHolder<T> holder = views.get(namespace);
 
                 if (holder != null) {
-                    LOGGER.info("Force start of view {}", namespace);
+                    log.info("Force start of view {}", namespace);
                     holder.startView();
                 } else {
-                    LOGGER.warn("Error forcing start of view {}: it does not exist", namespace);
+                    log.warn("Error forcing start of view {}: it does not exist", namespace);
                 }
             }
         );
@@ -210,10 +209,10 @@ public abstract class AbstractCamelClusterService<T extends CamelClusterView> ex
                 ViewHolder<T> holder = views.get(namespace);
 
                 if (holder != null) {
-                    LOGGER.info("Force stop of view {}", namespace);
+                    log.info("Force stop of view {}", namespace);
                     holder.stopView();
                 } else {
-                    LOGGER.warn("Error forcing stop of view {}: it does not exist", namespace);
+                    log.warn("Error forcing stop of view {}: it does not exist", namespace);
                 }
             }
         );
@@ -275,7 +274,7 @@ public abstract class AbstractCamelClusterService<T extends CamelClusterView> ex
         }
 
         V retain() {
-            LOGGER.debug("Retain view {}, old-refs={}", view.getNamespace(), count.get());
+            log.debug("Retain view {}, old-refs={}", view.getNamespace(), count.get());
 
             count.retain();
 
@@ -283,22 +282,22 @@ public abstract class AbstractCamelClusterService<T extends CamelClusterView> ex
         }
 
         void release() {
-            LOGGER.debug("Release view {}, old-refs={}", view.getNamespace(), count.get());
+            log.debug("Release view {}, old-refs={}", view.getNamespace(), count.get());
 
             count.release();
         }
 
         void startView() throws Exception {
             if (AbstractCamelClusterService.this.isRunAllowed()) {
-                LOGGER.debug("Start view {}", view.getNamespace());
+                log.debug("Start view {}", view.getNamespace());
                 view.start();
             } else {
-                LOGGER.debug("Can't start view {} as cluster service is not running, view will be started on service start-up", view.getNamespace());
+                log.debug("Can't start view {} as cluster service is not running, view will be started on service start-up", view.getNamespace());
             }
         }
 
         void stopView() throws Exception {
-            LOGGER.debug("Stop view {}", view.getNamespace());
+            log.debug("Stop view {}", view.getNamespace());
             view.stop();
         }
     }
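
The ViewHolder hunks switch from the removed LOGGER to log as well; that only compiles because ViewHolder can see the enclosing AbstractCamelClusterService instance, and the qualified AbstractCamelClusterService.this call above shows it is a non-static inner class. The same constraint drove the static-to-instance changes in DefaultShutdownStrategy earlier in this diff; a static nested class would instead need the logger passed in explicitly. A small demo with illustrative names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Outer {

        protected final Logger log = LoggerFactory.getLogger(getClass());

        class Inner {   // non-static, so it captures the enclosing Outer instance
            void work() {
                log.debug("logging via the enclosing instance's logger");
            }
        }

        public static void main(String[] args) {
            new Outer().new Inner().work();
        }
    }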
diff --git a/camel-core/src/main/java/org/apache/camel/impl/cluster/ClusteredRoutePolicy.java b/camel-core/src/main/java/org/apache/camel/impl/cluster/ClusteredRoutePolicy.java
index b35972b..98420ab 100644
--- a/camel-core/src/main/java/org/apache/camel/impl/cluster/ClusteredRoutePolicy.java
+++ b/camel-core/src/main/java/org/apache/camel/impl/cluster/ClusteredRoutePolicy.java
@@ -48,7 +48,6 @@ import org.slf4j.LoggerFactory;
 
 @ManagedResource(description = "Clustered Route policy using")
 public final class ClusteredRoutePolicy extends RoutePolicySupport implements CamelContextAware {
-    private static final Logger LOGGER = LoggerFactory.getLogger(ClusteredRoutePolicy.class);
 
     private final AtomicBoolean leader;
     private final Set<Route> startedRoutes;
@@ -161,7 +160,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
     public void onInit(Route route) {
         super.onInit(route);
 
-        LOGGER.info("Route managed by {}. Setting route {} AutoStartup flag to false.", getClass(), route.getId());
+        log.info("Route managed by {}. Setting route {} AutoStartup flag to false.", getClass(), route.getId());
         definition(route).setAutoStartup("false");
 
         this.refCount.retain();
@@ -178,7 +177,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
             );
         }
 
-        LOGGER.debug("ClusteredRoutePolicy {} is using ClusterService instance {} (id={}, type={})",
+        log.debug("ClusteredRoutePolicy {} is using ClusterService instance {} (id={}, type={})",
             this,
             clusterService,
             clusterService.getId(),
@@ -208,10 +207,10 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
 
     private synchronized void setLeader(boolean isLeader) {
         if (isLeader && leader.compareAndSet(false, isLeader)) {
-            LOGGER.debug("Leadership taken");
+            log.debug("Leadership taken");
             startManagedRoutes();
         } else if (!isLeader && leader.getAndSet(isLeader)) {
-            LOGGER.debug("Leadership lost");
+            log.debug("Leadership lost");
             stopManagedRoutes();
         }
     }
@@ -235,7 +234,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
             for (Route route : stoppedRoutes) {
                 ServiceStatus status = definition(route).getStatus(getCamelContext());
                 if (status.isStartable()) {
-                    LOGGER.debug("Starting route '{}'", route.getId());
+                    log.debug("Starting route '{}'", route.getId());
                     camelContext.getRouteController().startRoute(route.getId());
 
                     startedRoutes.add(route);
@@ -267,7 +266,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
             for (Route route : startedRoutes) {
                 ServiceStatus status = definition(route).getStatus(getCamelContext());
                 if (status.isStoppable()) {
-                    LOGGER.debug("Stopping route '{}'", route.getId());
+                    log.debug("Stopping route '{}'", route.getId());
                     stopRoute(route);
 
                     stoppedRoutes.add(route);
@@ -281,7 +280,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
     }
 
     private void onCamelContextStarted() {
-        LOGGER.debug("Apply cluster policy (stopped-routes='{}', started-routes='{}')",
+        log.debug("Apply cluster policy (stopped-routes='{}', started-routes='{}')",
             stoppedRoutes.stream().map(Route::getId).collect(Collectors.joining(",")),
             startedRoutes.stream().map(Route::getId).collect(Collectors.joining(","))
         );
@@ -336,7 +335,7 @@ public final class ClusteredRoutePolicy extends RoutePolicySupport implements Ca
 
                 // Eventually delay the startup of the routes a later time
                 if (initialDelay.toMillis() > 0) {
-                    LOGGER.debug("Policy will be effective in {}", initialDelay);
+                    log.debug("Policy will be effective in {}", initialDelay);
                     executorService.schedule(ClusteredRoutePolicy.this::onCamelContextStarted, initialDelay.toMillis(), TimeUnit.MILLISECONDS);
                 } else {
                     ClusteredRoutePolicy.this.onCamelContextStarted();
diff --git a/camel-core/src/main/java/org/apache/camel/management/DefaultManagementLifecycleStrategy.java b/camel-core/src/main/java/org/apache/camel/management/DefaultManagementLifecycleStrategy.java
index 5ff6da4..3e2a133 100644
--- a/camel-core/src/main/java/org/apache/camel/management/DefaultManagementLifecycleStrategy.java
+++ b/camel-core/src/main/java/org/apache/camel/management/DefaultManagementLifecycleStrategy.java
@@ -107,8 +107,6 @@ import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.support.TimerListenerManager;
 import org.apache.camel.util.KeyValueHolder;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Default JMX managed lifecycle strategy that registered objects using the configured
@@ -120,7 +118,6 @@ import org.slf4j.LoggerFactory;
 @SuppressWarnings("deprecation")
 public class DefaultManagementLifecycleStrategy extends ServiceSupport implements LifecycleStrategy, CamelContextAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultManagementLifecycleStrategy.class);
     // the wrapped processors is for performance counters, which are in use for the created routes
     // when a route is removed, we should remove the associated processors from this map
     private final Map<Processor, KeyValueHolder<ProcessorDefinition<?>, InstrumentationProcessor>> wrappedProcessors = new HashMap<>();
@@ -179,7 +176,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
                         throw new VetoCamelContextStartException("CamelContext (" + context.getName() + ") with ObjectName[" + on + "] is already registered."
                             + " Make sure to use unique names on CamelContext when using multiple CamelContexts in the same MBeanServer.", context);
                     } else {
-                        LOG.warn("This CamelContext(" + context.getName() + ") will be registered using the name: " + managementName
+                        log.warn("This CamelContext(" + context.getName() + ") will be registered using the name: " + managementName
                             + " due to clash with an existing name already registered in MBeanServer.");
                     }
                 }
@@ -224,7 +221,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             }
             manageObject(me);
         } catch (Exception e) {
-            LOG.warn("Could not register CamelHealth MBean. This exception will be ignored.", e);
+            log.warn("Could not register CamelHealth MBean. This exception will be ignored.", e);
         }
 
         try {
@@ -235,7 +232,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             }
             manageObject(me);
         } catch (Exception e) {
-            LOG.warn("Could not register RouteController MBean. This exception will be ignored.", e);
+            log.warn("Could not register RouteController MBean. This exception will be ignored.", e);
         }
     }
 
@@ -253,8 +250,8 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             newName = strategy.getNextName();
             ObjectName on = getManagementStrategy().getManagementNamingStrategy().getObjectNameForCamelContext(newName, name);
             done = !getManagementStrategy().isManaged(mc, on);
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Using name: {} in ObjectName[{}] exists? {}", name, on, done);
+            if (log.isTraceEnabled()) {
+                log.trace("Using name: {} in ObjectName[{}] exists? {}", name, on, done);
             }
         }
         return newName;
@@ -274,7 +271,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             return;
         }
 
-        LOG.debug("Registering {} pre registered services", preServices.size());
+        log.debug("Registering {} pre registered services", preServices.size());
         for (PreRegisterService pre : preServices) {
             if (pre.getComponent() != null) {
                 onComponentAdd(pre.getName(), pre.getComponent());
@@ -302,7 +299,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
                 unmanageObject(mc);
             }
         } catch (Exception e) {
-            LOG.warn("Could not unregister RouteController MBean", e);
+            log.warn("Could not unregister RouteController MBean", e);
         }
 
         try {
@@ -312,7 +309,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
                 unmanageObject(mc);
             }
         } catch (Exception e) {
-            LOG.warn("Could not unregister CamelHealth MBean", e);
+            log.warn("Could not unregister CamelHealth MBean", e);
         }
 
         try {
@@ -322,7 +319,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
                 unmanageObject(mc);
             }
         } catch (Exception e) {
-            LOG.warn("Could not unregister CamelContext MBean", e);
+            log.warn("Could not unregister CamelContext MBean", e);
         }
 
         camelContextMBean = null;
@@ -341,7 +338,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             Object mc = getManagementObjectStrategy().getManagedObjectForComponent(camelContext, component, name);
             manageObject(mc);
         } catch (Exception e) {
-            LOG.warn("Could not register Component MBean", e);
+            log.warn("Could not register Component MBean", e);
         }
     }
 
@@ -354,7 +351,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             Object mc = getManagementObjectStrategy().getManagedObjectForComponent(camelContext, component, name);
             unmanageObject(mc);
         } catch (Exception e) {
-            LOG.warn("Could not unregister Component MBean", e);
+            log.warn("Could not unregister Component MBean", e);
         }
     }
 
@@ -387,7 +384,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             }
             manageObject(me);
         } catch (Exception e) {
-            LOG.warn("Could not register Endpoint MBean for endpoint: " + endpoint + ". This exception will be ignored.", e);
+            log.warn("Could not register Endpoint MBean for endpoint: " + endpoint + ". This exception will be ignored.", e);
         }
     }
 
@@ -401,7 +398,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             Object me = getManagementObjectStrategy().getManagedObjectForEndpoint(camelContext, endpoint);
             unmanageObject(me);
         } catch (Exception e) {
-            LOG.warn("Could not unregister Endpoint MBean for endpoint: " + endpoint + ". This exception will be ignored.", e);
+            log.warn("Could not unregister Endpoint MBean for endpoint: " + endpoint + ". This exception will be ignored.", e);
         }
     }
 
@@ -430,14 +427,14 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
 
         // skip already managed services, for example if a route has been restarted
         if (getManagementStrategy().isManaged(managedObject, null)) {
-            LOG.trace("The service is already managed: {}", service);
+            log.trace("The service is already managed: {}", service);
             return;
         }
 
         try {
             manageObject(managedObject);
         } catch (Exception e) {
-            LOG.warn("Could not register service: " + service + " as Service MBean.", e);
+            log.warn("Could not register service: " + service + " as Service MBean.", e);
         }
     }
 
@@ -452,7 +449,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             try {
                 unmanageObject(managedObject);
             } catch (Exception e) {
-                LOG.warn("Could not unregister service: " + service + " as Service MBean.", e);
+                log.warn("Could not unregister service: " + service + " as Service MBean.", e);
             }
         }
     }
@@ -594,7 +591,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
 
             // skip already managed routes, for example if the route has been restarted
             if (getManagementStrategy().isManaged(mr, null)) {
-                LOG.trace("The route is already managed: {}", route);
+                log.trace("The route is already managed: {}", route);
                 continue;
             }
 
@@ -623,9 +620,9 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             try {
                 manageObject(mr);
             } catch (JMException e) {
-                LOG.warn("Could not register Route MBean", e);
+                log.warn("Could not register Route MBean", e);
             } catch (Exception e) {
-                LOG.warn("Could not create Route MBean", e);
+                log.warn("Could not create Route MBean", e);
             }
         }
     }
@@ -640,15 +637,15 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             Object mr = getManagementObjectStrategy().getManagedObjectForRoute(camelContext, route);
 
             // skip unmanaged routes
-            if (!getManagementStrategy().isManaged(mr, null)) {
-                LOG.trace("The route is not managed: {}", route);
+            if (!getManagementStrategy().isManaged(mr)) {
+                log.trace("The route is not managed: {}", route);
                 continue;
             }
 
             try {
                 unmanageObject(mr);
             } catch (Exception e) {
-                LOG.warn("Could not unregister Route MBean", e);
+                log.warn("Could not unregister Route MBean", e);
             }
 
             // remove from known routes ids, as the route has been removed
@@ -670,14 +667,14 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
 
         // skip already managed services, for example if a route has been restarted
         if (getManagementStrategy().isManaged(me, null)) {
-            LOG.trace("The error handler builder is already managed: {}", errorHandlerBuilder);
+            log.trace("The error handler builder is already managed: {}", errorHandlerBuilder);
             return;
         }
 
         try {
             manageObject(me);
         } catch (Exception e) {
-            LOG.warn("Could not register error handler builder: " + errorHandlerBuilder + " as ErrorHandler MBean.", e);
+            log.warn("Could not register error handler builder: " + errorHandlerBuilder + " as ErrorHandler MBean.", e);
         }
     }
 
@@ -691,7 +688,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             try {
                 unmanageObject(me);
             } catch (Exception e) {
-                LOG.warn("Could not unregister error handler: " + me + " as ErrorHandler MBean.", e);
+                log.warn("Could not unregister error handler: " + me + " as ErrorHandler MBean.", e);
             }
         }
     }
@@ -708,7 +705,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
 
         // skip already managed services, for example if a route has been restarted
         if (getManagementStrategy().isManaged(mtp, null)) {
-            LOG.trace("The thread pool is already managed: {}", threadPool);
+            log.trace("The thread pool is already managed: {}", threadPool);
             return;
         }
 
@@ -718,7 +715,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             // we need to keep track here, as we cannot re-construct the thread pool ObjectName when removing the thread pool
             managedThreadPools.put(threadPool, mtp);
         } catch (Exception e) {
-            LOG.warn("Could not register thread pool: " + threadPool + " as ThreadPool MBean.", e);
+            log.warn("Could not register thread pool: " + threadPool + " as ThreadPool MBean.", e);
         }
     }
 
@@ -732,14 +729,14 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
         if (mtp != null) {
             // skip unmanaged routes
             if (!getManagementStrategy().isManaged(mtp, null)) {
-                LOG.trace("The thread pool is not managed: {}", threadPool);
+                log.trace("The thread pool is not managed: {}", threadPool);
                 return;
             }
 
             try {
                 unmanageObject(mtp);
             } catch (Exception e) {
-                LOG.warn("Could not unregister ThreadPool MBean", e);
+                log.warn("Could not unregister ThreadPool MBean", e);
             }
         }
     }
@@ -908,7 +905,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
             return false;
         }
 
-        LOG.trace("Checking whether to register {} from route: {}", service, route);
+        log.trace("Checking whether to register {} from route: {}", service, route);
 
         ManagementAgent agent = getManagementStrategy().getManagementAgent();
         if (agent == null) {
@@ -963,7 +960,7 @@ public class DefaultManagementLifecycleStrategy extends ServiceSupport implement
                     && camelContext.getManagementStrategy().getManagementAgent().getLoadStatisticsEnabled();
             boolean disabled = !load || camelContext.getManagementStrategy().getManagementAgent().getStatisticsLevel() == ManagementStatisticsLevel.Off;
 
-            LOG.debug("Load performance statistics {}", disabled ? "disabled" : "enabled");
+            log.debug("Load performance statistics {}", disabled ? "disabled" : "enabled");
             if (!disabled) {
                 // must use 1 sec interval as the load statistics is based on 1 sec calculations
                 loadTimer.setInterval(1000);
diff --git a/camel-core/src/main/java/org/apache/camel/management/DefaultManagementMBeanAssembler.java b/camel-core/src/main/java/org/apache/camel/management/DefaultManagementMBeanAssembler.java
index dc8f970..444722f 100644
--- a/camel-core/src/main/java/org/apache/camel/management/DefaultManagementMBeanAssembler.java
+++ b/camel-core/src/main/java/org/apache/camel/management/DefaultManagementMBeanAssembler.java
@@ -32,8 +32,6 @@ import org.apache.camel.spi.ManagementMBeanAssembler;
 import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.ServiceHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An assembler to assemble a {@link javax.management.modelmbean.ModelMBean} which can be used
@@ -43,7 +41,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class DefaultManagementMBeanAssembler extends ServiceSupport implements ManagementMBeanAssembler {
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultManagementMBeanAssembler.class);
+
     protected final MBeanInfoAssembler assembler;
     protected final CamelContext camelContext;
 
@@ -62,7 +60,7 @@ public class DefaultManagementMBeanAssembler extends ServiceSupport implements M
             // there may be a custom embedded instance which have additional methods
             custom = ((ManagedInstance) obj).getInstance();
             if (custom != null && ObjectHelper.hasAnnotation(custom.getClass().getAnnotations(), ManagedResource.class)) {
-                LOG.trace("Assembling MBeanInfo for: {} from custom @ManagedResource object: {}", name, custom);
+                log.trace("Assembling MBeanInfo for: {} from custom @ManagedResource object: {}", name, custom);
                 // get the mbean info into different groups (mbi = both, standard = standard out of the box mbi)
                 mbi = assembler.getMBeanInfo(obj, custom, name.toString());
                 standardMbi = assembler.getMBeanInfo(obj, null, name.toString());
@@ -71,7 +69,7 @@ public class DefaultManagementMBeanAssembler extends ServiceSupport implements M
 
         if (mbi == null) {
             // use the default provided mbean which has been annotated with JMX annotations
-            LOG.trace("Assembling MBeanInfo for: {} from @ManagedResource object: {}", name, obj);
+            log.trace("Assembling MBeanInfo for: {} from @ManagedResource object: {}", name, obj);
             mbi = assembler.getMBeanInfo(obj, null, name.toString());
         }
 
diff --git a/camel-core/src/main/java/org/apache/camel/processor/CamelInternalProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/CamelInternalProcessor.java
index b1da7ef..b184be5 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/CamelInternalProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/CamelInternalProcessor.java
@@ -83,7 +83,6 @@ import org.slf4j.LoggerFactory;
  */
 public class CamelInternalProcessor extends DelegateAsyncProcessor {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CamelInternalProcessor.class);
     private final List<CamelInternalProcessorAdvice> advices = new ArrayList<>();
 
     public CamelInternalProcessor() {
@@ -163,11 +162,11 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
         Object synchronous = exchange.removeProperty(Exchange.UNIT_OF_WORK_PROCESS_SYNC);
         if (exchange.isTransacted() || synchronous != null) {
             // must be synchronized for transacted exchanges
-            if (LOG.isTraceEnabled()) {
+            if (log.isTraceEnabled()) {
                 if (exchange.isTransacted()) {
-                    LOG.trace("Transacted Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
+                    log.trace("Transacted Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
                 } else {
-                    LOG.trace("Synchronous UnitOfWork Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
+                    log.trace("Synchronous UnitOfWork Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
                 }
             }
             // ----------------------------------------------------------
@@ -196,8 +195,8 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
             // ----------------------------------------------------------
             // CAMEL END USER - DEBUG ME HERE +++ START +++
             // ----------------------------------------------------------
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Processing exchange for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
+            if (log.isTraceEnabled()) {
+                log.trace("Processing exchange for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
             }
             boolean sync = processor.process(exchange, async);
             // ----------------------------------------------------------
@@ -209,8 +208,8 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
                 uow.afterProcess(processor, exchange, callback, sync);
             }
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Exchange processed and is continued routed {} for exchangeId: {} -> {}",
+            if (log.isTraceEnabled()) {
+                log.trace("Exchange processed and is continued routed {} for exchangeId: {} -> {}",
                         new Object[]{sync ? "synchronously" : "asynchronously", exchange.getExchangeId(), exchange});
             }
             return sync;
@@ -276,7 +275,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
         if (stop != null) {
             boolean doStop = exchange.getContext().getTypeConverter().convertTo(Boolean.class, stop);
             if (doStop) {
-                LOG.debug("Exchange is marked to stop routing: {}", exchange);
+                log.debug("Exchange is marked to stop routing: {}", exchange);
                 return false;
             }
         }
@@ -285,7 +284,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
         boolean forceShutdown = exchange.getContext().getShutdownStrategy().forceShutdown(this);
         if (forceShutdown) {
             String msg = "Run not allowed as ShutdownStrategy is forcing shutting down, will reject executing exchange: " + exchange;
-            LOG.debug(msg);
+            log.debug(msg);
             if (exchange.getException() == null) {
                 exchange.setException(new RejectedExecutionException(msg));
             }
@@ -335,6 +334,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
      */
     public static class InstrumentationAdvice implements CamelInternalProcessorAdvice<StopWatch> {
 
+        private final Logger log = LoggerFactory.getLogger(getClass());
         private PerformanceCounter counter;
         private String type;
 
@@ -362,8 +362,8 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
         }
 
         protected void recordTime(Exchange exchange, long duration) {
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("{}Recording duration: {} millis for exchange: {}", type != null ? type + ": " : "", duration, exchange);
+            if (log.isTraceEnabled()) {
+                log.trace("{}Recording duration: {} millis for exchange: {}", type != null ? type + ": " : "", duration, exchange);
             }
 
             if (!exchange.isFailed() && exchange.getException() == null) {
@@ -430,6 +430,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
      */
     public static class RoutePolicyAdvice implements CamelInternalProcessorAdvice {
 
+        private final Logger log = LoggerFactory.getLogger(getClass());
         private final List<RoutePolicy> routePolicies;
         private Route route;
 
@@ -464,7 +465,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
                         policy.onExchangeBegin(route, exchange);
                     }
                 } catch (Exception e) {
-                    LOG.warn("Error occurred during onExchangeBegin on RoutePolicy: " + policy
+                    log.warn("Error occurred during onExchangeBegin on RoutePolicy: " + policy
                             + ". This exception will be ignored", e);
                 }
             }
@@ -485,7 +486,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
                         policy.onExchangeDone(route, exchange);
                     }
                 } catch (Exception e) {
-                    LOG.warn("Error occurred during onExchangeDone on RoutePolicy: " + policy
+                    log.warn("Error occurred during onExchangeDone on RoutePolicy: " + policy
                             + ". This exception will be ignored", e);
                 }
             }
@@ -811,6 +812,7 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
      */
     public static class DelayerAdvice implements CamelInternalProcessorAdvice {
 
+        private final Logger log = LoggerFactory.getLogger(getClass());
         private final long delay;
 
         public DelayerAdvice(long delay) {
@@ -820,10 +822,10 @@ public class CamelInternalProcessor extends DelegateAsyncProcessor {
         @Override
         public Object before(Exchange exchange) throws Exception {
             try {
-                LOG.trace("Sleeping for: {} millis", delay);
+                log.trace("Sleeping for: {} millis", delay);
                 Thread.sleep(delay);
             } catch (InterruptedException e) {
-                LOG.debug("Sleep interrupted");
+                log.debug("Sleep interrupted");
                 Thread.currentThread().interrupt();
                 throw e;
             }
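
CamelInternalProcessor needs a small twist: its nested advice classes (InstrumentationAdvice, RoutePolicyAdvice, DelayerAdvice) are static, so they have no enclosing instance and cannot use the protected 'log' inherited by the outer processor; the commit therefore gives each advice its own instance logger. A minimal sketch of that arrangement, with hypothetical class names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class OuterProcessor {

        public static class SomeAdvice {
            // a static nested class has no enclosing instance, so the outer
            // class's inherited 'log' is not reachable here; declare a
            // per-instance logger instead, as the advices above do
            private final Logger log = LoggerFactory.getLogger(getClass());

            public void before(Object exchange) {
                log.trace("advice invoked for {}", exchange);
            }
        }
    }
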
diff --git a/camel-core/src/main/java/org/apache/camel/processor/CatchProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/CatchProcessor.java
index c2ea27f..cf7f8e5 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/CatchProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/CatchProcessor.java
@@ -36,7 +36,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class CatchProcessor extends DelegateAsyncProcessor implements Traceable, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(CatchProcessor.class);
 
     private String id;
     private final List<Class<? extends Throwable>> exceptions;
@@ -76,8 +75,8 @@ public class CatchProcessor extends DelegateAsyncProcessor implements Traceable,
             callback.done(true);
             return true;
         }
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("This CatchProcessor catches the exception: {} caused by: {}", caught.getClass().getName(), e.getMessage());
+        if (log.isTraceEnabled()) {
+            log.trace("This CatchProcessor catches the exception: {} caused by: {}", caught.getClass().getName(), e.getMessage());
         }
 
         // store the last to endpoint as the failure endpoint
@@ -94,8 +93,8 @@ public class CatchProcessor extends DelegateAsyncProcessor implements Traceable,
         // is the exception handled by the catch clause
         final boolean handled = handles(exchange);
 
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("The exception is handled: {} for the exception: {} caused by: {}",
+        if (log.isDebugEnabled()) {
+            log.debug("The exception is handled: {} for the exception: {} caused by: {}",
                     new Object[]{handled, e.getClass().getName(), e.getMessage()});
         }
 
diff --git a/camel-core/src/main/java/org/apache/camel/processor/ChoiceProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/ChoiceProcessor.java
index 5c1c54f..1c3f118 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/ChoiceProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/ChoiceProcessor.java
@@ -44,7 +44,7 @@ import static org.apache.camel.processor.PipelineHelper.continueProcessing;
  * @version 
  */
 public class ChoiceProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(ChoiceProcessor.class);
+
     private String id;
     private final List<FilterProcessor> filters;
     private final Processor otherwise;
@@ -103,7 +103,7 @@ public class ChoiceProcessor extends ServiceSupport implements AsyncProcessor, N
             }
 
             // check for error if so we should break out
-            if (!continueProcessing(exchange, "so breaking out of choice", LOG)) {
+            if (!continueProcessing(exchange, "so breaking out of choice", log)) {
                 break;
             }
 
diff --git a/camel-core/src/main/java/org/apache/camel/processor/ClaimCheckProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/ClaimCheckProcessor.java
index 19743f5..d0da898 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/ClaimCheckProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/ClaimCheckProcessor.java
@@ -43,7 +43,6 @@ import org.slf4j.LoggerFactory;
  */
 public class ClaimCheckProcessor extends ServiceSupport implements AsyncProcessor, IdAware, CamelContextAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ClaimCheckProcessor.class);
     private CamelContext camelContext;
     private String id;
     private String operation;
@@ -122,13 +121,13 @@ public class ClaimCheckProcessor extends ServiceSupport implements AsyncProcesso
                 Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
                 boolean addedNew = repo.add(key, copy);
                 if (addedNew) {
-                    LOG.debug("Add: {} -> {}", key, copy);
+                    log.debug("Add: {} -> {}", key, copy);
                 } else {
-                    LOG.debug("Override: {} -> {}", key, copy);
+                    log.debug("Override: {} -> {}", key, copy);
                 }
             } else if ("Get".equals(operation)) {
                 Exchange copy = repo.get(key);
-                LOG.debug("Get: {} -> {}", key, exchange);
+                log.debug("Get: {} -> {}", key, exchange);
                 if (copy != null) {
                     Exchange result = aggregationStrategy.aggregate(exchange, copy);
                     if (result != null) {
@@ -137,7 +136,7 @@ public class ClaimCheckProcessor extends ServiceSupport implements AsyncProcesso
                 }
             } else if ("GetAndRemove".equals(operation)) {
                 Exchange copy = repo.getAndRemove(key);
-                LOG.debug("GetAndRemove: {} -> {}", key, exchange);
+                log.debug("GetAndRemove: {} -> {}", key, exchange);
                 if (copy != null) {
                     // prepare the exchanges for aggregation
                     ExchangeHelper.prepareAggregation(exchange, copy);
@@ -149,11 +148,11 @@ public class ClaimCheckProcessor extends ServiceSupport implements AsyncProcesso
             } else if ("Push".equals(operation)) {
                 // copy exchange, and do not share the unit of work
                 Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
-                LOG.debug("Push: {} -> {}", key, copy);
+                log.debug("Push: {} -> {}", key, copy);
                 repo.push(copy);
             } else if ("Pop".equals(operation)) {
                 Exchange copy = repo.pop();
-                LOG.debug("Pop: {} -> {}", key, exchange);
+                log.debug("Pop: {} -> {}", key, exchange);
                 if (copy != null) {
                     // prepare the exchanges for aggregation
                     ExchangeHelper.prepareAggregation(exchange, copy);
diff --git a/camel-core/src/main/java/org/apache/camel/processor/DelayProcessorSupport.java b/camel-core/src/main/java/org/apache/camel/processor/DelayProcessorSupport.java
index 80abba9..a3269f0 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/DelayProcessorSupport.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/DelayProcessorSupport.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public abstract class DelayProcessorSupport extends DelegateAsyncProcessor {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     private final CamelContext camelContext;
     private final ScheduledExecutorService executorService;
     private final boolean shutdownExecutorService;
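
DelayProcessorSupport (and ErrorHandlerSupport just below) previously declared 'protected final Logger log = LoggerFactory.getLogger(getClass())' themselves; with the same field now on ServiceSupport, the local declaration is simply removed, since keeping it would only hide the inherited one. A tiny sketch of why nothing is lost by dropping it (both classes here are hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class Base {
        protected final Logger log = LoggerFactory.getLogger(getClass());
    }

    class Child extends Base {
        // no logger field needed: 'log' is inherited, and because the base class
        // uses getClass() the logger is already named after Child
        void work() {
            log.debug("working");
        }
    }
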
diff --git a/camel-core/src/main/java/org/apache/camel/processor/Enricher.java b/camel-core/src/main/java/org/apache/camel/processor/Enricher.java
index 2f488b3..d980e69a 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/Enricher.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/Enricher.java
@@ -57,7 +57,6 @@ import static org.apache.camel.util.ExchangeHelper.copyResultsPreservePattern;
  */
 public class Enricher extends ServiceSupport implements AsyncProcessor, IdAware, CamelContextAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(Enricher.class);
     private CamelContext camelContext;
     private String id;
     private ProducerCache producerCache;
@@ -166,8 +165,8 @@ public class Enricher extends ServiceSupport implements AsyncProcessor, IdAware,
             producer = producerCache.acquireProducer(endpoint);
         } catch (Throwable e) {
             if (isIgnoreInvalidEndpoint()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
+                if (log.isDebugEnabled()) {
+                    log.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
                 }
             } else {
                 exchange.setException(e);
@@ -238,13 +237,13 @@ public class Enricher extends ServiceSupport implements AsyncProcessor, IdAware,
         });
 
         if (!sync) {
-            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
+            log.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
             // the remainder of the routing slip will be completed async
             // so we break out now, then the callback will be invoked which then continue routing from where we left here
             return false;
         }
 
-        LOG.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
+        log.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
 
         if (watch != null) {
             // emit event that the exchange was sent to the endpoint
@@ -346,7 +345,7 @@ public class Enricher extends ServiceSupport implements AsyncProcessor, IdAware,
 
         if (producerCache == null) {
             producerCache = new ProducerCache(this, camelContext, cacheSize);
-            LOG.debug("Enricher {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
+            log.debug("Enricher {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
         }
 
         ServiceHelper.startService(producerCache, aggregationStrategy);
diff --git a/camel-core/src/main/java/org/apache/camel/processor/ErrorHandlerSupport.java b/camel-core/src/main/java/org/apache/camel/processor/ErrorHandlerSupport.java
index 90ba3c1..b86740a 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/ErrorHandlerSupport.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/ErrorHandlerSupport.java
@@ -40,8 +40,6 @@ import org.slf4j.LoggerFactory;
  */
 public abstract class ErrorHandlerSupport extends ChildServiceSupport implements ErrorHandler {
 
-    protected final Logger log = LoggerFactory.getLogger(getClass());
-
     protected final Map<ExceptionPolicyKey, OnExceptionDefinition> exceptionPolicies = new LinkedHashMap<>();
     protected ExceptionPolicyStrategy exceptionPolicy = createDefaultExceptionPolicyStrategy();
 
diff --git a/camel-core/src/main/java/org/apache/camel/processor/FatalFallbackErrorHandler.java b/camel-core/src/main/java/org/apache/camel/processor/FatalFallbackErrorHandler.java
index 77cc9fa..f2a3d9d 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/FatalFallbackErrorHandler.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/FatalFallbackErrorHandler.java
@@ -39,8 +39,6 @@ import static org.apache.camel.builder.ExpressionBuilder.routeIdExpression;
  */
 public class FatalFallbackErrorHandler extends DelegateAsyncProcessor implements ErrorHandler {
 
-    private static final Logger LOG = LoggerFactory.getLogger(FatalFallbackErrorHandler.class);
-
     private boolean deadLetterChannel;
 
     public FatalFallbackErrorHandler(Processor processor) {
@@ -65,7 +63,7 @@ public class FatalFallbackErrorHandler extends DelegateAsyncProcessor implements
             exchange.setProperty(Exchange.FATAL_FALLBACK_ERROR_HANDLER, fatals);
         }
         if (fatals.contains(id)) {
-            LOG.warn("Circular error-handler detected at route: {} - breaking out processing Exchange: {}", id, exchange);
+            log.warn("Circular error-handler detected at route: {} - breaking out processing Exchange: {}", id, exchange);
             // mark this exchange as already having been handled by the error handler (just by having this property)
             // the false value means the caught exception will be kept on the exchange, causing the
             // exception to be propagated back to the caller, and to break out of routing
@@ -163,15 +161,15 @@ public class FatalFallbackErrorHandler extends DelegateAsyncProcessor implements
         // when using dead letter channel we only want to log at WARN level
         if (deadLetterChannel) {
             if (t != null) {
-                LOG.warn(message, t);
+                log.warn(message, t);
             } else {
-                LOG.warn(message);
+                log.warn(message);
             }
         } else {
             if (t != null) {
-                LOG.error(message, t);
+                log.error(message, t);
             } else {
-                LOG.error(message);
+                log.error(message);
             }
         }
     }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/FilterProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/FilterProcessor.java
index 3963ebd..f9dd9b9 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/FilterProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/FilterProcessor.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class FilterProcessor extends DelegateAsyncProcessor implements Traceable, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(FilterProcessor.class);
+
     private String id;
     private final Predicate predicate;
     private transient long filtered;
@@ -64,7 +64,7 @@ public class FilterProcessor extends DelegateAsyncProcessor implements Traceable
     public boolean matches(Exchange exchange) {
         boolean matches = predicate.matches(exchange);
 
-        LOG.debug("Filter matches: {} for exchange: {}", matches, exchange);
+        log.debug("Filter matches: {} for exchange: {}", matches, exchange);
 
         // set property whether the filter matches or not
         exchange.setProperty(Exchange.FILTER_MATCHED, matches);
diff --git a/camel-core/src/main/java/org/apache/camel/processor/FinallyProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/FinallyProcessor.java
index bf543dd..f6af37c 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/FinallyProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/FinallyProcessor.java
@@ -32,7 +32,6 @@ import org.slf4j.LoggerFactory;
  */
 public class FinallyProcessor extends DelegateAsyncProcessor implements Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(FinallyProcessor.class);
     private String id;
 
     public FinallyProcessor(Processor processor) {
@@ -84,7 +83,7 @@ public class FinallyProcessor extends DelegateAsyncProcessor implements Traceabl
         this.id = id;
     }
 
-    private static final class FinallyAsyncCallback implements AsyncCallback {
+    private final class FinallyAsyncCallback implements AsyncCallback {
 
         private final Exchange exchange;
         private final AsyncCallback callback;
@@ -120,7 +119,7 @@ public class FinallyProcessor extends DelegateAsyncProcessor implements Traceabl
                 if (!doneSync) {
                     // signal callback to continue routing async
                     ExchangeHelper.prepareOutToIn(exchange);
-                    LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+                    log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
                 }
             } finally {
                 // callback must always be called
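
FinallyProcessor takes the opposite approach to the advices in CamelInternalProcessor: rather than giving FinallyAsyncCallback its own logger, the commit drops the static modifier so the callback becomes an inner class and can use the enclosing processor's inherited 'log', at the cost of holding an implicit reference to the enclosing instance. A minimal sketch of that choice, with hypothetical names:

    import org.apache.camel.support.ServiceSupport;

    public class SketchProcessor extends ServiceSupport {

        @Override
        protected void doStart() throws Exception {
        }

        @Override
        protected void doStop() throws Exception {
        }

        // non-static inner class: it can reference the enclosing instance's
        // inherited 'log' directly, so no extra logger field is needed
        private final class SketchCallback {
            void done(boolean doneSync) {
                log.trace("Processing complete, doneSync={}", doneSync);
            }
        }
    }
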
diff --git a/camel-core/src/main/java/org/apache/camel/processor/LogProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/LogProcessor.java
index 3909e7b..20d95cc 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/LogProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/LogProcessor.java
@@ -39,7 +39,6 @@ import org.slf4j.LoggerFactory;
  */
 public class LogProcessor extends ServiceSupport implements AsyncProcessor, Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(LogProcessor.class);
     private String id;
     private final Expression expression;
     private final CamelLogger logger;
@@ -89,9 +88,9 @@ public class LogProcessor extends ServiceSupport implements AsyncProcessor, Trac
                 String output = listener.onLog(exchange, logger, message);
                 message = output != null ? output : message;
             } catch (Throwable t) {
-                LOG.warn("Ignoring an exception thrown by {}: {}", listener.getClass().getName(), t.getMessage());
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("", t);
+                log.warn("Ignoring an exception thrown by {}: {}", listener.getClass().getName(), t.getMessage());
+                if (log.isDebugEnabled()) {
+                    log.debug("", t);
                 }
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/LoopProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/LoopProcessor.java
index 07bb255..531572c 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/LoopProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/LoopProcessor.java
@@ -36,7 +36,6 @@ import static org.apache.camel.processor.PipelineHelper.continueProcessing;
  * The processor which sends messages in a loop.
  */
 public class LoopProcessor extends DelegateAsyncProcessor implements Traceable, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(LoopProcessor.class);
 
     private String id;
     private final Expression expression;
@@ -96,23 +95,23 @@ public class LoopProcessor extends DelegateAsyncProcessor implements Traceable,
             boolean sync = process(target, callback, index, count, doWhile, original);
 
             if (!sync) {
-                LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", target.getExchangeId());
+                log.trace("Processing exchangeId: {} is continued being processed asynchronously", target.getExchangeId());
                 // the remainder of the loop will be completed async
                 // so we break out now, then the callback will be invoked which then continue routing from where we left here
                 return false;
             }
 
-            LOG.trace("Processing exchangeId: {} is continued being processed synchronously", target.getExchangeId());
+            log.trace("Processing exchangeId: {} is continued being processed synchronously", target.getExchangeId());
 
             // check for error if so we should break out
-            if (!continueProcessing(target, "so breaking out of loop", LOG)) {
+            if (!continueProcessing(target, "so breaking out of loop", log)) {
                 break;
             }
         }
 
         // we are done so prepare the result
         ExchangeHelper.copyResults(exchange, target);
-        LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+        log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
         callback.done(true);
         return true;
     }
@@ -122,7 +121,7 @@ public class LoopProcessor extends DelegateAsyncProcessor implements Traceable,
                               final Exchange original) {
 
         // set current index as property
-        LOG.debug("LoopProcessor: iteration #{}", index.get());
+        log.debug("LoopProcessor: iteration #{}", index.get());
         exchange.setProperty(Exchange.LOOP_INDEX, index.get());
 
         boolean sync = processor.process(exchange, new AsyncCallback() {
@@ -154,7 +153,7 @@ public class LoopProcessor extends DelegateAsyncProcessor implements Traceable,
                 while ((predicate != null && doWhile.get()) || (index.get() < count.get())) {
 
                     // check for error if so we should break out
-                    if (!continueProcessing(target, "so breaking out of loop", LOG)) {
+                    if (!continueProcessing(target, "so breaking out of loop", log)) {
                         break;
                     }
 
@@ -164,7 +163,7 @@ public class LoopProcessor extends DelegateAsyncProcessor implements Traceable,
                     // process again
                     boolean sync = process(target, callback, index, count, doWhile, original);
                     if (!sync) {
-                        LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", target.getExchangeId());
+                        log.trace("Processing exchangeId: {} is continued being processed asynchronously", target.getExchangeId());
                         // the remainder of the routing slip will be completed async
                         // so we break out now, then the callback will be invoked which then continue routing from where we left here
                         return;
@@ -173,7 +172,7 @@ public class LoopProcessor extends DelegateAsyncProcessor implements Traceable,
 
                 // we are done so prepare the result
                 ExchangeHelper.copyResults(exchange, target);
-                LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+                log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
                 callback.done(false);
             }
         });
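
ChoiceProcessor and LoopProcessor above, and MulticastProcessor below, also hand their logger to PipelineHelper.continueProcessing(..), so switching to the instance logger there only means passing 'log' instead of the old static field. The sketch below only illustrates the general style of a helper that takes the caller's logger; it is not the real PipelineHelper, whose behaviour beyond the (Exchange, String, Logger) signature visible at the call sites is not shown in this diff:

    import org.apache.camel.Exchange;
    import org.slf4j.Logger;

    final class ProcessingHelper {

        private ProcessingHelper() {
        }

        // the caller passes its own logger so the message is attributed to the
        // calling class rather than to this helper
        static boolean continueProcessing(Exchange exchange, String message, Logger log) {
            if (exchange.getException() != null) {
                log.debug("Exchange failed, {}. Exchange: {}", message, exchange);
                return false;
            }
            return true;
        }
    }
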
diff --git a/camel-core/src/main/java/org/apache/camel/processor/MulticastProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/MulticastProcessor.java
index 44ab3cb..e69a698 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/MulticastProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/MulticastProcessor.java
@@ -85,8 +85,6 @@ import static org.apache.camel.util.ObjectHelper.notNull;
  */
 public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
-
     /**
      * Class that represent each step in the multicast route to do
      */
@@ -292,7 +290,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                     aggregationOnTheFlyDone, allTasksSubmitted, executionException);
             final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();
 
-            LOG.trace("Starting to submit parallel tasks");
+            log.trace("Starting to submit parallel tasks");
             
             try {
                 while (it.hasNext()) {
@@ -326,7 +324,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
     
                             // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                             Integer number = getExchangeIndex(subExchange);
-                            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
+                            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, log);
                             if (stopOnException && !continueProcessing) {
                                 // signal to stop running
                                 running.set(false);
@@ -338,7 +336,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                                 }
                             }
     
-                            LOG.trace("Parallel processing complete for exchange: {}", subExchange);
+                            log.trace("Parallel processing complete for exchange: {}", subExchange);
                             return subExchange;
                         }
                     });
@@ -354,32 +352,32 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                     executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                 }
                 // and because of the exception we must signal we are done so the latch can open and let the other thread continue processing
-                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
-                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
+                log.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
+                log.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                 aggregationOnTheFlyDone.countDown();
             }
 
             // signal that all tasks have been submitted
-            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
+            log.trace("Signaling that all {} tasks has been submitted.", total.get());
             allTasksSubmitted.set(true);
 
             // it is too hard to do parallel async routing so we let the caller thread be synchronous
             // and have it pick up the replies and do the aggregation (eg we use a latch to wait)
             // wait for aggregation to be done
-            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
+            log.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
             aggregationOnTheFlyDone.await();
 
             // did we fail for whatever reason, if so throw that caused exception
             if (executionException.get() != null) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
+                if (log.isDebugEnabled()) {
+                    log.debug("Parallel processing failed due {}", executionException.get().getMessage());
                 }
                 throw executionException.get();
             }
         }
 
         // now everything is okay so we are done
-        LOG.debug("Done parallel processing {} exchanges", total);
+        log.debug("Done parallel processing {} exchanges", total);
     }
 
     /**
@@ -419,7 +417,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
         }
 
         public void run() {
-            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());
+            log.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());
 
             try {
                 aggregateOnTheFly();
@@ -431,8 +429,8 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                 }
             } finally {
                 // must signal we are done so the latch can open and let the other thread continue processing
-                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
-                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
+                log.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
+                log.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                 aggregationOnTheFlyDone.countDown();
             }
         }
@@ -447,7 +445,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
             while (!done) {
                 // check if we have already aggregated everything
                 if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
-                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
+                    log.debug("Done aggregating {} exchanges on the fly.", aggregated);
                     break;
                 }
 
@@ -456,16 +454,16 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                     // we are timed out but try to grab any tasks that have already completed
                     // poll will return null if no tasks are present
                     future = completion.poll();
-                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
+                    log.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                 } else if (timeout > 0) {
                     long left = timeout - watch.taken();
                     if (left < 0) {
                         left = 0;
                     }
-                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
+                    log.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                     future = completion.poll(left, TimeUnit.MILLISECONDS);
                 } else {
-                    LOG.trace("Polling completion task #{}", aggregated);
+                    log.trace("Polling completion task #{}", aggregated);
                     // we must not block so poll every second
                     future = completion.poll(1, TimeUnit.SECONDS);
                     if (future == null) {
@@ -488,7 +486,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
 
                     // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                     Integer number = getExchangeIndex(subExchange);
-                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
+                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, log);
                     if (stopOnException && !continueProcessing) {
                         // we want to stop on exception and an exception or failure occurred
                         // this is similar to what the pipeline does, so we should do the same to not surprise end users
@@ -511,10 +509,10 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
 
             if (timedOut.get() || stoppedOnException) {
                 if (timedOut.get()) {
-                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
+                    log.debug("Cancelling tasks due timeout after {} millis.", timeout);
                 }
                 if (stoppedOnException) {
-                    LOG.debug("Cancelling tasks due stopOnException.");
+                    log.debug("Cancelling tasks due stopOnException.");
                 }
                 // cancel tasks as we timed out (its safe to cancel done tasks)
                 running.set(false);
@@ -552,7 +550,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                     // wrap in exception to explain where it failed
                     CamelExchangeException cex = new CamelExchangeException("Parallel processing failed for number " + aggregated.get(), subExchange, e);
                     subExchange.setException(cex);
-                    LOG.debug(cex.getMessage(), cex);
+                    log.debug(cex.getMessage(), cex);
                 }
             } finally {
                 aggregated.incrementAndGet();
@@ -598,9 +596,9 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                 ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
             } else {
                 // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
-                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
+                log.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
             }
-            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
+            log.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
             timedOut.set(true);
 
             // mark that index as timed out, which allows us to try to retrieve
@@ -629,21 +627,21 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
 
             boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
             if (!sync) {
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
+                if (log.isTraceEnabled()) {
+                    log.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                 }
                 // the remainder of the multicast will be completed async
                 // so we break out now, then the callback will be invoked which then continue routing from where we left here
                 return false;
             }
 
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
+            if (log.isTraceEnabled()) {
+                log.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
             }
 
             // Decide whether to continue with the multicast or not; similar logic to the Pipeline
             // remember to test for stop on exception and aggregate before copying back results
-            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
+            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), log);
             if (stopOnException && !continueProcessing) {
                 if (subExchange.getException() != null) {
                     // wrap in exception to explain where it failed
@@ -657,7 +655,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                 return true;
             }
 
-            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);
+            log.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);
 
             if (parallelAggregate) {
                 doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
@@ -668,7 +666,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
             total.incrementAndGet();
         }
 
-        LOG.debug("Done sequential processing {} exchanges", total);
+        log.debug("Done sequential processing {} exchanges", total);
 
         return true;
     }
@@ -720,7 +718,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
 
                     // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                     // remember to test for stop on exception and aggregate before copying back results
-                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
+                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), log);
                     if (stopOnException && !continueProcessing) {
                         if (subExchange.getException() != null) {
                             // wrap in exception to explain where it failed
@@ -762,13 +760,13 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
                         boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
 
                         if (!sync) {
-                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
+                            log.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
                             return;
                         }
 
                         // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                         // remember to test for stop on exception and aggregate before copying back results
-                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
+                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), log);
                         if (stopOnException && !continueProcessing) {
                             if (subExchange.getException() != null) {
                                 // wrap in exception to explain where it failed
@@ -864,7 +862,7 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
 
         // we are done so close the pairs iterator
         if (pairs instanceof Closeable) {
-            IOHelper.close((Closeable) pairs, "pairs", LOG);
+            IOHelper.close((Closeable) pairs, "pairs", log);
         }
 
         AggregationStrategy strategy = getAggregationStrategy(subExchange);
@@ -1064,11 +1062,11 @@ public class MulticastProcessor extends ServiceSupport implements AsyncProcessor
             // lookup cached first to reuse and preserve memory
             answer = errorHandlers.get(key);
             if (answer != null) {
-                LOG.trace("Using existing error handler for: {}", processor);
+                log.trace("Using existing error handler for: {}", processor);
                 return answer;
             }
 
-            LOG.trace("Creating error handler for: {}", processor);
+            log.trace("Creating error handler for: {}", processor);
             RouteDefinition route = (RouteDefinition) routeContext.getRoute();
             ErrorHandlerFactory builder = route.getErrorHandlerBuilder();
             // create error handler (create error handler directly to keep it light weight,
diff --git a/camel-core/src/main/java/org/apache/camel/processor/OnCompletionProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/OnCompletionProcessor.java
index 4579235..054c10e 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/OnCompletionProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/OnCompletionProcessor.java
@@ -48,7 +48,6 @@ import static org.apache.camel.util.ObjectHelper.notNull;
  */
 public class OnCompletionProcessor extends ServiceSupport implements AsyncProcessor, Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(OnCompletionProcessor.class);
     private final CamelContext camelContext;
     private String id;
     private final Processor processor;
@@ -206,7 +205,7 @@ public class OnCompletionProcessor extends ServiceSupport implements AsyncProces
         }
 
         if (useOriginalBody) {
-            LOG.trace("Using the original IN message instead of current");
+            log.trace("Using the original IN message instead of current");
 
             Message original = ExchangeHelper.getOriginalInMessage(exchange);
             answer.setIn(original);
@@ -242,14 +241,14 @@ public class OnCompletionProcessor extends ServiceSupport implements AsyncProces
             if (executorService != null) {
                 executorService.submit(new Callable<Exchange>() {
                     public Exchange call() throws Exception {
-                        LOG.debug("Processing onComplete: {}", copy);
+                        log.debug("Processing onComplete: {}", copy);
                         doProcess(processor, copy);
                         return copy;
                     }
                 });
             } else {
                 // run without thread-pool
-                LOG.debug("Processing onComplete: {}", copy);
+                log.debug("Processing onComplete: {}", copy);
                 doProcess(processor, copy);
             }
         }
@@ -282,7 +281,7 @@ public class OnCompletionProcessor extends ServiceSupport implements AsyncProces
             if (executorService != null) {
                 executorService.submit(new Callable<Exchange>() {
                     public Exchange call() throws Exception {
-                        LOG.debug("Processing onFailure: {}", copy);
+                        log.debug("Processing onFailure: {}", copy);
                         doProcess(processor, copy);
                         // restore exception after processing
                         copy.setException(original);
@@ -291,7 +290,7 @@ public class OnCompletionProcessor extends ServiceSupport implements AsyncProces
                 });
             } else {
                 // run without thread-pool
-                LOG.debug("Processing onFailure: {}", copy);
+                log.debug("Processing onFailure: {}", copy);
                 doProcess(processor, copy);
                 // restore exception after processing
                 copy.setException(original);
@@ -344,14 +343,14 @@ public class OnCompletionProcessor extends ServiceSupport implements AsyncProces
             if (executorService != null) {
                 executorService.submit(new Callable<Exchange>() {
                     public Exchange call() throws Exception {
-                        LOG.debug("Processing onAfterRoute: {}", copy);
+                        log.debug("Processing onAfterRoute: {}", copy);
                         doProcess(processor, copy);
                         return copy;
                     }
                 });
             } else {
                 // run without thread-pool
-                LOG.debug("Processing onAfterRoute: {}", copy);
+                log.debug("Processing onAfterRoute: {}", copy);
                 doProcess(processor, copy);
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/Pipeline.java b/camel-core/src/main/java/org/apache/camel/processor/Pipeline.java
index e75c20c..c7a0ca3 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/Pipeline.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/Pipeline.java
@@ -41,7 +41,6 @@ import static org.apache.camel.processor.PipelineHelper.continueProcessing;
  * @version 
  */
 public class Pipeline extends MulticastProcessor {
-    private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class);
 
     private String id;
 
@@ -102,16 +101,16 @@ public class Pipeline extends MulticastProcessor {
 
             // continue as long its being processed synchronously
             if (!sync) {
-                LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
+                log.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
                 // the remainder of the pipeline will be completed async
                 // so we break out now, then the callback will be invoked which then continue routing from where we left here
                 return false;
             }
 
-            LOG.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
+            log.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
 
             // check for error if so we should break out
-            if (!continueProcessing(nextExchange, "so breaking out of pipeline", LOG)) {
+            if (!continueProcessing(nextExchange, "so breaking out of pipeline", log)) {
                 break;
             }
         }
@@ -119,7 +118,7 @@ public class Pipeline extends MulticastProcessor {
         // logging nextExchange as it contains the exchange that might have altered the payload and since
         // we are logging the completion if will be confusing if we log the original instead
         // we could also consider logging the original and the nextExchange then we have *before* and *after* snapshots
-        LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), nextExchange);
+        log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), nextExchange);
 
         // copy results back to the original exchange
         ExchangeHelper.copyResults(exchange, nextExchange);
@@ -131,7 +130,7 @@ public class Pipeline extends MulticastProcessor {
     private boolean process(final Exchange original, final Exchange exchange, final AsyncCallback callback,
                             final Iterator<Processor> processors, final AsyncProcessor asyncProcessor) {
         // this does the actual processing so log at trace level
-        LOG.trace("Processing exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+        log.trace("Processing exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
 
         // implement asynchronous routing logic in callback so we can have the callback being
         // triggered and then continue routing where we left
@@ -149,20 +148,20 @@ public class Pipeline extends MulticastProcessor {
                     AsyncProcessor processor = AsyncProcessorConverterHelper.convert(processors.next());
 
                     // check for error if so we should break out
-                    if (!continueProcessing(nextExchange, "so breaking out of pipeline", LOG)) {
+                    if (!continueProcessing(nextExchange, "so breaking out of pipeline", log)) {
                         break;
                     }
 
                     nextExchange = createNextExchange(nextExchange);
                     boolean isDoneSync = process(original, nextExchange, callback, processors, processor);
                     if (!isDoneSync) {
-                        LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
+                        log.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
                         return;
                     }
                 }
 
                 ExchangeHelper.copyResults(original, nextExchange);
-                LOG.trace("Processing complete for exchangeId: {} >>> {}", original.getExchangeId(), original);
+                log.trace("Processing complete for exchangeId: {} >>> {}", original.getExchangeId(), original);
                 callback.done(false);
             }
         });
@@ -189,7 +188,7 @@ public class Pipeline extends MulticastProcessor {
         if (stop != null) {
             boolean doStop = exchange.getContext().getTypeConverter().convertTo(Boolean.class, stop);
             if (doStop) {
-                LOG.debug("ExchangeId: {} is marked to stop routing: {}", exchange.getExchangeId(), exchange);
+                log.debug("ExchangeId: {} is marked to stop routing: {}", exchange.getExchangeId(), exchange);
                 answer = false;
             }
         } else {
@@ -197,7 +196,7 @@ public class Pipeline extends MulticastProcessor {
             answer = it.hasNext();
         }
 
-        LOG.trace("ExchangeId: {} should continue routing: {}", exchange.getExchangeId(), answer);
+        log.trace("ExchangeId: {} should continue routing: {}", exchange.getExchangeId(), answer);
         return answer;
     }
 
diff --git a/camel-core/src/main/java/org/apache/camel/processor/PollEnricher.java b/camel-core/src/main/java/org/apache/camel/processor/PollEnricher.java
index 06408fc..571461c 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/PollEnricher.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/PollEnricher.java
@@ -57,7 +57,6 @@ import static org.apache.camel.util.ExchangeHelper.copyResultsPreservePattern;
  */
 public class PollEnricher extends ServiceSupport implements AsyncProcessor, IdAware, CamelContextAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(PollEnricher.class);
     private CamelContext camelContext;
     private ConsumerCache consumerCache;
     private String id;
@@ -202,8 +201,8 @@ public class PollEnricher extends ServiceSupport implements AsyncProcessor, IdAw
             consumer = consumerCache.acquirePollingConsumer(endpoint);
         } catch (Throwable e) {
             if (isIgnoreInvalidEndpoint()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
+                if (log.isDebugEnabled()) {
+                    log.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
                 }
             } else {
                 exchange.setException(e);
@@ -230,20 +229,20 @@ public class PollEnricher extends ServiceSupport implements AsyncProcessor, IdAw
         Exchange resourceExchange;
         try {
             if (timeout < 0) {
-                LOG.debug("Consumer receive: {}", consumer);
+                log.debug("Consumer receive: {}", consumer);
                 resourceExchange = consumer.receive();
             } else if (timeout == 0) {
-                LOG.debug("Consumer receiveNoWait: {}", consumer);
+                log.debug("Consumer receiveNoWait: {}", consumer);
                 resourceExchange = consumer.receiveNoWait();
             } else {
-                LOG.debug("Consumer receive with timeout: {} ms. {}", timeout, consumer);
+                log.debug("Consumer receive with timeout: {} ms. {}", timeout, consumer);
                 resourceExchange = consumer.receive(timeout);
             }
 
             if (resourceExchange == null) {
-                LOG.debug("Consumer received no exchange");
+                log.debug("Consumer received no exchange");
             } else {
-                LOG.debug("Consumer received: {}", resourceExchange);
+                log.debug("Consumer received: {}", resourceExchange);
             }
         } catch (Exception e) {
             exchange.setException(new CamelExchangeException("Error during poll", exchange, e));
@@ -373,7 +372,7 @@ public class PollEnricher extends ServiceSupport implements AsyncProcessor, IdAw
         if (consumerCache == null) {
             // create consumer cache if we use dynamic expressions for computing the endpoints to poll
             consumerCache = new ConsumerCache(this, camelContext, cacheSize);
-            LOG.debug("PollEnrich {} using ConsumerCache with cacheSize={}", this, cacheSize);
+            log.debug("PollEnrich {} using ConsumerCache with cacheSize={}", this, cacheSize);
         }
         if (aggregationStrategy instanceof CamelContextAware) {
             ((CamelContextAware) aggregationStrategy).setCamelContext(camelContext);
diff --git a/camel-core/src/main/java/org/apache/camel/processor/RecipientList.java b/camel-core/src/main/java/org/apache/camel/processor/RecipientList.java
index 108b1dd..73d6587 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/RecipientList.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/RecipientList.java
@@ -52,7 +52,6 @@ import static org.apache.camel.util.ObjectHelper.notNull;
  */
 public class RecipientList extends ServiceSupport implements AsyncProcessor, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(RecipientList.class);
     private static final String IGNORE_DELIMITER_MARKER = "false";
     private final CamelContext camelContext;
     private String id;
@@ -187,7 +186,7 @@ public class RecipientList extends ServiceSupport implements AsyncProcessor, IdA
     protected void doStart() throws Exception {
         if (producerCache == null) {
             producerCache = new ProducerCache(this, camelContext, cacheSize);
-            LOG.debug("RecipientList {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
+            log.debug("RecipientList {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
         }
         ServiceHelper.startService(aggregationStrategy, producerCache);
     }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/RecipientListProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/RecipientListProcessor.java
index b955aea..569cd1d 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/RecipientListProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/RecipientListProcessor.java
@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
  */
 public class RecipientListProcessor extends MulticastProcessor {
 
-    private static final Logger LOG = LoggerFactory.getLogger(RecipientListProcessor.class);
+    private static final Logger log = LoggerFactory.getLogger(RecipientListProcessor.class);
     private final Iterator<Object> iter;
     private boolean ignoreInvalidEndpoints;
     private ProducerCache producerCache;
@@ -110,20 +110,20 @@ public class RecipientListProcessor extends MulticastProcessor {
 
         public void begin() {
             // we have already acquired and prepare the producer
-            LOG.trace("RecipientProcessorExchangePair #{} begin: {}", index, exchange);
+            log.trace("RecipientProcessorExchangePair #{} begin: {}", index, exchange);
             exchange.setProperty(Exchange.RECIPIENT_LIST_ENDPOINT, endpoint.getEndpointUri());
             // ensure stream caching is reset
             MessageHelper.resetStreamCache(exchange.getIn());
             // if the MEP on the endpoint is different then
             if (pattern != null) {
                 originalPattern = exchange.getPattern();
-                LOG.trace("Using exchangePattern: {} on exchange: {}", pattern, exchange);
+                log.trace("Using exchangePattern: {} on exchange: {}", pattern, exchange);
                 exchange.setPattern(pattern);
             }
         }
 
         public void done() {
-            LOG.trace("RecipientProcessorExchangePair #{} done: {}", index, exchange);
+            log.trace("RecipientProcessorExchangePair #{} done: {}", index, exchange);
             try {
                 // preserve original MEP
                 if (originalPattern != null) {
@@ -132,8 +132,8 @@ public class RecipientListProcessor extends MulticastProcessor {
                 // when we are done we should release back in pool
                 producerCache.releaseProducer(endpoint, producer);
             } catch (Exception e) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Error releasing producer: " + producer + ". This exception will be ignored.", e);
+                if (log.isDebugEnabled()) {
+                    log.debug("Error releasing producer: " + producer + ". This exception will be ignored.", e);
                 }
             }
         }
@@ -194,8 +194,8 @@ public class RecipientListProcessor extends MulticastProcessor {
                 producer = producerCache.acquireProducer(endpoint);
             } catch (Exception e) {
                 if (isIgnoreInvalidEndpoints()) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
+                    if (log.isDebugEnabled()) {
+                        log.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
                     }
                     continue;
                 } else {
diff --git a/camel-core/src/main/java/org/apache/camel/processor/RoutingSlip.java b/camel-core/src/main/java/org/apache/camel/processor/RoutingSlip.java
index 915b09b..b6fed24 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/RoutingSlip.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/RoutingSlip.java
@@ -57,7 +57,7 @@ import static org.apache.camel.util.ObjectHelper.notNull;
  * pipeline to ensure it works the same and the async routing engine is flawless.
  */
 public class RoutingSlip extends ServiceSupport implements AsyncProcessor, Traceable, IdAware {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
+
     protected String id;
     protected ProducerCache producerCache;
     protected int cacheSize;
diff --git a/camel-core/src/main/java/org/apache/camel/processor/SamplingThrottler.java b/camel-core/src/main/java/org/apache/camel/processor/SamplingThrottler.java
index 9d6bbfe..314dc4c 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/SamplingThrottler.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/SamplingThrottler.java
@@ -42,7 +42,6 @@ import org.slf4j.LoggerFactory;
  */
 public class SamplingThrottler extends DelegateAsyncProcessor implements Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(SamplingThrottler.class);
     private String id;
     private long messageFrequency;
     private long currentMessageCount;
@@ -129,13 +128,13 @@ public class SamplingThrottler extends DelegateAsyncProcessor implements Traceab
                 long now = System.currentTimeMillis();
                 if (now >= timeOfLastExchange + periodInMillis) {
                     doSend = true;
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace(sampled.sample());
+                    if (log.isTraceEnabled()) {
+                        log.trace(sampled.sample());
                     }
                     timeOfLastExchange = now;
                 } else {
-                    if (LOG.isTraceEnabled()) {
-                        LOG.trace(sampled.drop());
+                    if (log.isTraceEnabled()) {
+                        log.trace(sampled.drop());
                     }
                 }
             }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/SendDynamicProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/SendDynamicProcessor.java
index 21d3d87..f924d7a 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/SendDynamicProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/SendDynamicProcessor.java
@@ -18,8 +18,6 @@ package org.apache.camel.processor;
 
 import org.apache.camel.AsyncCallback;
 import org.apache.camel.AsyncProcessor;
-import org.apache.camel.AsyncProducer;
-import org.apache.camel.impl.ProducerCache.AsyncProducerCallback;
 import org.apache.camel.CamelContext;
 import org.apache.camel.CamelContextAware;
 import org.apache.camel.Endpoint;
@@ -48,7 +46,7 @@ import org.slf4j.LoggerFactory;
  * @see org.apache.camel.processor.SendProcessor
  */
 public class SendDynamicProcessor extends ServiceSupport implements AsyncProcessor, IdAware, CamelContextAware {
-    protected static final Logger LOG = LoggerFactory.getLogger(SendDynamicProcessor.class);
+
     protected SendDynamicAware dynamicAware;
     protected CamelContext camelContext;
     protected final String uri;
@@ -120,8 +118,8 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
                         preAwareProcessor = dynamicAware.createPreProcessor(exchange, entry);
                         postAwareProcessor = dynamicAware.createPostProcessor(exchange, entry);
                         if (staticUri != null) {
-                            if (LOG.isDebugEnabled()) {
-                                LOG.debug("Optimising toD via SendDynamicAware component: {} to use static uri: {}", scheme, URISupport.sanitizeUri(staticUri));
+                            if (log.isDebugEnabled()) {
+                                log.debug("Optimising toD via SendDynamicAware component: {} to use static uri: {}", scheme, URISupport.sanitizeUri(staticUri));
                             }
                         }
                     }
@@ -133,8 +131,8 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
                 endpoint = resolveEndpoint(exchange, recipient);
             }
             if (endpoint == null) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Send dynamic evaluated as null so cannot send to any endpoint");
+                if (log.isDebugEnabled()) {
+                    log.debug("Send dynamic evaluated as null so cannot send to any endpoint");
                 }
                 // no endpoint to send to, so ignore
                 callback.done(true);
@@ -143,8 +141,8 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
             destinationExchangePattern = EndpointHelper.resolveExchangePatternFromUrl(endpoint.getEndpointUri());
         } catch (Throwable e) {
             if (isIgnoreInvalidEndpoint()) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
+                if (log.isDebugEnabled()) {
+                    log.debug("Endpoint uri is invalid: " + recipient + ". This exception will be ignored.", e);
                 }
             } else {
                 exchange.setException(e);
@@ -172,7 +170,7 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
                 c.done(true);
             }
 
-            LOG.debug(">>>> {} {}", endpoint, e);
+            log.debug(">>>> {} {}", endpoint, e);
             return p.process(target, new AsyncCallback() {
                 public void done(boolean doneSync) {
                     // restore previous MEP
@@ -251,7 +249,7 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
     protected void doStart() throws Exception {
         if (producerCache == null) {
             producerCache = new ProducerCache(this, camelContext, cacheSize);
-            LOG.debug("DynamicSendTo {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
+            log.debug("DynamicSendTo {} using ProducerCache with cacheSize={}", this, producerCache.getCapacity());
         }
 
         if (isAllowOptimisedComponents() && uri != null) {
@@ -265,15 +263,15 @@ public class SendDynamicProcessor extends ServiceSupport implements AsyncProcess
                     SendDynamicAwareResolver resolver = new SendDynamicAwareResolver();
                     dynamicAware = resolver.resolve(camelContext, scheme);
                     if (dynamicAware != null) {
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Detected SendDynamicAware component: {} optimising toD: {}", scheme, URISupport.sanitizeUri(uri));
+                        if (log.isDebugEnabled()) {
+                            log.debug("Detected SendDynamicAware component: {} optimising toD: {}", scheme, URISupport.sanitizeUri(uri));
                         }
                     }
                 }
             } catch (Throwable e) {
                 // ignore
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Error creating optimised SendDynamicAwareResolver for uri: " + URISupport.sanitizeUri(uri)
+                if (log.isDebugEnabled()) {
+                    log.debug("Error creating optimised SendDynamicAwareResolver for uri: " + URISupport.sanitizeUri(uri)
                         + " due to " + e.getMessage() + ". This exception is ignored", e);
                 }
             }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java
index 0548bf7..2ce2328 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java
@@ -17,7 +17,6 @@
 package org.apache.camel.processor;
 
 import java.net.URISyntaxException;
-import java.util.HashMap;
 
 import org.apache.camel.AsyncCallback;
 import org.apache.camel.AsyncProcessor;
@@ -27,15 +26,11 @@ import org.apache.camel.Endpoint;
 import org.apache.camel.EndpointAware;
 import org.apache.camel.Exchange;
 import org.apache.camel.ExchangePattern;
-import org.apache.camel.Producer;
 import org.apache.camel.Traceable;
-import org.apache.camel.impl.DefaultCamelContext;
 import org.apache.camel.impl.InterceptSendToEndpoint;
 import org.apache.camel.impl.ProducerCache;
-import org.apache.camel.impl.ServicePool;
 import org.apache.camel.spi.IdAware;
 import org.apache.camel.support.ServiceSupport;
-import org.apache.camel.util.AsyncProcessorConverterHelper;
 import org.apache.camel.util.AsyncProcessorHelper;
 import org.apache.camel.util.EndpointHelper;
 import org.apache.camel.util.EventHelper;
@@ -52,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * @see SendDynamicProcessor
  */
 public class SendProcessor extends ServiceSupport implements AsyncProcessor, Traceable, EndpointAware, IdAware {
-    protected static final Logger LOG = LoggerFactory.getLogger(SendProcessor.class);
+
     protected transient String traceLabelToString;
     protected final CamelContext camelContext;
     protected final ExchangePattern pattern;
@@ -138,7 +133,7 @@ public class SendProcessor extends ServiceSupport implements AsyncProcessor, Tra
             final StopWatch watch = sw;
 
             try {
-                LOG.debug(">>>> {} {}", destination, exchange);
+                log.debug(">>>> {} {}", destination, exchange);
                 return producer.process(exchange, new AsyncCallback() {
                     @Override
                     public void done(boolean doneSync) {
@@ -164,7 +159,7 @@ public class SendProcessor extends ServiceSupport implements AsyncProcessor, Tra
         }
 
         configureExchange(exchange, pattern);
-        LOG.debug(">>>> {} {}", destination, exchange);
+        log.debug(">>>> {} {}", destination, exchange);
 
         // send the exchange to the destination using the producer cache for the non optimized producers
         return producerCache.doInAsyncProducer(destination, exchange, callback, (producer, ex, cb) -> producer.process(ex, doneSync -> {
@@ -218,8 +213,8 @@ public class SendProcessor extends ServiceSupport implements AsyncProcessor, Tra
         // lookup this before we can use the destination
         Endpoint lookup = camelContext.hasEndpoint(destination.getEndpointKey());
         if (lookup instanceof InterceptSendToEndpoint) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Intercepted sending to {} -> {}",
+            if (log.isDebugEnabled()) {
+                log.debug("Intercepted sending to {} -> {}",
                         URISupport.sanitizeUri(destination.getEndpointUri()), URISupport.sanitizeUri(lookup.getEndpointUri()));
             }
             destination = lookup;
diff --git a/camel-core/src/main/java/org/apache/camel/processor/StreamResequencer.java b/camel-core/src/main/java/org/apache/camel/processor/StreamResequencer.java
index 6b1df38..fde1a47 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/StreamResequencer.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/StreamResequencer.java
@@ -69,8 +69,6 @@ import org.slf4j.LoggerFactory;
  */
 public class StreamResequencer extends ServiceSupport implements SequenceSender<Exchange>, AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(StreamResequencer.class);
-
     private String id;
     private final CamelContext camelContext;
     private final ExceptionHandler exceptionHandler;
@@ -238,7 +236,7 @@ public class StreamResequencer extends ServiceSupport implements SequenceSender<
             delivery.request();
         } catch (Exception e) {
             if (isIgnoreInvalidExchanges()) {
-                LOG.debug("Invalid Exchange. This Exchange will be ignored: {}", exchange);
+                log.debug("Invalid Exchange. This Exchange will be ignored: {}", exchange);
             } else {
                 exchange.setException(new CamelExchangeException("Error processing Exchange in StreamResequencer", exchange, e));
             }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/ThreadsProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/ThreadsProcessor.java
index 94af2e7..a510cc3 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/ThreadsProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/ThreadsProcessor.java
@@ -59,7 +59,6 @@ import org.slf4j.LoggerFactory;
  */
 public class ThreadsProcessor extends ServiceSupport implements AsyncProcessor, IdAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ThreadsProcessor.class);
     private String id;
     private final CamelContext camelContext;
     private final ExecutorService executorService;
@@ -80,7 +79,7 @@ public class ThreadsProcessor extends ServiceSupport implements AsyncProcessor,
 
         @Override
         public void run() {
-            LOG.trace("Continue routing exchange {}", exchange);
+            log.trace("Continue routing exchange {}", exchange);
             if (shutdown.get()) {
                 exchange.setException(new RejectedExecutionException("ThreadsProcessor is not running."));
             }
@@ -91,7 +90,7 @@ public class ThreadsProcessor extends ServiceSupport implements AsyncProcessor,
         public void reject() {
             // reject should mark the exchange with an rejected exception and mark not to route anymore
             exchange.setException(new RejectedExecutionException());
-            LOG.trace("Rejected routing exchange {}", exchange);
+            log.trace("Rejected routing exchange {}", exchange);
             if (shutdown.get()) {
                 exchange.setException(new RejectedExecutionException("ThreadsProcessor is not running."));
             }
@@ -126,7 +125,7 @@ public class ThreadsProcessor extends ServiceSupport implements AsyncProcessor,
         // we cannot execute this asynchronously for transacted exchanges, as the transaction manager doesn't support
         // using different threads in the same transaction
         if (exchange.isTransacted()) {
-            LOG.trace("Transacted Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
+            log.trace("Transacted Exchange must be routed synchronously for exchangeId: {} -> {}", exchange.getExchangeId(), exchange);
             callback.done(true);
             return true;
         }
@@ -134,7 +133,7 @@ public class ThreadsProcessor extends ServiceSupport implements AsyncProcessor,
         try {
             // process the call in asynchronous mode
             ProcessCall call = new ProcessCall(exchange, callback, false);
-            LOG.trace("Submitting task {}", call);
+            log.trace("Submitting task {}", call);
             executorService.submit(call);
             // tell Camel routing engine we continue routing asynchronous
             return false;
diff --git a/camel-core/src/main/java/org/apache/camel/processor/Throttler.java b/camel-core/src/main/java/org/apache/camel/processor/Throttler.java
index d1bd9e5..42259fe 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/Throttler.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/Throttler.java
@@ -70,7 +70,6 @@ public class Throttler extends DelegateAsyncProcessor implements Traceable, IdAw
 
     private enum State { SYNC, ASYNC, ASYNC_REJECTED }
 
-    private final Logger log = LoggerFactory.getLogger(Throttler.class);
     private final CamelContext camelContext;
     private final ScheduledExecutorService asyncExecutor;
     private final boolean shutdownAsyncExecutor;
diff --git a/camel-core/src/main/java/org/apache/camel/processor/ThroughputLogger.java b/camel-core/src/main/java/org/apache/camel/processor/ThroughputLogger.java
index e4bf8c0..05e7bc6 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/ThroughputLogger.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/ThroughputLogger.java
@@ -30,8 +30,6 @@ import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.AsyncProcessorHelper;
 import org.apache.camel.util.CamelLogger;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A logger for logging message throughput.
@@ -39,7 +37,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class ThroughputLogger extends ServiceSupport implements AsyncProcessor, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(ThroughputLogger.class);
 
     private String id;
     private final AtomicInteger receivedCounter = new AtomicInteger();
@@ -54,22 +51,22 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
     private String action = "Received";
     private CamelContext camelContext;
     private ScheduledExecutorService logSchedulerService;
-    private CamelLogger log;
+    private CamelLogger logger;
     private String lastLogMessage;
     private double rate;
     private double average;
 
-    public ThroughputLogger(CamelLogger log) {
-        this.log = log;
+    public ThroughputLogger(CamelLogger logger) {
+        this.logger = logger;
     }
 
-    public ThroughputLogger(CamelLogger log, Integer groupSize) {
-        this(log);
+    public ThroughputLogger(CamelLogger logger, Integer groupSize) {
+        this(logger);
         setGroupSize(groupSize);
     }
 
-    public ThroughputLogger(CamelLogger log, CamelContext camelContext, Long groupInterval, Long groupDelay, Boolean groupActiveOnly) {
-        this(log);
+    public ThroughputLogger(CamelLogger logger, CamelContext camelContext, Long groupInterval, Long groupDelay, Boolean groupActiveOnly) {
+        this(logger);
         this.camelContext = camelContext;
         setGroupInterval(groupInterval);
         setGroupActiveOnly(groupActiveOnly);
@@ -100,7 +97,7 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
         if (groupSize != null) {
             if (receivedCount % groupSize == 0) {
                 lastLogMessage = createLogMessage(exchange, receivedCount);
-                log.log(lastLogMessage);
+                logger.log(lastLogMessage);
             }
         }
 
@@ -196,7 +193,7 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
 
             logSchedulerService = camelContext.getExecutorServiceManager().newSingleThreadScheduledExecutor(this, "ThroughputLogger");
             Runnable scheduledLogTask = new ScheduledLogTask();
-            LOG.info("Scheduling throughput log to run every {} millis.", groupInterval);
+            log.info("Scheduling throughput logger to run every {} millis.", groupInterval);
             // must use fixed rate to have it trigger at every X interval
             logSchedulerService.scheduleAtFixedRate(scheduledLogTask, groupDelay, groupInterval, TimeUnit.MILLISECONDS);
         }
@@ -235,7 +232,7 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
         public void run() {
             // only run if CamelContext has been fully started
             if (!camelContext.getStatus().isStarted()) {
-                LOG.trace("ThroughputLogger cannot start because CamelContext({}) has not been started yet", camelContext.getName());
+                log.trace("ThroughputLogger cannot start because CamelContext({}) has not been started yet", camelContext.getName());
                 return;
             }
 
@@ -245,14 +242,14 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
 
     protected void createGroupIntervalLogMessage() {
         
-        // this indicates that no messages have been received yet...don't log yet
+        // this indicates that no messages have been received yet...don't log yet
         if (startTime == 0) {
             return;
         }
         
         int receivedCount = receivedCounter.get();
 
-        // if configured, hide log messages when no new messages have been received
+        // if configured, hide log messages when no new messages have been received
         if (groupActiveOnly && receivedCount == groupReceivedCount) {
             return;
         }
@@ -273,7 +270,7 @@ public class ThroughputLogger extends ServiceSupport implements AsyncProcessor,
         lastLogMessage = getAction() + ": " + currentCount + " new messages, with total " + receivedCount + " so far. Last group took: " + duration
                 + " millis which is: " + numberFormat.format(rate)
                 + " messages per second. average: " + numberFormat.format(average);
-        log.log(lastLogMessage);
+        logger.log(lastLogMessage);
     }
 
     protected double messagesPerSecond(long messageCount, long startTime, long endTime) {
diff --git a/camel-core/src/main/java/org/apache/camel/processor/TryProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/TryProcessor.java
index 2573fe1..1cb861d 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/TryProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/TryProcessor.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(TryProcessor.class);
 
     protected String id;
     protected final Processor tryProcessor;
@@ -85,19 +84,19 @@ public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navi
 
             // continue as long its being processed synchronously
             if (!sync) {
-                LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
+                log.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
                 // the remainder of the try .. catch .. finally will be completed async
                 // so we break out now, then the callback will be invoked which then continue routing from where we left here
                 return false;
             }
 
-            LOG.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
+            log.trace("Processing exchangeId: {} is continued being processed synchronously", exchange.getExchangeId());
         }
 
         ExchangeHelper.prepareOutToIn(exchange);
         exchange.removeProperty(Exchange.TRY_ROUTE_BLOCK);
         exchange.setProperty(Exchange.EXCEPTION_HANDLED, lastHandled);
-        LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+        log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
         callback.done(true);
         return true;
     }
@@ -106,7 +105,7 @@ public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navi
                               final Iterator<Processor> processors, final AsyncProcessor processor,
                               final Object lastHandled) {
         // this does the actual processing so log at trace level
-        LOG.trace("Processing exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+        log.trace("Processing exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
 
         // implement asynchronous routing logic in callback so we can have the callback being
         // triggered and then continue routing where we left
@@ -127,7 +126,7 @@ public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navi
                     doneSync = process(exchange, callback, processors, processor, lastHandled);
 
                     if (!doneSync) {
-                        LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
+                        log.trace("Processing exchangeId: {} is continued being processed asynchronously", exchange.getExchangeId());
                         // the remainder of the try .. catch .. finally will be completed async
                         // so we break out now, then the callback will be invoked which then continue routing from where we left here
                         return;
@@ -137,7 +136,7 @@ public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navi
                 ExchangeHelper.prepareOutToIn(exchange);
                 exchange.removeProperty(Exchange.TRY_ROUTE_BLOCK);
                 exchange.setProperty(Exchange.EXCEPTION_HANDLED, lastHandled);
-                LOG.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
+                log.trace("Processing complete for exchangeId: {} >>> {}", exchange.getExchangeId(), exchange);
                 callback.done(false);
             }
         });
@@ -150,7 +149,7 @@ public class TryProcessor extends ServiceSupport implements AsyncProcessor, Navi
         if (stop != null) {
             boolean doStop = exchange.getContext().getTypeConverter().convertTo(Boolean.class, stop);
             if (doStop) {
-                LOG.debug("Exchange is marked to stop routing: {}", exchange);
+                log.debug("Exchange is marked to stop routing: {}", exchange);
                 return false;
             }
         }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/WireTapProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/WireTapProcessor.java
index 63f7ae4..06f68a4 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/WireTapProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/WireTapProcessor.java
@@ -53,7 +53,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class WireTapProcessor extends ServiceSupport implements AsyncProcessor, Traceable, ShutdownAware, IdAware, CamelContextAware {
-    private static final Logger LOG = LoggerFactory.getLogger(WireTapProcessor.class);
+
     private String id;
     private CamelContext camelContext;
     private final SendDynamicProcessor dynamicProcessor;
@@ -156,10 +156,10 @@ public class WireTapProcessor extends ServiceSupport implements AsyncProcessor,
             public Exchange call() throws Exception {
                 taskCount.increment();
                 try {
-                    LOG.debug(">>>> (wiretap) {} {}", uri, wireTapExchange);
+                    log.debug(">>>> (wiretap) {} {}", uri, wireTapExchange);
                     processor.process(wireTapExchange);
                 } catch (Throwable e) {
-                    LOG.warn("Error occurred during processing " + wireTapExchange + " wiretap to " + uri + ". This exception will be ignored.", e);
+                    log.warn("Error occurred during processing " + wireTapExchange + " wiretap to " + uri + ". This exception will be ignored.", e);
                 } finally {
                     taskCount.decrement();
                 }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
index d9078d7..129d86f 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
@@ -89,8 +89,6 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
     public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";
 
-    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);
-
     private final Lock lock = new ReentrantLock();
     private final AtomicBoolean aggregateRepositoryWarned = new AtomicBoolean();
     private final CamelContext camelContext;
@@ -296,7 +294,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         if (ObjectHelper.isEmpty(key)) {
             // we have a bad correlation key
             if (isIgnoreInvalidCorrelationKeys()) {
-                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
+                log.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                 return;
             } else {
                 throw new CamelExchangeException("Invalid correlation key", exchange);
@@ -329,7 +327,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                     exhaustedRetries = false;
                     break;
                 } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
-                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
+                    log.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                               new Object[]{attempt, aggregationRepository, key, copy, e});
                     optimisticLockRetryPolicy.doDelay(attempt);
                 }
@@ -396,7 +394,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
      * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
      */
     private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
-        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);
+        log.trace("onAggregation +++ start +++ with correlation key: {}", key);
 
         List<Exchange> list = new ArrayList<>();
         String complete = null;
@@ -487,7 +485,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         if (aggregationRepository instanceof RecoverableAggregationRepository) {
             boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
             if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
-                LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
+                log.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
                     + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
             }
         }
@@ -512,7 +510,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
             doAggregationComplete(complete, list, key, originalExchange, answer);
         }
 
-        LOG.trace("onAggregation +++  end  +++ with correlation key: {}", key);
+        log.trace("onAggregation +++  end  +++ with correlation key: {}", key);
         return list;
     }
 
@@ -548,7 +546,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
     }
 
     protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
-        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", oldExchange, newExchange, key);
+        log.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", oldExchange, newExchange, key);
         if (optimisticLocking) {
             try {
                 ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
@@ -567,7 +565,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
             strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
         }
         if (strategy instanceof OptimisticLockingAwareAggregationStrategy) {
-            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
+            log.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
                       new Object[]{strategy, oldExchange, newExchange});
             ((OptimisticLockingAwareAggregationStrategy)strategy).onOptimisticLockFailure(oldExchange, newExchange);
         }
@@ -654,8 +652,8 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         if (getCompletionTimeoutExpression() != null) {
             Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
             if (value != null && value > 0) {
-                if (LOG.isTraceEnabled()) {
-                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
+                if (log.isTraceEnabled()) {
+                    log.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
                             new Object[]{key, value, exchange});
                 }
                 addExchangeToTimeoutMap(key, exchange, value);
@@ -664,8 +662,8 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         }
         if (!timeoutSet && getCompletionTimeout() > 0) {
             // timeout is used so use the timeout map to keep an eye on this
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
+            if (log.isTraceEnabled()) {
+                log.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
                         new Object[]{key, getCompletionTimeout(), exchange});
             }
             addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
@@ -703,7 +701,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
         if (!fromTimeout && timeoutMap != null) {
             // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
-            LOG.trace("Removing correlation key {} from timeout", key);
+            log.trace("Removing correlation key {} from timeout", key);
             timeoutMap.remove(key);
         }
 
@@ -728,7 +726,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         Exchange answer;
         if (fromTimeout && isDiscardOnCompletionTimeout()) {
             // discard due timeout
-            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
+            log.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
             // must confirm the discarded exchange
             aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
             // and remove redelivery state as well
@@ -744,7 +742,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
     }
 
     private void onSubmitCompletion(final String key, final Exchange exchange) {
-        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);
+        log.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);
 
         // add this as in progress before we submit the task
         inProgressCompleteExchanges.add(exchange.getExchangeId());
@@ -782,7 +780,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         // send this exchange
         executorService.submit(new Runnable() {
             public void run() {
-                LOG.debug("Processing aggregated exchange: {}", exchange);
+                log.debug("Processing aggregated exchange: {}", exchange);
 
                 // add on completion task so we remember to update the inProgressCompleteExchanges
                 exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));
@@ -798,7 +796,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                     // if there was an exception then let the exception handler handle it
                     getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
                 } else {
-                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
+                    log.trace("Processing aggregated exchange: {} complete.", exchange);
                 }
             }
         });
@@ -818,20 +816,20 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         }
 
         StopWatch watch = new StopWatch();
-        LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
+        log.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
 
         for (String key : keys) {
             Exchange exchange = aggregationRepository.get(camelContext, key);
             // grab the timeout value
             long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
             if (timeout > 0) {
-                LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
+                log.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
                 addExchangeToTimeoutMap(key, exchange, timeout);
             }
         }
 
         // log duration of this task so end user can see how long it takes to pre-check this upon starting
-        LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
+        log.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
                 timeoutMap.size(), TimeUtils.printDuration(watch.taken()));
     }
 
@@ -1084,7 +1082,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         }
 
         public void onFailure(Exchange exchange) {
-            LOG.trace("Aggregated exchange onFailure: {}", exchange);
+            log.trace("Aggregated exchange onFailure: {}", exchange);
 
             // must remember to remove in progress when we failed
             inProgressCompleteExchanges.remove(exchangeId);
@@ -1092,7 +1090,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         }
 
         public void onComplete(Exchange exchange) {
-            LOG.trace("Aggregated exchange onComplete: {}", exchange);
+            log.trace("Aggregated exchange onComplete: {}", exchange);
 
             // only confirm if we processed without a problem
             try {
@@ -1142,7 +1140,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
             boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
             if (inProgress) {
-                LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
+                log.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
                 return true;
             }
 
@@ -1165,7 +1163,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
             }
 
             if (optimisticLocking && evictionStolen) {
-                LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
+                log.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
                           + "for exchange with id: {} and correlation id: {}", exchangeId, key);
             }
             return true;
@@ -1180,11 +1178,11 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         public void run() {
             // only run if CamelContext has been fully started
             if (!camelContext.getStatus().isStarted()) {
-                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
+                log.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                 return;
             }
 
-            LOG.trace("Starting completion interval task");
+            log.trace("Starting completion interval task");
 
             // trigger completion for all in the repository
             Set<String> keys = aggregationRepository.getKeys();
@@ -1201,7 +1199,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                         if (exchange == null) {
                             stolenInterval = true;
                         } else {
-                            LOG.trace("Completion interval triggered for correlation key: {}", key);
+                            log.trace("Completion interval triggered for correlation key: {}", key);
                             // indicate it was completed by interval
                             exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
                             try {
@@ -1214,7 +1212,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                             }
                         }
                         if (optimisticLocking && stolenInterval) {
-                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
+                            log.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
                         }
                     }
                 } finally {
@@ -1224,7 +1222,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 }
             }
 
-            LOG.trace("Completion interval task complete");
+            log.trace("Completion interval task complete");
         }
     }
 
@@ -1241,11 +1239,11 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         public void run() {
             // only run if CamelContext has been fully started
             if (!camelContext.getStatus().isStarted()) {
-                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
+                log.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                 return;
             }
 
-            LOG.trace("Starting recover check");
+            log.trace("Starting recover check");
 
             // copy the current in progress before doing scan
             final Set<String> copyOfInProgress = new LinkedHashSet<>(inProgressCompleteExchanges);
@@ -1255,7 +1253,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
                 // we may shutdown while doing recovery
                 if (!isRunAllowed()) {
-                    LOG.info("We are shutting down so stop recovering");
+                    log.info("We are shutting down so stop recovering");
                     return;
                 }
                 if (!optimisticLocking) {
@@ -1266,9 +1264,9 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                     // its safer to consider it in progress than risk duplicates due both in progress + recovered
                     boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
                     if (inProgress) {
-                        LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
+                        log.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
                     } else {
-                        LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
+                        log.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
                         Exchange exchange = recoverable.recover(camelContext, exchangeId);
                         if (exchange != null) {
                             // get the correlation key
@@ -1281,7 +1279,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
                             // if we are exhausted, then move to dead letter channel
                             if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
-                                LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
+                                log.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
                                         + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());
 
                                 // send to DLC
@@ -1316,7 +1314,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                                     exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
                                 }
 
-                                LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);
+                                log.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);
 
                                 // not exhaust so resubmit the recovered exchange
                                 onSubmitCompletion(key, exchange);
@@ -1330,7 +1328,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 }
             }
 
-            LOG.trace("Recover check complete");
+            log.trace("Recover check complete");
         }
     }
 
@@ -1346,7 +1344,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         }
         if (strategy instanceof PreCompletionAwareAggregationStrategy) {
             preCompletion = true;
-            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
+            log.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
         }
 
         if (!preCompletion) {
@@ -1361,24 +1359,24 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
         if (getCloseCorrelationKeyOnCompletion() != null) {
             if (getCloseCorrelationKeyOnCompletion() > 0) {
-                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of {}", getCloseCorrelationKeyOnCompletion());
+                log.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of {}", getCloseCorrelationKeyOnCompletion());
                 closedCorrelationKeys = LRUCacheFactory.newLRUCache(getCloseCorrelationKeyOnCompletion());
             } else {
-                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
+                log.info("Using ClosedCorrelationKeys with unbounded capacity");
                 closedCorrelationKeys = new ConcurrentHashMap<>();
             }
         }
 
         if (aggregationRepository == null) {
             aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
-            LOG.info("Defaulting to MemoryAggregationRepository");
+            log.info("Defaulting to MemoryAggregationRepository");
         }
 
         if (optimisticLocking) {
             if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
                 throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
             }
-            LOG.info("Optimistic locking is enabled");
+            log.info("Optimistic locking is enabled");
         }
 
         ServiceHelper.startService(aggregationStrategy, processor, aggregationRepository);
@@ -1395,7 +1393,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 // create a background recover thread to check every interval
                 recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
                 Runnable recoverTask = new RecoverTask(recoverable);
-                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every {} millis.", interval);
+                log.info("Using RecoverableAggregationRepository by scheduling recover checker to run every {} millis.", interval);
                 // use fixed delay so there is X interval between each run
                 recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);
 
@@ -1404,7 +1402,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                     if (max <= 0) {
                         throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
                     }
-                    LOG.info("After {} failed redelivery attempts Exchanges will be moved to deadLetterUri: {}", max, recoverable.getDeadLetterUri());
+                    log.info("After {} failed redelivery attempts Exchanges will be moved to deadLetterUri: {}", max, recoverable.getDeadLetterUri());
 
                     // dead letter uri must be a valid endpoint
                     Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
@@ -1420,7 +1418,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
             throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
         }
         if (getCompletionInterval() > 0) {
-            LOG.info("Using CompletionInterval to run every {} millis.", getCompletionInterval());
+            log.info("Using CompletionInterval to run every {} millis.", getCompletionInterval());
             if (getTimeoutCheckerExecutorService() == null) {
                 setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                 shutdownTimeoutCheckerExecutorService = true;
@@ -1431,7 +1429,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
         // start timeout service if its in use
         if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
-            LOG.info("Using CompletionTimeout to trigger after {} millis of inactivity.", getCompletionTimeout());
+            log.info("Using CompletionTimeout to trigger after {} millis of inactivity.", getCompletionTimeout());
             if (getTimeoutCheckerExecutorService() == null) {
                 setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                 shutdownTimeoutCheckerExecutorService = true;
@@ -1514,18 +1512,18 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
 
         StopWatch watch = new StopWatch();
         while (inProgressCompleteExchanges.size() > 0) {
-            LOG.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges());
+            log.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges());
             try {
                 Thread.sleep(100);
             } catch (InterruptedException e) {
                 // break out as we got interrupted such as the JVM terminating
-                LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges());
+                log.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges());
                 break;
             }
         }
 
         if (expected > 0) {
-            LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.taken()));
+            log.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.taken()));
         }
     }
 
@@ -1559,7 +1557,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
             Exchange exchange = aggregationRepository.get(camelContext, key);
             if (exchange != null) {
                 total = 1;
-                LOG.trace("Force completion triggered for correlation key: {}", key);
+                log.trace("Force completion triggered for correlation key: {}", key);
                 // indicate it was completed by a force completion request
                 exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                 Exchange answer = onCompletion(key, exchange, exchange, false);
@@ -1572,10 +1570,10 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 lock.unlock(); 
             }
         }
-        LOG.trace("Completed force completion of group {}", key);
+        log.trace("Completed force completion of group {}", key);
 
         if (total > 0) {
-            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
+            log.debug("Forcing completion of group {} with {} exchanges", key, total);
         }
         return total;
     }
@@ -1585,11 +1583,11 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
         // only run if CamelContext has been fully started or is stopping
         boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
         if (!allow) {
-            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
+            log.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
             return 0;
         }
 
-        LOG.trace("Starting force completion of all groups task");
+        log.trace("Starting force completion of all groups task");
 
         // trigger completion for all in the repository
         Set<String> keys = aggregationRepository.getKeys();
@@ -1605,7 +1603,7 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 for (String key : keys) {
                     Exchange exchange = aggregationRepository.get(camelContext, key);
                     if (exchange != null) {
-                        LOG.trace("Force completion triggered for correlation key: {}", key);
+                        log.trace("Force completion triggered for correlation key: {}", key);
                         // indicate it was completed by a force completion request
                         exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                         Exchange answer = onCompletion(key, exchange, exchange, false);
@@ -1620,10 +1618,10 @@ public class AggregateProcessor extends ServiceSupport implements AsyncProcessor
                 }
             }
         }
-        LOG.trace("Completed force completion of all groups task");
+        log.trace("Completed force completion of all groups task");
 
         if (total > 0) {
-            LOG.debug("Forcing completion of all groups with {} exchanges", total);
+            log.debug("Forcing completion of all groups with {} exchanges", total);
         }
         return total;
     }
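
The AggregateProcessor hunks above are representative of the mechanical part of this change: the per-class static LOG field is gone and every call site now goes through the protected instance logger inherited from ServiceSupport. One nuance worth keeping in mind is the logger name: the static field was always named after the declaring class, whereas the inherited field is created from getClass(), so a subclass logs under its own name. A minimal side-by-side sketch of the two patterns; the class is hypothetical and only illustrates the naming difference, it is not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical class, not part of this commit.
    public class LoggerNamingDemo {
        // Old pattern: logger name is fixed to the declaring class.
        private static final Logger LOG = LoggerFactory.getLogger(LoggerNamingDemo.class);

        // New pattern (as ServiceSupport now does): logger name follows the
        // runtime class, so a subclass would log under the subclass's name.
        protected final Logger log = LoggerFactory.getLogger(getClass());
    }
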
diff --git a/camel-core/src/main/java/org/apache/camel/processor/idempotent/FileIdempotentRepository.java b/camel-core/src/main/java/org/apache/camel/processor/idempotent/FileIdempotentRepository.java
index c6758b9..e4c49f6 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/idempotent/FileIdempotentRepository.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/idempotent/FileIdempotentRepository.java
@@ -53,7 +53,7 @@ import org.slf4j.LoggerFactory;
  */
 @ManagedResource(description = "File based idempotent repository")
 public class FileIdempotentRepository extends ServiceSupport implements IdempotentRepository<String> {
-    private static final Logger LOG = LoggerFactory.getLogger(FileIdempotentRepository.class);
+
     private static final String STORE_DELIMITER = "\n";
 
     private final AtomicBoolean init = new AtomicBoolean();
@@ -142,7 +142,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
 
                 // check if we hit maximum capacity (if enabled) and report a warning about this
                 if (maxFileStoreSize > 0 && fileStore.length() > maxFileStoreSize) {
-                    LOG.warn("Maximum capacity of file store: {} hit at {} bytes. Dropping {} oldest entries from the file store", fileStore, maxFileStoreSize, dropOldestFileStore);
+                    log.warn("Maximum capacity of file store: {} hit at {} bytes. Dropping {} oldest entries from the file store", fileStore, maxFileStoreSize, dropOldestFileStore);
                     trunkStore();
                 }
 
@@ -308,17 +308,17 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
      * @param key  the key
      */
     protected void appendToStore(final String key) {
-        LOG.debug("Appending: {} to idempotent filestore: {}", key, fileStore);
+        log.debug("Appending: {} to idempotent filestore: {}", key, fileStore);
         FileOutputStream fos = null;
         try {
             // create store parent directory if missing
             File storeParentDirectory = fileStore.getParentFile();
             if (storeParentDirectory != null && !storeParentDirectory.exists()) {
-                LOG.info("Parent directory of file store {} doesn't exist. Creating.", fileStore);
+                log.info("Parent directory of file store {} doesn't exist. Creating.", fileStore);
                 if (fileStore.getParentFile().mkdirs()) {
-                    LOG.info("Parent directory of filestore: {} successfully created.", fileStore);
+                    log.info("Parent directory of filestore: {} successfully created.", fileStore);
                 } else {
-                    LOG.warn("Parent directory of filestore: {} cannot be created.", fileStore);
+                    log.warn("Parent directory of filestore: {} cannot be created.", fileStore);
                 }
             }
             // create store if missing
@@ -332,12 +332,12 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
         } catch (IOException e) {
             throw ObjectHelper.wrapRuntimeCamelException(e);
         } finally {
-            IOHelper.close(fos, "Appending to file idempotent repository", LOG);
+            IOHelper.close(fos, "Appending to file idempotent repository", log);
         }
     }
 
     protected synchronized void removeFromStore(String key) {
-        LOG.debug("Removing: {} from idempotent filestore: {}", key, fileStore);
+        log.debug("Removing: {} from idempotent filestore: {}", key, fileStore);
 
         // we need to re-load the entire file and remove the key and then re-write the file
         List<String> lines = new ArrayList<>();
@@ -364,7 +364,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
 
         if (found) {
             // rewrite file
-            LOG.debug("Rewriting idempotent filestore: {} due to key: {} removed", fileStore, key);
+            log.debug("Rewriting idempotent filestore: {} due to key: {} removed", fileStore, key);
             FileOutputStream fos = null;
             try {
                 fos = new FileOutputStream(fileStore);
@@ -375,7 +375,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
             } catch (IOException e) {
                 throw ObjectHelper.wrapRuntimeCamelException(e);
             } finally {
-                IOHelper.close(fos, "Rewriting file idempotent repository", LOG);
+                IOHelper.close(fos, "Rewriting file idempotent repository", log);
             }
         }
     }
@@ -400,7 +400,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
             return;
         }
 
-        LOG.debug("Trunking: {} oldest entries from idempotent filestore: {}", dropOldestFileStore, fileStore);
+        log.debug("Trunking: {} oldest entries from idempotent filestore: {}", dropOldestFileStore, fileStore);
 
         // we need to re-load the entire file and remove the key and then re-write the file
         List<String> lines = new ArrayList<>();
@@ -426,7 +426,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
 
         if (!lines.isEmpty()) {
             // rewrite file
-            LOG.debug("Rewriting idempotent filestore: {} with {} entries:", fileStore, lines.size());
+            log.debug("Rewriting idempotent filestore: {} with {} entries:", fileStore, lines.size());
             FileOutputStream fos = null;
             try {
                 fos = new FileOutputStream(fileStore);
@@ -437,11 +437,11 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
             } catch (IOException e) {
                 throw ObjectHelper.wrapRuntimeCamelException(e);
             } finally {
-                IOHelper.close(fos, "Rewriting file idempotent repository", LOG);
+                IOHelper.close(fos, "Rewriting file idempotent repository", log);
             }
         } else {
             // its a small file so recreate the file
-            LOG.debug("Clearing idempotent filestore: {}", fileStore);
+            log.debug("Clearing idempotent filestore: {}", fileStore);
             clearStore();
         }
     }
@@ -462,7 +462,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
     protected void loadStore() throws IOException {
         // auto create starting directory if needed
         if (!fileStore.exists()) {
-            LOG.debug("Creating filestore: {}", fileStore);
+            log.debug("Creating filestore: {}", fileStore);
             File parent = fileStore.getParentFile();
             if (parent != null) {
                 parent.mkdirs();
@@ -473,7 +473,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
             }
         }
 
-        LOG.trace("Loading to 1st level cache from idempotent filestore: {}", fileStore);
+        log.trace("Loading to 1st level cache from idempotent filestore: {}", fileStore);
 
         cache.clear();
         try (Scanner scanner = new Scanner(fileStore, null, STORE_DELIMITER)) {
@@ -485,7 +485,7 @@ public class FileIdempotentRepository extends ServiceSupport implements Idempote
             throw ObjectHelper.wrapRuntimeCamelException(e);
         }
 
-        LOG.debug("Loaded {} to the 1st level cache from idempotent filestore: {}", cache.size(), fileStore);
+        log.debug("Loaded {} to the 1st level cache from idempotent filestore: {}", cache.size(), fileStore);
     }
 
     @Override
diff --git a/camel-core/src/main/java/org/apache/camel/processor/idempotent/IdempotentConsumer.java b/camel-core/src/main/java/org/apache/camel/processor/idempotent/IdempotentConsumer.java
index 9ce5d85..9e2adec 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/idempotent/IdempotentConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/idempotent/IdempotentConsumer.java
@@ -53,7 +53,7 @@ import org.slf4j.LoggerFactory;
  * @see org.apache.camel.spi.ExchangeIdempotentRepository
  */
 public class IdempotentConsumer extends ServiceSupport implements CamelContextAware, AsyncProcessor, Navigate<Processor>, IdAware {
-    private static final Logger LOG = LoggerFactory.getLogger(IdempotentConsumer.class);
+
     private CamelContext camelContext;
     private String id;
     private final Expression messageIdExpression;
@@ -147,7 +147,7 @@ public class IdempotentConsumer extends ServiceSupport implements CamelContextAw
 
                 if (skipDuplicate) {
                     // if we should skip duplicate then we are done
-                    LOG.debug("Ignoring duplicate message with id: {} for exchange: {}", messageId, exchange);
+                    log.debug("Ignoring duplicate message with id: {} for exchange: {}", messageId, exchange);
                     callback.done(true);
                     return true;
                 }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogDebugger.java b/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogDebugger.java
index 5fb2efa..a0cedad 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogDebugger.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogDebugger.java
@@ -67,12 +67,10 @@ import org.slf4j.LoggerFactory;
  */
 public class BacklogDebugger extends ServiceSupport implements InterceptStrategy {
 
-    private static final Logger LOG = LoggerFactory.getLogger(BacklogDebugger.class);
-
     private long fallbackTimeout = 300;
     private final CamelContext camelContext;
     private LoggingLevel loggingLevel = LoggingLevel.INFO;
-    private final CamelLogger logger = new CamelLogger(LOG, loggingLevel);
+    private final CamelLogger logger = new CamelLogger(log, loggingLevel);
     private final AtomicBoolean enabled = new AtomicBoolean();
     private final AtomicLong debugCounter = new AtomicLong(0);
     private final Debugger debugger;
diff --git a/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogTracer.java b/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogTracer.java
index a308493..3a1490a 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogTracer.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/interceptor/BacklogTracer.java
@@ -48,7 +48,6 @@ public final class BacklogTracer extends ServiceSupport implements InterceptStra
 
     // lets limit the tracer to 10 thousand messages in total
     public static final int MAX_BACKLOG_SIZE = 10 * 1000;
-    private static final Logger LOG = LoggerFactory.getLogger(BacklogTracer.class);
     private final CamelContext camelContext;
     private boolean enabled;
     private final AtomicLong traceCounter = new AtomicLong(0);
@@ -123,8 +122,8 @@ public final class BacklogTracer extends ServiceSupport implements InterceptStra
             filter = shouldTraceFilter(exchange);
         }
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Should trace evaluated {} -> pattern: {}, filter: {}", definition.getId(), pattern, filter);
+        if (log.isTraceEnabled()) {
+            log.trace("Should trace evaluated {} -> pattern: {}, filter: {}", definition.getId(), pattern, filter);
         }
         return pattern && filter;
     }
diff --git a/camel-core/src/main/java/org/apache/camel/processor/interceptor/DefaultChannel.java b/camel-core/src/main/java/org/apache/camel/processor/interceptor/DefaultChannel.java
index 96aa6d6..9cf70d3 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/interceptor/DefaultChannel.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/interceptor/DefaultChannel.java
@@ -62,8 +62,6 @@ import org.slf4j.LoggerFactory;
  */
 public class DefaultChannel extends CamelInternalProcessor implements ModelChannel {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultChannel.class);
-
     private final List<InterceptStrategy> interceptors = new ArrayList<>();
     private Processor errorHandler;
     // the next processor (non wrapped)
@@ -200,7 +198,7 @@ public class DefaultChannel extends CamelInternalProcessor implements ModelChann
         // the definition to wrap should be the fine grained,
         // so if a child is set then use it, if not then its the original output used
         ProcessorDefinition<?> targetOutputDef = childDefinition != null ? childDefinition : outputDefinition;
-        LOG.debug("Initialize channel for target: '{}'", targetOutputDef);
+        log.debug("Initialize channel for target: '{}'", targetOutputDef);
 
         // fix parent/child relationship. This will be the case of the routes has been
         // defined using XML DSL or end user may have manually assembled a route from the model.
@@ -263,7 +261,7 @@ public class DefaultChannel extends CamelInternalProcessor implements ModelChann
             // use the fine grained definition (eg the child if available). Its always possible to get back to the parent
             Processor wrapped = strategy.wrapProcessorInInterceptors(routeContext.getCamelContext(), targetOutputDef, target, next);
             if (!(wrapped instanceof AsyncProcessor)) {
-                LOG.warn("Interceptor: " + strategy + " at: " + outputDefinition + " does not return an AsyncProcessor instance."
+                log.warn("Interceptor: " + strategy + " at: " + outputDefinition + " does not return an AsyncProcessor instance."
                         + " This causes the asynchronous routing engine to not work as optimal as possible."
                         + " See more details at the InterceptStrategy javadoc."
                         + " Camel will use a bridge to adapt the interceptor to the asynchronous routing engine,"
diff --git a/camel-core/src/main/java/org/apache/camel/support/EventNotifierSupport.java b/camel-core/src/main/java/org/apache/camel/support/EventNotifierSupport.java
index 37d8947..296b4d5 100644
--- a/camel-core/src/main/java/org/apache/camel/support/EventNotifierSupport.java
+++ b/camel-core/src/main/java/org/apache/camel/support/EventNotifierSupport.java
@@ -17,8 +17,6 @@
 package org.apache.camel.support;
 
 import org.apache.camel.spi.EventNotifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Base class to extend for custom {@link EventNotifier} implementations.
@@ -26,7 +24,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public abstract class EventNotifierSupport extends ServiceSupport implements EventNotifier {
-    protected Logger log = LoggerFactory.getLogger(getClass());
+
     private boolean ignoreCamelContextEvents;
     private boolean ignoreRouteEvents;
     private boolean ignoreServiceEvents;
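
EventNotifierSupport previously declared its own protected Logger log; that declaration is dropped here because ServiceSupport (see the ServiceSupport.java hunks below) now provides a field with the same name, so custom notifiers that log through "log" keep compiling. Note that the inherited field is final, so a notifier that reassigned "log" would need adjusting. A minimal sketch of a custom notifier under this change, with a hypothetical class name and message that are not part of the commit:

    import java.util.EventObject;
    import org.apache.camel.support.EventNotifierSupport;

    // Hypothetical notifier, shown only to illustrate the inherited 'log' field.
    public class LoggingEventNotifier extends EventNotifierSupport {

        @Override
        public void notify(EventObject event) throws Exception {
            // 'log' now comes from ServiceSupport via EventNotifierSupport.
            log.info("Camel event: {}", event);
        }

        @Override
        public boolean isEnabled(EventObject event) {
            return true;
        }
    }
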
diff --git a/camel-core/src/main/java/org/apache/camel/support/RoutePolicySupport.java b/camel-core/src/main/java/org/apache/camel/support/RoutePolicySupport.java
index 139037b..4d1d9b5 100644
--- a/camel-core/src/main/java/org/apache/camel/support/RoutePolicySupport.java
+++ b/camel-core/src/main/java/org/apache/camel/support/RoutePolicySupport.java
@@ -35,7 +35,6 @@ import org.slf4j.LoggerFactory;
  */
 public abstract class RoutePolicySupport extends ServiceSupport implements RoutePolicy {
 
-    protected final Logger log = LoggerFactory.getLogger(getClass());
     private ExceptionHandler exceptionHandler;
 
     public void onInit(Route route) {
diff --git a/camel-core/src/main/java/org/apache/camel/support/ServiceSupport.java b/camel-core/src/main/java/org/apache/camel/support/ServiceSupport.java
index 4667ee3..c1ee206 100644
--- a/camel-core/src/main/java/org/apache/camel/support/ServiceSupport.java
+++ b/camel-core/src/main/java/org/apache/camel/support/ServiceSupport.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public abstract class ServiceSupport implements StatefulService {
-    private static final Logger LOG = LoggerFactory.getLogger(ServiceSupport.class);
 
     protected final AtomicBoolean started = new AtomicBoolean(false);
     protected final AtomicBoolean starting = new AtomicBoolean(false);
@@ -52,7 +51,7 @@ public abstract class ServiceSupport implements StatefulService {
     protected final AtomicBoolean shuttingdown = new AtomicBoolean(false);
     protected final AtomicBoolean shutdown = new AtomicBoolean(false);
 
-    private String version;
+    protected final Logger log = LoggerFactory.getLogger(getClass());
 
     /**
      * <b>Important: </b> You should override the lifecycle methods that start with <tt>do</tt>, eg {@link #doStart()},
@@ -63,11 +62,11 @@ public abstract class ServiceSupport implements StatefulService {
     public void start() throws Exception {
         if (isStarting() || isStarted()) {
             // only start service if not already started
-            LOG.trace("Service already started");
+            log.trace("Service already started");
             return;
         }
         if (starting.compareAndSet(false, true)) {
-            LOG.trace("Starting service");
+            log.trace("Starting service");
             try {
                 doStart();
                 started.set(true);
@@ -107,11 +106,11 @@ public abstract class ServiceSupport implements StatefulService {
      */
     public void stop() throws Exception {
         if (isStopped()) {
-            LOG.trace("Service already stopped");
+            log.trace("Service already stopped");
             return;
         }
         if (isStopping()) {
-            LOG.trace("Service already stopping");
+            log.trace("Service already stopping");
             return;
         }
         stopping.set(true);
@@ -192,7 +191,7 @@ public abstract class ServiceSupport implements StatefulService {
     @Override
     public void shutdown() throws Exception {
         if (shutdown.get()) {
-            LOG.trace("Service already shut down");
+            log.trace("Service already shut down");
             return;
         }
         // ensure we are stopped first
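
This ServiceSupport change is the one the renames above depend on: a single protected final "log" field, created with LoggerFactory.getLogger(getClass()), replaces the per-class static loggers. A minimal sketch of what a subclass looks like in practice; the class name and messages are illustrative only and not part of the commit:

    import org.apache.camel.support.ServiceSupport;

    // Hypothetical service, shown only to illustrate the inherited logger.
    public class MyService extends ServiceSupport {

        @Override
        protected void doStart() throws Exception {
            // Inherited from ServiceSupport; the logger is named after the
            // runtime class, here "MyService".
            log.info("Starting {}", getClass().getSimpleName());
        }

        @Override
        protected void doStop() throws Exception {
            log.info("Stopping {}", getClass().getSimpleName());
        }
    }
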
diff --git a/camel-core/src/main/java/org/apache/camel/util/component/AbstractApiConsumer.java b/camel-core/src/main/java/org/apache/camel/util/component/AbstractApiConsumer.java
index 900fb82..7b95a96 100644
--- a/camel-core/src/main/java/org/apache/camel/util/component/AbstractApiConsumer.java
+++ b/camel-core/src/main/java/org/apache/camel/util/component/AbstractApiConsumer.java
@@ -33,9 +33,6 @@ import org.slf4j.LoggerFactory;
 public abstract class AbstractApiConsumer<E extends Enum<E> & ApiName, T>
     extends ScheduledPollConsumer implements PropertyNamesInterceptor, PropertiesInterceptor, ResultInterceptor {
 
-    // logger
-    protected final Logger log = LoggerFactory.getLogger(getClass());
-
     // API Endpoint
     protected final AbstractApiEndpoint<E, T> endpoint;
 
diff --git a/camel-core/src/test/java/org/apache/camel/component/log/LogCustomLoggerTest.java b/camel-core/src/test/java/org/apache/camel/component/log/LogCustomLoggerTest.java
index 4a9045b..8ef0d1c 100644
--- a/camel-core/src/test/java/org/apache/camel/component/log/LogCustomLoggerTest.java
+++ b/camel-core/src/test/java/org/apache/camel/component/log/LogCustomLoggerTest.java
@@ -70,7 +70,7 @@ public class LogCustomLoggerTest extends ContextTestSupport {
         ConsumingAppender.newAppender(
             LogComponent.class.getCanonicalName(),
             "LogComponent",
-            Level.TRACE,
+            Level.INFO,
             event -> sw2.append(event.getLoggerName()));
     }
 
diff --git a/camel-core/src/test/java/org/apache/camel/impl/CustomIdFactoryTest.java b/camel-core/src/test/java/org/apache/camel/impl/CustomIdFactoryTest.java
index d7f6c87..7a175d4 100644
--- a/camel-core/src/test/java/org/apache/camel/impl/CustomIdFactoryTest.java
+++ b/camel-core/src/test/java/org/apache/camel/impl/CustomIdFactoryTest.java
@@ -23,7 +23,6 @@ import org.apache.camel.NamedNode;
 import org.apache.camel.Processor;
 import org.apache.camel.builder.RouteBuilder;
 import org.apache.camel.model.OptionalIdentifiedDefinition;
-import org.apache.camel.model.ProcessorDefinition;
 import org.apache.camel.processor.DelegateProcessor;
 import org.apache.camel.spi.InterceptStrategy;
 import org.apache.camel.spi.NodeIdFactory;
@@ -125,7 +124,7 @@ public class CustomIdFactoryTest extends ContextTestSupport {
             return new DelegateProcessor(target) {
                 @Override
                 protected void processNext(Exchange exchange) throws Exception {
-                    LOG.debug("Debugging at: {} with id: {} with exchange: {}", definition, definition.getId(), exchange);
+                    log.debug("Debugging at: {} with id: {} with exchange: {}", definition, definition.getId(), exchange);
 
                     // record the path taken at runtime
                     ids += definition.getId();
diff --git a/camel-core/src/test/java/org/apache/camel/impl/transformer/TransformerRouteTest.java b/camel-core/src/test/java/org/apache/camel/impl/transformer/TransformerRouteTest.java
index d170fb4..a6cca02 100644
--- a/camel-core/src/test/java/org/apache/camel/impl/transformer/TransformerRouteTest.java
+++ b/camel-core/src/test/java/org/apache/camel/impl/transformer/TransformerRouteTest.java
@@ -296,11 +296,11 @@ public class TransformerRouteTest extends ContextTestSupport {
                 public boolean process(Exchange exchange, AsyncCallback callback) {
                     Object input = exchange.getIn().getBody();
                     if (input instanceof XOrderResponse) {
-                        LOG.info("Endpoint: XOrderResponse -> XML");
+                        log.info("Endpoint: XOrderResponse -> XML");
                         exchange.getIn().setBody("<XOrderResponse/>");
                     } else {
                         assertEquals("<XOrder/>", input);
-                        LOG.info("Endpoint: XML -> XOrder");
+                        log.info("Endpoint: XML -> XOrder");
                         exchange.getIn().setBody(new XOrder());
                         
                     }
@@ -327,7 +327,7 @@ public class TransformerRouteTest extends ContextTestSupport {
         @Override
         public void transform(Message message, DataType from, DataType to) throws Exception {
             assertEquals("name=XOrder", message.getBody());
-            LOG.info("Bean: Other -> XOrder");
+            log.info("Bean: Other -> XOrder");
             message.setBody(new XOrder());
         }
     }
@@ -335,7 +335,7 @@ public class TransformerRouteTest extends ContextTestSupport {
     public static class XOrderResponseToOtherTransformer extends Transformer {
         @Override
         public void transform(Message message, DataType from, DataType to) throws Exception {
-            LOG.info("Bean: XOrderResponse -> Other");
+            log.info("Bean: XOrderResponse -> Other");
             message.setBody("name=XOrderResponse");
         }
     }
diff --git a/camel-core/src/test/java/org/apache/camel/impl/validator/ValidatorRouteTest.java b/camel-core/src/test/java/org/apache/camel/impl/validator/ValidatorRouteTest.java
index 4531b27..c45b7e0 100644
--- a/camel-core/src/test/java/org/apache/camel/impl/validator/ValidatorRouteTest.java
+++ b/camel-core/src/test/java/org/apache/camel/impl/validator/ValidatorRouteTest.java
@@ -161,7 +161,7 @@ public class ValidatorRouteTest extends ContextTestSupport {
         public void validate(Message message, DataType type) throws ValidationException {
             message.getExchange().setProperty(VALIDATOR_INVOKED, OtherXOrderValidator.class);
             assertEquals("name=XOrder", message.getBody());
-            LOG.info("Java validation: other XOrder");
+            log.info("Java validation: other XOrder");
         }
     }
     
@@ -170,7 +170,7 @@ public class ValidatorRouteTest extends ContextTestSupport {
         public void validate(Message message, DataType type) throws ValidationException {
             message.getExchange().setProperty(VALIDATOR_INVOKED, OtherXOrderResponseValidator.class);
             assertEquals("name=XOrderResponse", message.getBody());
-            LOG.info("Java validation: other XOrderResponse");
+            log.info("Java validation: other XOrderResponse");
         }
     }
     
diff --git a/camel-core/src/test/java/org/apache/camel/processor/LogEipPropagateExceptionTest.java b/camel-core/src/test/java/org/apache/camel/processor/LogEipPropagateExceptionTest.java
index 2aa4e4b..67e2d46 100644
--- a/camel-core/src/test/java/org/apache/camel/processor/LogEipPropagateExceptionTest.java
+++ b/camel-core/src/test/java/org/apache/camel/processor/LogEipPropagateExceptionTest.java
@@ -73,7 +73,7 @@ public class LogEipPropagateExceptionTest extends ContextTestSupport {
 
                 from("direct:handleFailure")
                     .errorHandler(noErrorHandler())
-                    .log("FAULTY LOG")
+                    .log("FAULTY log")
                     .to("mock:handleFailure");
 
                 from("direct:startSuccess")
diff --git a/components/camel-ahc-ws/src/main/java/org/apache/camel/component/ahc/ws/WsEndpoint.java b/components/camel-ahc-ws/src/main/java/org/apache/camel/component/ahc/ws/WsEndpoint.java
index c34b6ae..f453e3d 100644
--- a/components/camel-ahc-ws/src/main/java/org/apache/camel/component/ahc/ws/WsEndpoint.java
+++ b/components/camel-ahc-ws/src/main/java/org/apache/camel/component/ahc/ws/WsEndpoint.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
 @UriEndpoint(firstVersion = "2.14.0", scheme = "ahc-ws,ahc-wss", extendsScheme = "ahc,ahc", title = "AHC Websocket,AHC Secure Websocket",
         syntax = "ahc-ws:httpUri", consumerClass = WsConsumer.class, label = "websocket")
 public class WsEndpoint extends AhcEndpoint {
-    private static final transient Logger LOG = LoggerFactory.getLogger(WsEndpoint.class);
 
     private final Set<WsConsumer> consumers = new HashSet<>();
     private final WsListener listener = new WsListener();
@@ -120,7 +119,7 @@ public class WsEndpoint extends AhcEndpoint {
     public void connect() throws Exception {
         String uri = getHttpUri().toASCIIString();
 
-        LOG.debug("Connecting to {}", uri);
+        log.debug("Connecting to {}", uri);
         websocket = getClient().prepareGet(uri).execute(
             new WebSocketUpgradeHandler.Builder()
                 .addWebSocketListener(listener).build()).get();
@@ -129,8 +128,8 @@ public class WsEndpoint extends AhcEndpoint {
     @Override
     protected void doStop() throws Exception {
         if (websocket != null && websocket.isOpen()) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Disconnecting from {}", getHttpUri().toASCIIString());
+            if (log.isDebugEnabled()) {
+                log.debug("Disconnecting from {}", getHttpUri().toASCIIString());
             }
             websocket.removeWebSocketListener(listener);
             websocket.sendCloseFrame();
@@ -151,7 +150,7 @@ public class WsEndpoint extends AhcEndpoint {
     void reConnect() throws Exception {
         if (websocket == null || !websocket.isOpen()) {
             String uri = getHttpUri().toASCIIString();
-            LOG.info("Reconnecting websocket: {}", uri);
+            log.info("Reconnecting websocket: {}", uri);
             connect();
         }
     }
@@ -160,22 +159,22 @@ public class WsEndpoint extends AhcEndpoint {
 
         @Override
         public void onOpen(WebSocket websocket) {
-            LOG.debug("Websocket opened");
+            log.debug("Websocket opened");
         }
 
         @Override
         public void onClose(WebSocket websocket, int code, String reason) {
-            LOG.debug("websocket closed - reconnecting");
+            log.debug("websocket closed - reconnecting");
             try {
                 reConnect();
             } catch (Exception e) {
-                LOG.warn("Error re-connecting to websocket", e);
+                log.warn("Error re-connecting to websocket", e);
             }
         }
 
         @Override
         public void onError(Throwable t) {
-            LOG.debug("websocket on error", t);
+            log.debug("websocket on error", t);
             if (isSendMessageOnError()) {
                 for (WsConsumer consumer : consumers) {
                     consumer.sendMessage(t);
@@ -185,7 +184,7 @@ public class WsEndpoint extends AhcEndpoint {
 
         @Override
         public void onBinaryFrame(byte[] message, boolean finalFragment, int rsv) {
-            LOG.debug("Received message --> {}", message);
+            log.debug("Received message --> {}", message);
             for (WsConsumer consumer : consumers) {
                 consumer.sendMessage(message);
             }
@@ -193,7 +192,7 @@ public class WsEndpoint extends AhcEndpoint {
 
         @Override
         public void onTextFrame(String message, boolean finalFragment, int rsv) {
-            LOG.debug("Received message --> {}", message);
+            log.debug("Received message --> {}", message);
             for (WsConsumer consumer : consumers) {
                 consumer.sendMessage(message);
             }
diff --git a/components/camel-ahc/src/main/java/org/apache/camel/component/ahc/AhcComponent.java b/components/camel-ahc/src/main/java/org/apache/camel/component/ahc/AhcComponent.java
index d229714..ca48903 100644
--- a/components/camel-ahc/src/main/java/org/apache/camel/component/ahc/AhcComponent.java
+++ b/components/camel-ahc/src/main/java/org/apache/camel/component/ahc/AhcComponent.java
@@ -42,8 +42,6 @@ import org.slf4j.LoggerFactory;
  */
 public class AhcComponent extends HeaderFilterStrategyComponent implements SSLContextParametersAware {
     
-    private static final Logger LOG = LoggerFactory.getLogger(AhcComponent.class);
-    
     private static final String CLIENT_CONFIG_PREFIX = "clientConfig.";
     private static final String CLIENT_REALM_CONFIG_PREFIX = "clientConfig.realm.";
 
@@ -88,12 +86,12 @@ public class AhcComponent extends HeaderFilterStrategyComponent implements SSLCo
                     ? new DefaultAsyncHttpClientConfig.Builder() : AhcComponent.cloneConfig(endpoint.getClientConfig());
             
             if (endpoint.getClient() != null) {
-                LOG.warn("The user explicitly set an AsyncHttpClient instance on the component or "
+                log.warn("The user explicitly set an AsyncHttpClient instance on the component or "
                          + "endpoint, but this endpoint URI contains client configuration parameters.  "
                          + "Are you sure that this is what was intended?  The AsyncHttpClient will be used"
                          + " and the URI parameters will be ignored.");
             } else if (endpoint.getClientConfig() != null) {
-                LOG.warn("The user explicitly set an AsyncHttpClientConfig instance on the component or "
+                log.warn("The user explicitly set an AsyncHttpClientConfig instance on the component or "
                          + "endpoint, but this endpoint URI contains client configuration parameters.  "
                          + "Are you sure that this is what was intended?  The URI parameters will be applied"
                          + " to a clone of the supplied AsyncHttpClientConfig in order to prevent unintended modification"
diff --git a/components/camel-as2/camel-as2-component/src/main/java/org/apache/camel/component/as2/AS2Component.java b/components/camel-as2/camel-as2-component/src/main/java/org/apache/camel/component/as2/AS2Component.java
index 5444f61..c5d9619 100644
--- a/components/camel-as2/camel-as2-component/src/main/java/org/apache/camel/component/as2/AS2Component.java
+++ b/components/camel-as2/camel-as2-component/src/main/java/org/apache/camel/component/as2/AS2Component.java
@@ -32,8 +32,6 @@ import org.slf4j.LoggerFactory;
  */
 public class AS2Component extends AbstractApiComponent<AS2ApiName, AS2Configuration, AS2ApiCollection> {
     
-    private static final Logger LOG = LoggerFactory.getLogger(AS2Component.class);
-
     public AS2Component() {
         super(AS2Endpoint.class, AS2ApiName.class, AS2ApiCollection.getCollection());
     }
@@ -67,7 +65,7 @@ public class AS2Component extends AbstractApiComponent<AS2ApiName, AS2Configurat
     protected void doStart() throws Exception {
         super.doStart();
         if (Security.getProvider("BC") == null) {
-            LOG.debug("Adding BouncyCastleProvider as security provider");
+            log.debug("Adding BouncyCastleProvider as security provider");
             Security.addProvider(new BouncyCastleProvider());
         }
     }
diff --git a/components/camel-asterisk/src/main/java/org/apache/camel/component/asterisk/AsteriskProducer.java b/components/camel-asterisk/src/main/java/org/apache/camel/component/asterisk/AsteriskProducer.java
index eaff92e..04a32bc 100644
--- a/components/camel-asterisk/src/main/java/org/apache/camel/component/asterisk/AsteriskProducer.java
+++ b/components/camel-asterisk/src/main/java/org/apache/camel/component/asterisk/AsteriskProducer.java
@@ -25,14 +25,11 @@ import org.asteriskjava.manager.AuthenticationFailedException;
 import org.asteriskjava.manager.TimeoutException;
 import org.asteriskjava.manager.action.ManagerAction;
 import org.asteriskjava.manager.response.ManagerResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The Asterisk producer.
  */
 public class AsteriskProducer extends DefaultProducer {
-    private static final Logger LOG = LoggerFactory.getLogger(AsteriskProducer.class);
 
     private final AsteriskEndpoint endpoint;
     private final AsteriskConnection connection;
@@ -70,7 +67,7 @@ public class AsteriskProducer extends DefaultProducer {
         // Action must be set
         ObjectHelper.notNull(action, "action");
 
-        LOG.debug("Send action {}", action);
+        log.debug("Send action {}", action);
 
         ManagerAction managerAction = action.apply(exchange);
         ManagerResponse managerResponse = connection.sendAction(managerAction);
diff --git a/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/AtmosEndpoint.java b/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/AtmosEndpoint.java
index c729284..df507ea 100644
--- a/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/AtmosEndpoint.java
+++ b/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/AtmosEndpoint.java
@@ -30,8 +30,6 @@ import org.apache.camel.component.atmos.util.AtmosOperation;
 import org.apache.camel.impl.DefaultEndpoint;
 import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.atmos.util.AtmosConstants.POLL_CONSUMER_DELAY;
 
@@ -41,8 +39,6 @@ import static org.apache.camel.component.atmos.util.AtmosConstants.POLL_CONSUMER
 @UriEndpoint(firstVersion = "2.15.0", scheme = "atmos", title = "Atmos", syntax = "atmos:name/operation", consumerClass = AtmosScheduledPollConsumer.class, label = "file,cloud")
 public class AtmosEndpoint extends DefaultEndpoint {
 
-    private static final transient Logger LOG = LoggerFactory.getLogger(AtmosEndpoint.class);
-
     @UriParam
     private AtmosConfiguration configuration;
 
@@ -69,8 +65,8 @@ public class AtmosEndpoint extends DefaultEndpoint {
      * @throws Exception
      */
     public Producer createProducer() throws Exception {
-        LOG.debug("resolve producer atmos endpoint {{}}", configuration.getOperation());
-        LOG.debug("resolve producer atmos attached client: {}", configuration.getClient());
+        log.debug("resolve producer atmos endpoint {{}}", configuration.getOperation());
+        log.debug("resolve producer atmos attached client: {}", configuration.getClient());
         if (configuration.getOperation() == AtmosOperation.put) {
             return new AtmosPutProducer(this, configuration);
         } else if (this.configuration.getOperation() == AtmosOperation.del) {
@@ -92,8 +88,8 @@ public class AtmosEndpoint extends DefaultEndpoint {
      * @throws Exception
      */
     public Consumer createConsumer(Processor processor) throws Exception {
-        LOG.debug("resolve consumer atmos endpoint {{}}", configuration.getOperation());
-        LOG.debug("resolve consumer atmos attached client:{}", configuration.getClient());
+        log.debug("resolve consumer atmos endpoint {{}}", configuration.getOperation());
+        log.debug("resolve consumer atmos attached client:{}", configuration.getClient());
 
         AtmosScheduledPollConsumer consumer;
         if (this.configuration.getOperation() == AtmosOperation.get) {
diff --git a/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/integration/consumer/AtmosScheduledPollConsumer.java b/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/integration/consumer/AtmosScheduledPollConsumer.java
index 3a491ac..a58f99d 100644
--- a/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/integration/consumer/AtmosScheduledPollConsumer.java
+++ b/components/camel-atmos/src/main/java/org/apache/camel/component/atmos/integration/consumer/AtmosScheduledPollConsumer.java
@@ -25,7 +25,7 @@ import org.slf4j.LoggerFactory;
 
 
 public abstract class AtmosScheduledPollConsumer extends ScheduledPollConsumer {
-    protected static final transient Logger LOG = LoggerFactory.getLogger(AtmosScheduledPollConsumer.class);
+
     protected AtmosEndpoint endpoint;
     protected AtmosConfiguration configuration;
 
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/AbstractAtomixClientProducer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/AbstractAtomixClientProducer.java
index 21e785f..3a6d24a 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/AbstractAtomixClientProducer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/AbstractAtomixClientProducer.java
@@ -40,7 +40,7 @@ import static org.apache.camel.component.atomix.client.AtomixClientConstants.RES
 import static org.apache.camel.component.atomix.client.AtomixClientConstants.RESOURCE_NAME;
 
 public abstract class AbstractAtomixClientProducer<E extends AbstractAtomixClientEndpoint, R extends Resource> extends DefaultProducer implements AsyncProcessor {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractAtomixClientProducer.class);
+
     private final Map<String, AtomixAsyncMessageProcessor> processors;
     private ConcurrentMap<String, R> resources;
 
@@ -142,7 +142,7 @@ public abstract class AbstractAtomixClientProducer<E extends AbstractAtomixClien
                 throw new IllegalArgumentException("Second argument should be of type AsyncCallback");
             }
 
-            LOGGER.debug("bind key={}, class={}, method={}",
+            log.debug("bind key={}, class={}, method={}",
                 annotation.value(), this.getClass(), method.getName());
 
             this.processors.put(annotation.value(), (m, c) -> (boolean)method.invoke(this, m, c));
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/map/AtomixMapConsumer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/map/AtomixMapConsumer.java
index 73269ee..bf4e41c 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/map/AtomixMapConsumer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/map/AtomixMapConsumer.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 final class AtomixMapConsumer extends AbstractAtomixClientConsumer<AtomixMapEndpoint> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixMapConsumer.class);
 
     private final List<Listener<DistributedMap.EntryEvent<Object, Object>>> listeners;
     private final String resourceName;
@@ -58,12 +57,12 @@ final class AtomixMapConsumer extends AbstractAtomixClientConsumer<AtomixMapEndp
 
         Object key = getAtomixEndpoint().getConfiguration().getKey();
         if (key == null) {
-            LOGGER.debug("Subscribe to events for map: {}", resourceName);
+            log.debug("Subscribe to events for map: {}", resourceName);
             this.listeners.add(this.map.onAdd(this::onEvent).join());
             this.listeners.add(this.map.onRemove(this::onEvent).join());
             this.listeners.add(this.map.onUpdate(this::onEvent).join());
         } else {
-            LOGGER.debug("Subscribe to events for map: {}, key: {}", resourceName, key);
+            log.debug("Subscribe to events for map: {}, key: {}", resourceName, key);
             this.listeners.add(this.map.onAdd(key, this::onEvent).join());
             this.listeners.add(this.map.onRemove(key, this::onEvent).join());
             this.listeners.add(this.map.onUpdate(key, this::onEvent).join());
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/messaging/AtomixMessagingConsumer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/messaging/AtomixMessagingConsumer.java
index 547a66c..b738353 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/messaging/AtomixMessagingConsumer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/messaging/AtomixMessagingConsumer.java
@@ -37,7 +37,6 @@ import static org.apache.camel.component.atomix.client.AtomixClientConstants.MEM
 import static org.apache.camel.component.atomix.client.AtomixClientConstants.RESOURCE_NAME;
 
 final class AtomixMessagingConsumer extends AbstractAtomixClientConsumer<AtomixMessagingEndpoint> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixMessagingConsumer.class);
 
     private final List<Listener<Message<Object>>> listeners;
     private final String resultHeader;
@@ -74,7 +73,7 @@ final class AtomixMessagingConsumer extends AbstractAtomixClientConsumer<AtomixM
         this.localMember = group.join(memberName).join();
         this.consumer = localMember.messaging().consumer(channelName);
 
-        LOGGER.debug("Subscribe to group: {}, member: {}, channel: {}", groupName, memberName, channelName);
+        log.debug("Subscribe to group: {}, member: {}, channel: {}", groupName, memberName, channelName);
         this.listeners.add(consumer.onMessage(this::onMessage));
     }
 
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/queue/AtomixQueueConsumer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/queue/AtomixQueueConsumer.java
index eef6eec..4a9486d 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/queue/AtomixQueueConsumer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/queue/AtomixQueueConsumer.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 final class AtomixQueueConsumer extends AbstractAtomixClientConsumer<AtomixQueueEndpoint> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixQueueConsumer.class);
 
     private final List<Listener<DistributedQueue.ValueEvent<Object>>> listeners;
     private final String resourceName;
@@ -56,7 +55,7 @@ final class AtomixQueueConsumer extends AbstractAtomixClientConsumer<AtomixQueue
             .join();
 
 
-        LOGGER.debug("Subscribe to events for queue: {}", resourceName);
+        log.debug("Subscribe to events for queue: {}", resourceName);
         this.listeners.add(this.queue.onAdd(this::onEvent).join());
         this.listeners.add(this.queue.onRemove(this::onEvent).join());
     }
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/set/AtomixSetConsumer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/set/AtomixSetConsumer.java
index e20a719..7b85e2a 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/set/AtomixSetConsumer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/set/AtomixSetConsumer.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 final class AtomixSetConsumer extends AbstractAtomixClientConsumer<AtomixSetEndpoint> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixSetConsumer.class);
 
     private final List<Listener<DistributedSet.ValueEvent<Object>>> listeners;
     private final String resourceName;
@@ -56,7 +55,7 @@ final class AtomixSetConsumer extends AbstractAtomixClientConsumer<AtomixSetEndp
             .join();
 
 
-        LOGGER.debug("Subscribe to events for set: {}", resourceName);
+        log.debug("Subscribe to events for set: {}", resourceName);
         this.listeners.add(this.set.onAdd(this::onEvent).join());
         this.listeners.add(this.set.onRemove(this::onEvent).join());
     }
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/value/AtomixValueConsumer.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/value/AtomixValueConsumer.java
index 5c067bf..2192ab1 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/value/AtomixValueConsumer.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/client/value/AtomixValueConsumer.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 final class AtomixValueConsumer extends AbstractAtomixClientConsumer<AtomixValueEndpoint> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixValueConsumer.class);
 
     private final List<Listener<DistributedValue.ChangeEvent<Object>>> listeners;
     private final String resourceName;
@@ -56,7 +55,7 @@ final class AtomixValueConsumer extends AbstractAtomixClientConsumer<AtomixValue
             .join();
 
 
-        LOGGER.debug("Subscribe to events for value: {}", resourceName);
+        log.debug("Subscribe to events for value: {}", resourceName);
         this.listeners.add(this.value.onChange(this::onEvent).join());
     }
 
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterService.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterService.java
index 3f14f9c..1ef2d04 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterService.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterService.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class AtomixClusterService extends AbstractCamelClusterService<AtomixClusterView> {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixClusterService.class);
 
     private Address address;
     private AtomixClusterConfiguration configuration;
@@ -144,7 +143,7 @@ public final class AtomixClusterService extends AbstractCamelClusterService<Atom
         super.doStop();
 
         if (atomix != null) {
-            LOGGER.debug("Leaving atomix cluster replica {}", atomix);
+            log.debug("Leaving atomix cluster replica {}", atomix);
             atomix.leave().join();
         }
     }
@@ -164,13 +163,13 @@ public final class AtomixClusterService extends AbstractCamelClusterService<Atom
             atomix = AtomixClusterHelper.createReplica(getCamelContext(), address, configuration);
 
             if (ObjectHelper.isNotEmpty(configuration.getNodes())) {
-                LOGGER.debug("Bootstrap cluster on address {} for nodes: {}", address, configuration.getNodes());
+                log.debug("Bootstrap cluster on address {} for nodes: {}", address, configuration.getNodes());
                 this.atomix.bootstrap(configuration.getNodes()).join();
-                LOGGER.debug("Bootstrap cluster done");
+                log.debug("Bootstrap cluster done");
             } else {
-                LOGGER.debug("Bootstrap cluster on address {}", address, configuration.getNodes());
+                log.debug("Bootstrap cluster on address {}", address);
                 this.atomix.bootstrap().join();
-                LOGGER.debug("Bootstrap cluster done");
+                log.debug("Bootstrap cluster done");
             }
         }
 
diff --git a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterView.java b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterView.java
index 6305944..64852ba 100644
--- a/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterView.java
+++ b/components/camel-atomix/src/main/java/org/apache/camel/component/atomix/cluster/AtomixClusterView.java
@@ -31,11 +31,8 @@ import org.apache.camel.cluster.CamelClusterService;
 import org.apache.camel.component.atomix.AtomixConfiguration;
 import org.apache.camel.impl.cluster.AbstractCamelClusterView;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 final class AtomixClusterView extends AbstractCamelClusterView {
-    private static final Logger LOGGER = LoggerFactory.getLogger(AtomixClusterView.class);
 
     private final Atomix atomix;
     private final AtomixLocalMember localMember;
@@ -84,7 +81,7 @@ final class AtomixClusterView extends AbstractCamelClusterView {
     @Override
     protected void doStart() throws Exception {
         if (!localMember.hasJoined()) {
-            LOGGER.debug("Get group {}", getNamespace());
+            log.debug("Get group {}", getNamespace());
 
             group = this.atomix.getGroup(
                 getNamespace(),
@@ -92,28 +89,28 @@ final class AtomixClusterView extends AbstractCamelClusterView {
                 new DistributedGroup.Options(configuration.getResourceOptions(getNamespace()))
             ).get();
 
-            LOGGER.debug("Listen election events");
+            log.debug("Listen election events");
             group.election().onElection(term -> {
                 if (isRunAllowed()) {
                     fireLeadershipChangedEvent(Optional.of(toClusterMember(term.leader())));
                 }
             });
 
-            LOGGER.debug("Listen join events");
+            log.debug("Listen join events");
             group.onJoin(member -> {
                 if (isRunAllowed()) {
                     fireMemberAddedEvent(toClusterMember(member));
                 }
             });
 
-            LOGGER.debug("Listen leave events");
+            log.debug("Listen leave events");
             group.onLeave(member -> {
                 if (isRunAllowed()) {
                     fireMemberRemovedEvent(toClusterMember(member));
                 }
             });
 
-            LOGGER.debug("Join group {}", getNamespace());
+            log.debug("Join group {}", getNamespace());
             localMember.join();
         }
     }
@@ -178,11 +175,11 @@ final class AtomixClusterView extends AbstractCamelClusterView {
             if (member == null && group != null) {
                 String id = getClusterService().getId();
                 if (ObjectHelper.isEmpty(id) || configuration.isEphemeral()) {
-                    LOGGER.debug("Joining group: {}", group);
+                    log.debug("Joining group: {}", group);
                     member = group.join().join();
-                    LOGGER.debug("Group {} joined with id {}", group, member.id());
+                    log.debug("Group {} joined with id {}", group, member.id());
                 } else {
-                    LOGGER.debug("Joining group: {}, with id: {}", group, id);
+                    log.debug("Joining group: {}, with id: {}", group, id);
                     member = group.join(id).join();
                 }
             }
@@ -194,7 +191,7 @@ final class AtomixClusterView extends AbstractCamelClusterView {
             if (member != null) {
                 String id = member.id();
 
-                LOGGER.debug("Member {} : leave group {}", id, group);
+                log.debug("Member {} : leave group {}", id, group);
 
                 member.leave().join();
                 group.remove(id).join();
diff --git a/components/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/XRayTracer.java b/components/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/XRayTracer.java
index 1f3d3a2..1f20958 100644
--- a/components/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/XRayTracer.java
+++ b/components/camel-aws-xray/src/main/java/org/apache/camel/component/aws/xray/XRayTracer.java
@@ -258,14 +258,14 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
 
             if (event instanceof ExchangeSendingEvent) {
                 ExchangeSendingEvent ese = (ExchangeSendingEvent) event;
-                LOG.trace("-> {} - target: {} (routeId: {})",
+                log.trace("-> {} - target: {} (routeId: {})",
                         event.getClass().getSimpleName(), ese.getEndpoint(),
                         ese.getExchange().getFromRouteId());
 
                 if (Thread.currentThread().getName().contains("Multicast")) {
                     // copy the segment from the exchange to the thread (local) context
                     Segment segment = (Segment) ese.getExchange().getProperty(CURRENT_SEGMENT);
-                    LOG.trace("Copying over segment {}/{} from exchange received from {} to exchange processing {}",
+                    log.trace("Copying over segment {}/{} from exchange received from {} to exchange processing {}",
                             segment.getId(), segment.getName(), ese.getExchange().getFromEndpoint(),
                             ese.getEndpoint());
                     AWSXRay.setTraceEntity(segment);
@@ -286,19 +286,19 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                     try {
                         Subsegment subsegment = AWSXRay.beginSubsegment(sanitizeName(name));
                         sd.pre(subsegment, ese.getExchange(), ese.getEndpoint());
-                        LOG.trace("Creating new subsegment with ID {} and name {}",
+                        log.trace("Creating new subsegment with ID {} and name {}",
                                 subsegment.getId(), subsegment.getName());
                     } catch (AlreadyEmittedException aeEx) {
-                        LOG.warn("Ignoring starting of subsegment " + name + " as its parent segment"
+                        log.warn("Ignoring starting of subsegment " + name + " as its parent segment"
                                 + " was already emitted to AWS.");
                     }
                 } else {
-                    LOG.trace("Ignoring creation of XRay subsegment as no segment exists in the current thread");
+                    log.trace("Ignoring creation of XRay subsegment as no segment exists in the current thread");
                 }
 
             } else if (event instanceof ExchangeSentEvent) {
                 ExchangeSentEvent ese = (ExchangeSentEvent) event;
-                LOG.trace("-> {} - target: {} (routeId: {})",
+                log.trace("-> {} - target: {} (routeId: {})",
                         event.getClass().getSimpleName(), ese.getEndpoint(), ese.getExchange().getFromRouteId());
 
                 SegmentDecorator sd = getSegmentDecorator(ese.getEndpoint());
@@ -309,15 +309,15 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                         Subsegment subsegment = AWSXRay.getCurrentSubsegment();
                         sd.post(subsegment, ese.getExchange(), ese.getEndpoint());
                         subsegment.close();
-                        LOG.trace("Closing down subsegment with ID {} and name {}",
+                        log.trace("Closing down subsegment with ID {} and name {}",
                                 subsegment.getId(), subsegment.getName());
                     } catch (AlreadyEmittedException aeEx) {
-                        LOG.warn("Ignoring close of subsegment " + name
+                        log.warn("Ignoring close of subsegment " + name
                                 + " as its parent segment was already emitted to AWS");
                     }
                 }
             } else {
-                LOG.trace("Received event {} from source {}", event, event.getSource());
+                log.trace("Received event {} from source {}", event, event.getSource());
             }
         }
 
@@ -364,7 +364,7 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                 return;
             }
 
-            LOG.trace("=> RoutePolicy-Begin: Route: {} - RouteId: {}", routeId, route.getId());
+            log.trace("=> RoutePolicy-Begin: Route: {} - RouteId: {}", routeId, route.getId());
 
             TraceID traceID;
             if (exchange.getIn().getHeaders().containsKey(XRAY_TRACE_ID)) {
@@ -399,7 +399,7 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                 Segment segment = AWSXRay.beginSegment(sanitizeName(route.getId()));
                 segment.setTraceId(traceID);
                 sd.pre(segment, exchange, route.getEndpoint());
-                LOG.trace("Created new XRay segment {} with name {}",
+                log.trace("Created new XRay segment {} with name {}",
                         segment.getId(), segment.getName());
                 exchange.setProperty(CURRENT_SEGMENT, segment);
             } else {
@@ -407,10 +407,10 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                 try {
                     Subsegment subsegment = AWSXRay.beginSubsegment(route.getId());
                     sd.pre(subsegment, exchange, route.getEndpoint());
-                    LOG.trace("Created new XRay subsegment {} with name {}",
+                    log.trace("Created new XRay subsegment {} with name {}",
                             subsegment.getId(), subsegment.getName());
                 } catch (AlreadyEmittedException aeEx) {
-                    LOG.warn("Ignoring opening of subsegment " + route.getId() + " as its parent segment "
+                    log.warn("Ignoring opening of subsegment " + route.getId() + " as its parent segment "
                             + segmentName + " was already emitted before.");
                 }
             }
@@ -423,7 +423,7 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                 return;
             }
 
-            LOG.trace("=> RoutePolicy-Done: Route: {} - RouteId: {}", routeId, route.getId());
+            log.trace("=> RoutePolicy-Done: Route: {} - RouteId: {}", routeId, route.getId());
 
             try {
                 SegmentDecorator sd = getSegmentDecorator(route.getEndpoint());
@@ -433,17 +433,17 @@ public class XRayTracer extends ServiceSupport implements RoutePolicyFactory, St
                     Subsegment subsegment = curSubSegment.get();
                     sd.post(subsegment, exchange, route.getEndpoint());
                     subsegment.close();
-                    LOG.trace("Closing down Subsegment {} with name {}",
+                    log.trace("Closing down Subsegment {} with name {}",
                             subsegment.getId(), subsegment.getName());
                 } else if (curSegment.isPresent()) {
                     Segment segment = curSegment.get();
                     sd.post(segment, exchange, route.getEndpoint());
                     segment.close();
-                    LOG.trace("Closing down Segment {} with name {}",
+                    log.trace("Closing down Segment {} with name {}",
                             segment.getId(), segment.getName());
                 }
             } catch (AlreadyEmittedException aeEx) {
-                LOG.warn("Ignoring closing of (sub)segment {} as the segment was already emitted.", route.getId());
+                log.warn("Ignoring closing of (sub)segment {} as the segment was already emitted.", route.getId());
             }
         }
 
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddb/DdbEndpoint.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddb/DdbEndpoint.java
index 7766851..a86fb42 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddb/DdbEndpoint.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddb/DdbEndpoint.java
@@ -41,8 +41,6 @@ import org.apache.camel.impl.ScheduledPollEndpoint;
 import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The aws-ddb component is used for storing and retrieving data from Amazon's DynamoDB service.
@@ -50,8 +48,6 @@ import org.slf4j.LoggerFactory;
 @UriEndpoint(firstVersion = "2.10.0", scheme = "aws-ddb", title = "AWS DynamoDB", syntax = "aws-ddb:tableName", producerOnly = true, label = "cloud,database,nosql")
 public class DdbEndpoint extends ScheduledPollEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DdbEndpoint.class);
-
     @UriParam
     private DdbConfiguration configuration;
 
@@ -82,7 +78,7 @@ public class DdbEndpoint extends ScheduledPollEndpoint {
             : createDdbClient();
         
         String tableName = getConfiguration().getTableName();
-        LOG.trace("Querying whether table [{}] already exists...", tableName);
+        log.trace("Querying whether table [{}] already exists...", tableName);
 
         try {
             DescribeTableRequest request = new DescribeTableRequest().withTableName(tableName);
@@ -91,17 +87,17 @@ public class DdbEndpoint extends ScheduledPollEndpoint {
                 waitForTableToBecomeAvailable(tableName);
             }
 
-            LOG.trace("Table [{}] already exists", tableName);
+            log.trace("Table [{}] already exists", tableName);
             return;
         } catch (ResourceNotFoundException e) {
-            LOG.trace("Table [{}] doesn't exist yet", tableName);
-            LOG.trace("Creating table [{}]...", tableName);
+            log.trace("Table [{}] doesn't exist yet", tableName);
+            log.trace("Creating table [{}]...", tableName);
             TableDescription tableDescription = createTable(tableName);
             if (!isTableActive(tableDescription)) {
                 waitForTableToBecomeAvailable(tableName);
             }
 
-            LOG.trace("Table [{}] created", tableName);
+            log.trace("Table [{}] created", tableName);
         }
     }
     
@@ -169,7 +165,7 @@ public class DdbEndpoint extends ScheduledPollEndpoint {
     }
 
     private void waitForTableToBecomeAvailable(String tableName) {
-        LOG.trace("Waiting for [{}] to become ACTIVE...", tableName);
+        log.trace("Waiting for [{}] to become ACTIVE...", tableName);
 
         long waitTime = 5 * 60 * 1000;
         while (waitTime > 0) {
@@ -182,10 +178,10 @@ public class DdbEndpoint extends ScheduledPollEndpoint {
                 DescribeTableRequest request = new DescribeTableRequest().withTableName(tableName);
                 TableDescription tableDescription = getDdbClient().describeTable(request).getTable();
                 if (isTableActive(tableDescription)) {
-                    LOG.trace("Table [{}] became active", tableName);
+                    log.trace("Table [{}] became active", tableName);
                     return;
                 }
-                LOG.trace("Table [{}] not active yet", tableName);
+                log.trace("Table [{}] not active yet", tableName);
             } catch (AmazonServiceException ase) {
                 if (!ase.getErrorCode().equalsIgnoreCase("ResourceNotFoundException")) {
                     throw ase;
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddbstream/DdbStreamConsumer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddbstream/DdbStreamConsumer.java
index df7faad..7b91318 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddbstream/DdbStreamConsumer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ddbstream/DdbStreamConsumer.java
@@ -32,11 +32,8 @@ import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledBatchPollingConsumer;
 import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class DdbStreamConsumer extends ScheduledBatchPollingConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(DdbStreamConsumer.class);
 
     private final ShardIteratorHandler shardIteratorHandler;
     private String lastSeenSequenceNumber;
@@ -59,7 +56,7 @@ public class DdbStreamConsumer extends ScheduledBatchPollingConsumer {
                         .withLimit(getEndpoint().getConfiguration().getMaxResultsPerRequest());
             result = getClient().getRecords(req);
         } catch (ExpiredIteratorException e) {
-            LOG.warn("Expired Shard Iterator, attempting to resume from {}", lastSeenSequenceNumber, e);
+            log.warn("Expired Shard Iterator, attempting to resume from {}", lastSeenSequenceNumber, e);
             GetRecordsRequest req = new GetRecordsRequest()
                         .withShardIterator(shardIteratorHandler.getShardIterator(lastSeenSequenceNumber))
                         .withLimit(getEndpoint().getConfiguration().getMaxResultsPerRequest());
@@ -84,11 +81,11 @@ public class DdbStreamConsumer extends ScheduledBatchPollingConsumer {
         while (!exchanges.isEmpty()) {
             final Exchange exchange = ObjectHelper.cast(Exchange.class, exchanges.poll());
 
-            LOG.trace("Processing exchange [{}] started.", exchange);
+            log.trace("Processing exchange [{}] started.", exchange);
             getAsyncProcessor().process(exchange, new AsyncCallback() {
                 @Override
                 public void done(boolean doneSync) {
-                    LOG.trace("Processing exchange [{}] done.", exchange);
+                    log.trace("Processing exchange [{}] done.", exchange);
                 }
             });
             processedExchanges++;
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ec2/EC2Producer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ec2/EC2Producer.java
index 505c689..86affab 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/ec2/EC2Producer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/ec2/EC2Producer.java
@@ -51,8 +51,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -62,8 +60,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class EC2Producer extends DefaultProducer {
     
-    private static final Logger LOG = LoggerFactory.getLogger(EC2Producer.class);
-    
     private transient String ec2ProducerToString;
 
     public EC2Producer(Endpoint endpoint) {
@@ -199,10 +195,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.runInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Run Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Run Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Creating and running instances with ami [{}] and instance type {}", ami, instanceType);
+        log.trace("Creating and running instances with ami [{}] and instance type {}", ami, instanceType);
         Message message = getMessageForResponse(exchange);
         message.setBody(result);
     }
@@ -220,10 +216,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.startInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Start Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Start Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Starting instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+        log.trace("Starting instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result);        
     }
@@ -241,10 +237,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.stopInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Stop Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Stop Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Stopping instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+        log.trace("Stopping instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result);        
     }
@@ -262,10 +258,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.terminateInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Terminate Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Terminate Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Terminating instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+        log.trace("Terminating instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result);        
     }
@@ -281,7 +277,7 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.describeInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Describe Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Describe Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -299,7 +295,7 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.describeInstanceStatus(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Describe Instances Status command returned the error code {}", ase.getErrorCode());
+            log.trace("Describe Instances Status command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -316,10 +312,10 @@ public class EC2Producer extends DefaultProducer {
             throw new IllegalArgumentException("Instances Ids must be specified");
         }
         try {
-            LOG.trace("Rebooting instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+            log.trace("Rebooting instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
             ec2Client.rebootInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Reboot Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Reboot Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
     }
@@ -337,10 +333,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.monitorInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Monitor Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Monitor Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Start Monitoring instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+        log.trace("Start Monitoring instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result); 
     }
@@ -358,10 +354,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.unmonitorInstances(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Unmonitor Instances command returned the error code {}", ase.getErrorCode());
+            log.trace("Unmonitor Instances command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Stop Monitoring instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
+        log.trace("Stop Monitoring instances with Ids [{}] ", Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result); 
     }
@@ -386,10 +382,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.createTags(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Create tags command returned the error code {}", ase.getErrorCode());
+            log.trace("Create tags command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Created tags [{}] on resources with Ids [{}] ", Arrays.toString(tags.toArray()), Arrays.toString(instanceIds.toArray()));
+        log.trace("Created tags [{}] on resources with Ids [{}] ", Arrays.toString(tags.toArray()), Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result); 
     }
@@ -414,10 +410,10 @@ public class EC2Producer extends DefaultProducer {
         try {
             result = ec2Client.deleteTags(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Delete tags command returned the error code {}", ase.getErrorCode());
+            log.trace("Delete tags command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
-        LOG.trace("Delete tags [{}] on resources with Ids [{}] ", Arrays.toString(tags.toArray()), Arrays.toString(instanceIds.toArray()));
+        log.trace("Delete tags [{}] on resources with Ids [{}] ", Arrays.toString(tags.toArray()), Arrays.toString(instanceIds.toArray()));
         Message message = getMessageForResponse(exchange);
         message.setBody(result); 
     }
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/iam/IAMProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/iam/IAMProducer.java
index ffe9b61..d31cdc7 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/iam/IAMProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/iam/IAMProducer.java
@@ -40,8 +40,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -51,8 +49,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class IAMProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(IAMProducer.class);
-
     private transient String iamProducerToString;
 
     public IAMProducer(Endpoint endpoint) {
@@ -120,7 +116,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.listAccessKeys();
         } catch (AmazonServiceException ase) {
-            LOG.trace("List Access Keys command returned the error code {}", ase.getErrorCode());
+            log.trace("List Access Keys command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -137,7 +133,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.createUser(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Create user command returned the error code {}", ase.getErrorCode());
+            log.trace("Create user command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -154,7 +150,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.deleteUser(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Delete user command returned the error code {}", ase.getErrorCode());
+            log.trace("Delete user command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -171,7 +167,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.getUser(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("get user command returned the error code {}", ase.getErrorCode());
+            log.trace("get user command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -183,7 +179,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.listUsers();
         } catch (AmazonServiceException ase) {
-            LOG.trace("List users command returned the error code {}", ase.getErrorCode());
+            log.trace("List users command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -200,7 +196,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.createAccessKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Create Access Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Create Access Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -223,7 +219,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.deleteAccessKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Delete Access Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Delete Access Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -252,7 +248,7 @@ public class IAMProducer extends DefaultProducer {
         try {
             result = iamClient.updateAccessKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Update Access Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Update Access Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/kinesis/KinesisConsumer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/kinesis/KinesisConsumer.java
index 52bd17a..0ce61ef 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/kinesis/KinesisConsumer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/kinesis/KinesisConsumer.java
@@ -38,11 +38,8 @@ import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledBatchPollingConsumer;
 import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class KinesisConsumer extends ScheduledBatchPollingConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(KinesisConsumer.class);
 
     private String currentShardIterator;
     private boolean isShardClosed;
@@ -68,12 +65,12 @@ public class KinesisConsumer extends ScheduledBatchPollingConsumer {
         if (isShardClosed) {
             switch (getEndpoint().getConfiguration().getShardClosed()) {
             case ignore:
-                LOG.warn("The shard {} is in closed state");
+                log.warn("The shard {} is in closed state", getEndpoint().getConfiguration().getShardId());
                 break;
             case silent:
                 break;
             case fail:
-                LOG.info("Shard Iterator reaches CLOSE status:", getEndpoint().getConfiguration().getStreamName(), getEndpoint().getConfiguration().getShardId());
+                log.info("Shard Iterator on stream {} and shard {} reached CLOSE status", getEndpoint().getConfiguration().getStreamName(), getEndpoint().getConfiguration().getShardId());
                 throw new ReachedClosedStatusException(getEndpoint().getConfiguration().getStreamName(), getEndpoint().getConfiguration().getShardId());
             default:
                 throw new IllegalArgumentException("Unsupported shard closed strategy");
@@ -89,11 +86,11 @@ public class KinesisConsumer extends ScheduledBatchPollingConsumer {
         while (!exchanges.isEmpty()) {
             final Exchange exchange = ObjectHelper.cast(Exchange.class, exchanges.poll());
 
-            LOG.trace("Processing exchange [{}] started.", exchange);
+            log.trace("Processing exchange [{}] started.", exchange);
             getAsyncProcessor().process(exchange, new AsyncCallback() {
                 @Override
                 public void done(boolean doneSync) {
-                    LOG.trace("Processing exchange [{}] done.", exchange);
+                    log.trace("Processing exchange [{}] done.", exchange);
                 }
             });
             processedExchanges++;
@@ -143,7 +140,7 @@ public class KinesisConsumer extends ScheduledBatchPollingConsumer {
                     isShardClosed = true;
                 }
             }
-            LOG.debug("ShardId is: {}", shardId);
+            log.debug("ShardId is: {}", shardId);
 
             GetShardIteratorRequest req = new GetShardIteratorRequest().withStreamName(getEndpoint().getConfiguration().getStreamName()).withShardId(shardId)
                 .withShardIteratorType(getEndpoint().getConfiguration().getIteratorType());
@@ -155,7 +152,7 @@ public class KinesisConsumer extends ScheduledBatchPollingConsumer {
             GetShardIteratorResult result = getClient().getShardIterator(req);
             currentShardIterator = result.getShardIterator();
         }
-        LOG.debug("Shard Iterator is: {}", currentShardIterator);
+        log.debug("Shard Iterator is: {}", currentShardIterator);
         return currentShardIterator;
     }
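The logging statements adjusted above use SLF4J parameterized messages: each "{}" in the format string is filled, in order, by one trailing argument, and a final Throwable argument is rendered as a stack trace rather than substituted. A standalone sketch of that convention, not Camel code; the class, logger and values are hypothetical:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderSketch {

        private static final Logger LOG = LoggerFactory.getLogger(PlaceholderSketch.class);

        public static void main(String[] args) {
            String stream = "my-stream";
            String shardId = "shardId-000000000000";

            // two placeholders, two arguments: both values end up in the message
            LOG.info("Shard iterator on stream {} and shard {} reached CLOSE status", stream, shardId);

            // one placeholder, one argument, plus a trailing Throwable printed as a stack trace
            LOG.warn("Expired shard iterator, attempting to resume from {}", shardId,
                    new IllegalStateException("iterator expired"));
        }
    }
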
 
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/kms/KMSProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/kms/KMSProducer.java
index 90e014c..94a359f 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/kms/KMSProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/kms/KMSProducer.java
@@ -37,8 +37,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -48,8 +46,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class KMSProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(KMSProducer.class);
-
     private transient String kmsProducerToString;
 
     public KMSProducer(Endpoint endpoint) {
@@ -116,7 +112,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.listKeys(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("List Keys command returned the error code {}", ase.getErrorCode());
+            log.trace("List Keys command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -133,7 +129,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.createKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Create Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Create Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -152,7 +148,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.disableKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Disable Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Disable Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -175,7 +171,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.scheduleKeyDeletion(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Schedule Key Deletion command returned the error code {}", ase.getErrorCode());
+            log.trace("Schedule Key Deletion command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -194,7 +190,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.describeKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Describe Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Describe Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -213,7 +209,7 @@ public class KMSProducer extends DefaultProducer {
         try {
             result = kmsClient.enableKey(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Enable Key command returned the error code {}", ase.getErrorCode());
+            log.trace("Enable Key command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/lambda/LambdaProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/lambda/LambdaProducer.java
index 2fcb27c..551dd18 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/lambda/LambdaProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/lambda/LambdaProducer.java
@@ -48,8 +48,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -59,8 +57,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class LambdaProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(LambdaProducer.class);
-
     public LambdaProducer(final Endpoint endpoint) {
         super(endpoint);
     }
@@ -96,7 +92,7 @@ public class LambdaProducer extends DefaultProducer {
         try {
             result = lambdaClient.getFunction(new GetFunctionRequest().withFunctionName(getConfiguration().getFunction()));
         } catch (AmazonServiceException ase) {
-            LOG.trace("getFunction command returned the error code {}", ase.getErrorCode());
+            log.trace("getFunction command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -108,7 +104,7 @@ public class LambdaProducer extends DefaultProducer {
         try {
             result = lambdaClient.deleteFunction(new DeleteFunctionRequest().withFunctionName(getConfiguration().getFunction()));
         } catch (AmazonServiceException ase) {
-            LOG.trace("deleteFunction command returned the error code {}", ase.getErrorCode());
+            log.trace("deleteFunction command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -120,7 +116,7 @@ public class LambdaProducer extends DefaultProducer {
         try {
             result = lambdaClient.listFunctions();
         } catch (AmazonServiceException ase) {
-            LOG.trace("listFunctions command returned the error code {}", ase.getErrorCode());
+            log.trace("listFunctions command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -135,7 +131,7 @@ public class LambdaProducer extends DefaultProducer {
                 .withPayload(exchange.getIn().getBody(String.class));
             result = lambdaClient.invoke(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("invokeFunction command returned the error code {}", ase.getErrorCode());
+            log.trace("invokeFunction command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -269,7 +265,7 @@ public class LambdaProducer extends DefaultProducer {
             result = lambdaClient.createFunction(request);
 
         } catch (AmazonServiceException ase) {
-            LOG.trace("createFunction command returned the error code {}", ase.getErrorCode());
+            log.trace("createFunction command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
 
@@ -333,7 +329,7 @@ public class LambdaProducer extends DefaultProducer {
             result = lambdaClient.updateFunctionCode(request);
 
         } catch (AmazonServiceException ase) {
-            LOG.trace("updateFunction command returned the error code {}", ase.getErrorCode());
+            log.trace("updateFunction command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
 
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/mq/MQProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/mq/MQProducer.java
index 6e697e9..bb12d17 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/mq/MQProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/mq/MQProducer.java
@@ -39,8 +39,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -50,8 +48,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class MQProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(MQProducer.class);
-
     private transient String mqProducerToString;
 
     public MQProducer(Endpoint endpoint) {
@@ -118,7 +114,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.listBrokers(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("List Brokers command returned the error code {}", ase.getErrorCode());
+            log.trace("List Brokers command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -145,7 +141,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.createBroker(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Create Broker command returned the error code {}", ase.getErrorCode());
+            log.trace("Create Broker command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -165,7 +161,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.deleteBroker(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Delete Broker command returned the error code {}", ase.getErrorCode());
+            log.trace("Delete Broker command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -185,7 +181,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.rebootBroker(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Reboot Broker command returned the error code {}", ase.getErrorCode());
+            log.trace("Reboot Broker command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -212,7 +208,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.updateBroker(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Update Broker command returned the error code {}", ase.getErrorCode());
+            log.trace("Update Broker command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
@@ -232,7 +228,7 @@ public class MQProducer extends DefaultProducer {
         try {
             result = mqClient.describeBroker(request);
         } catch (AmazonServiceException ase) {
-            LOG.trace("Reboot Broker command returned the error code {}", ase.getErrorCode());
+            log.trace("Reboot Broker command returned the error code {}", ase.getErrorCode());
             throw ase;
         }
         Message message = getMessageForResponse(exchange);
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/s3/S3Consumer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/s3/S3Consumer.java
index 5f1c00d..268bf62 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/s3/S3Consumer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/s3/S3Consumer.java
@@ -40,8 +40,6 @@ import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.IOHelper;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Consumer of messages from the Amazon Web Service Simple Storage Service
@@ -49,7 +47,6 @@ import org.slf4j.LoggerFactory;
  */
 public class S3Consumer extends ScheduledBatchPollingConsumer {
     
-    private static final Logger LOG = LoggerFactory.getLogger(S3Consumer.class);
     private String marker;
     private transient String s3ConsumerToString;
 
@@ -68,12 +65,12 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
         Queue<Exchange> exchanges;
         
         if (fileName != null) {
-            LOG.trace("Getting object in bucket [{}] with file name [{}]...", bucketName, fileName);
+            log.trace("Getting object in bucket [{}] with file name [{}]...", bucketName, fileName);
 
             S3Object s3Object = getAmazonS3Client().getObject(new GetObjectRequest(bucketName, fileName));
             exchanges = createExchanges(s3Object);
         } else {
-            LOG.trace("Queueing objects in bucket [{}]...", bucketName);
+            log.trace("Queueing objects in bucket [{}]...", bucketName);
 
             ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
             listObjectsRequest.setBucketName(bucketName);
@@ -83,20 +80,20 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
             }
             // if there was a marker from previous poll then use that to continue from where we left last time
             if (marker != null) {
-                LOG.trace("Resuming from marker: {}", marker);
+                log.trace("Resuming from marker: {}", marker);
                 listObjectsRequest.setMarker(marker);
             }
 
             ObjectListing listObjects = getAmazonS3Client().listObjects(listObjectsRequest);
             if (listObjects.isTruncated()) {
                 marker = listObjects.getNextMarker();
-                LOG.trace("Returned list is truncated, so setting next marker: {}", marker);
+                log.trace("Returned list is truncated, so setting next marker: {}", marker);
             } else {
                 // no more data so clear marker
                 marker = null;
             }
-            if (LOG.isTraceEnabled()) {
-                LOG.trace("Found {} objects in bucket [{}]...", listObjects.getObjectSummaries().size(), bucketName);
+            if (log.isTraceEnabled()) {
+                log.trace("Found {} objects in bucket [{}]...", listObjects.getObjectSummaries().size(), bucketName);
             }
 
             exchanges = createExchanges(listObjects.getObjectSummaries());
@@ -112,8 +109,8 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
     }
     
     protected Queue<Exchange> createExchanges(List<S3ObjectSummary> s3ObjectSummaries) {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Received {} messages in this poll", s3ObjectSummaries.size());
+        if (log.isTraceEnabled()) {
+            log.trace("Received {} messages in this poll", s3ObjectSummaries.size());
         }
 
         Collection<S3Object> s3Objects = new ArrayList<>();
@@ -127,7 +124,7 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
                 answer.add(exchange);
             }
         } catch (Throwable e) {
-            LOG.warn("Error getting S3Object due: {}", e.getMessage(), e);
+            log.warn("Error getting S3Object due: {}", e.getMessage(), e);
             // ensure all previous gathered s3 objects are closed
             // if there was an exception creating the exchanges in this batch
             s3Objects.forEach(IOHelper::close);
@@ -167,11 +164,11 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
                 }
             });
 
-            LOG.trace("Processing exchange [{}]...", exchange);
+            log.trace("Processing exchange [{}]...", exchange);
             getAsyncProcessor().process(exchange, new AsyncCallback() {
                 @Override
                 public void done(boolean doneSync) {
-                    LOG.trace("Processing exchange [{}] done.", exchange);
+                    log.trace("Processing exchange [{}] done.", exchange);
                 }
             });
         }
@@ -190,11 +187,11 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
                 String bucketName = exchange.getIn().getHeader(S3Constants.BUCKET_NAME, String.class);
                 String key = exchange.getIn().getHeader(S3Constants.KEY, String.class);
                 
-                LOG.trace("Deleting object from bucket {} with key {}...", bucketName, key);
+                log.trace("Deleting object from bucket {} with key {}...", bucketName, key);
                 
                 getAmazonS3Client().deleteObject(bucketName, key);
 
-                LOG.trace("Deleted object from bucket {} with key {}...", bucketName, key);
+                log.trace("Deleted object from bucket {} with key {}...", bucketName, key);
             }
         } catch (AmazonClientException e) {
             getExceptionHandler().handleException("Error occurred during deleting object. This exception is ignored.", exchange, e);
@@ -209,9 +206,9 @@ public class S3Consumer extends ScheduledBatchPollingConsumer {
     protected void processRollback(Exchange exchange) {
         Exception cause = exchange.getException();
         if (cause != null) {
-            LOG.warn("Exchange failed, so rolling back message status: {}", exchange, cause);
+            log.warn("Exchange failed, so rolling back message status: {}", exchange, cause);
         } else {
-            LOG.warn("Exchange failed, so rolling back message status: {}", exchange);
+            log.warn("Exchange failed, so rolling back message status: {}", exchange);
         }
     }
 
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsEndpoint.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsEndpoint.java
index 6f88332..17b21df 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsEndpoint.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsEndpoint.java
@@ -43,8 +43,6 @@ import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The aws-sns component is used for sending messages to an Amazon Simple Notification Topic.
@@ -53,8 +51,6 @@ import org.slf4j.LoggerFactory;
     producerOnly = true, label = "cloud,mobile,messaging")
 public class SnsEndpoint extends DefaultEndpoint implements HeaderFilterStrategyAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(SnsEndpoint.class);
-
     private AmazonSNS snsClient;
 
     @UriPath(description = "Topic name or ARN")
@@ -120,7 +116,7 @@ public class SnsEndpoint extends DefaultEndpoint implements HeaderFilterStrategy
                     }
                 } while (nextToken != null);
             } catch (final AmazonServiceException ase) {
-                LOG.trace("The list topics operation return the following error code {}", ase.getErrorCode());
+                log.trace("The list topics operation return the following error code {}", ase.getErrorCode());
                 throw ase;
             }
         }
@@ -129,20 +125,20 @@ public class SnsEndpoint extends DefaultEndpoint implements HeaderFilterStrategy
             // creates a new topic, or returns the URL of an existing one
             CreateTopicRequest request = new CreateTopicRequest(configuration.getTopicName());
 
-            LOG.trace("Creating topic [{}] with request [{}]...", configuration.getTopicName(), request);
+            log.trace("Creating topic [{}] with request [{}]...", configuration.getTopicName(), request);
 
             CreateTopicResult result = snsClient.createTopic(request);
             configuration.setTopicArn(result.getTopicArn());
 
-            LOG.trace("Topic created with Amazon resource name: {}", configuration.getTopicArn());
+            log.trace("Topic created with Amazon resource name: {}", configuration.getTopicArn());
         }
         
         if (ObjectHelper.isNotEmpty(configuration.getPolicy())) {
-            LOG.trace("Updating topic [{}] with policy [{}]", configuration.getTopicArn(), configuration.getPolicy());
+            log.trace("Updating topic [{}] with policy [{}]", configuration.getTopicArn(), configuration.getPolicy());
             
             snsClient.setTopicAttributes(new SetTopicAttributesRequest(configuration.getTopicArn(), "Policy", configuration.getPolicy()));
             
-            LOG.trace("Topic policy updated");
+            log.trace("Topic policy updated");
         }
         
     }
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsProducer.java
index 2b447e1..caab137 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sns/SnsProducer.java
@@ -31,8 +31,6 @@ import org.apache.camel.Message;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.spi.HeaderFilterStrategy;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -43,8 +41,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class SnsProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(SnsProducer.class);
-
     private transient String snsProducerToString;
 
     public SnsProducer(Endpoint endpoint) {
@@ -60,11 +56,11 @@ public class SnsProducer extends DefaultProducer {
         request.setMessage(exchange.getIn().getBody(String.class));
         request.setMessageAttributes(this.translateAttributes(exchange.getIn().getHeaders(), exchange));
 
-        LOG.trace("Sending request [{}] from exchange [{}]...", request, exchange);
+        log.trace("Sending request [{}] from exchange [{}]...", request, exchange);
 
         PublishResult result = getEndpoint().getSNSClient().publish(request);
 
-        LOG.trace("Received result [{}]", result);
+        log.trace("Received result [{}]", result);
 
         Message message = getMessageForResponse(exchange);
         message.setHeader(SnsConstants.MESSAGE_ID, result.getMessageId());
@@ -112,7 +108,7 @@ public class SnsProducer extends DefaultProducer {
                     result.put(entry.getKey(), mav);
                 } else {
                     // cannot translate the message header to message attribute value
-                    LOG.warn("Cannot put the message header key={}, value={} into Sns MessageAttribute", entry.getKey(), entry.getValue());
+                    log.warn("Cannot put the message header key={}, value={} into Sns MessageAttribute", entry.getKey(), entry.getValue());
                 }
             }
         }
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsConsumer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsConsumer.java
index fe8b559..dadd5aa 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsConsumer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsConsumer.java
@@ -46,8 +46,6 @@ import org.apache.camel.spi.Synchronization;
 import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Consumer of messages from the Amazon Web Service Simple Queue Service
@@ -55,7 +53,6 @@ import org.slf4j.LoggerFactory;
  */
 public class SqsConsumer extends ScheduledBatchPollingConsumer {
     
-    private static final Logger LOG = LoggerFactory.getLogger(SqsConsumer.class);
     private ScheduledExecutorService scheduledExecutor;
     private transient String sqsConsumerToString;
     private Collection<String> attributeNames;
@@ -92,19 +89,19 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
             request.setMessageAttributeNames(messageAttributeNames);
         }
 
-        LOG.trace("Receiving messages with request [{}]...", request);
+        log.trace("Receiving messages with request [{}]...", request);
         
         ReceiveMessageResult messageResult = null;
         try {
             messageResult = getClient().receiveMessage(request);
         } catch (QueueDoesNotExistException e) {
-            LOG.info("Queue does not exist....recreating now...");
+            log.info("Queue does not exist....recreating now...");
             reConnectToQueue();
             messageResult = getClient().receiveMessage(request);
         }
 
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Received {} messages", messageResult.getMessages().size());
+        if (log.isTraceEnabled()) {
+            log.trace("Received {} messages", messageResult.getMessages().size());
         }
         
         Queue<Exchange> exchanges = createExchanges(messageResult.getMessages());
@@ -115,21 +112,21 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
         try {
             getEndpoint().createQueue(getClient());
         } catch (QueueDeletedRecentlyException qdr) {
-            LOG.debug("Queue recently deleted, will retry in 30 seconds.");
+            log.debug("Queue recently deleted, will retry in 30 seconds.");
             try {
                 Thread.sleep(30000);
                 getEndpoint().createQueue(getClient());
             } catch (Exception e) {
-                LOG.warn("failed to retry queue connection.", e);
+                log.warn("failed to retry queue connection.", e);
             }
         } catch (Exception e) {
-            LOG.warn("Could not connect to queue in amazon.", e);
+            log.warn("Could not connect to queue in amazon.", e);
         }
     }
     
     protected Queue<Exchange> createExchanges(List<Message> messages) {
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Received {} messages in this poll", messages.size());
+        if (log.isTraceEnabled()) {
+            log.trace("Received {} messages in this poll", messages.size());
         }
         
         Queue<Exchange> answer = new LinkedList<>();
@@ -161,8 +158,8 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
                 int delay = visibilityTimeout.intValue() / 2;
                 int period = visibilityTimeout.intValue();
                 int repeatSeconds = Double.valueOf(visibilityTimeout.doubleValue() * 1.5).intValue();
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Scheduled TimeoutExtender task to start after {} delay, and run with {}/{} period/repeat (seconds), to extend exchangeId: {}",
+                if (log.isDebugEnabled()) {
+                    log.debug("Scheduled TimeoutExtender task to start after {} delay, and run with {}/{} period/repeat (seconds), to extend exchangeId: {}",
                             new Object[]{delay, period, repeatSeconds, exchange.getExchangeId()});
                 }
                 final ScheduledFuture<?> scheduledFuture = this.scheduledExecutor.scheduleAtFixedRate(
@@ -180,7 +177,7 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
 
                     private void cancelExtender(Exchange exchange) {
                         // cancel task as we are done
-                        LOG.trace("Processing done so cancelling TimeoutExtender task for exchangeId: {}", exchange.getExchangeId());
+                        log.trace("Processing done so cancelling TimeoutExtender task for exchangeId: {}", exchange.getExchangeId());
                         scheduledFuture.cancel(true);
                     }
                 });
@@ -202,11 +199,11 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
                 }
             });
 
-            LOG.trace("Processing exchange [{}]...", exchange);
+            log.trace("Processing exchange [{}]...", exchange);
             getAsyncProcessor().process(exchange, new AsyncCallback() {
                 @Override
                 public void done(boolean doneSync) {
-                    LOG.trace("Processing exchange [{}] done.", exchange);
+                    log.trace("Processing exchange [{}] done.", exchange);
                 }
             });
         }
@@ -226,11 +223,11 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
                 String receiptHandle = exchange.getIn().getHeader(SqsConstants.RECEIPT_HANDLE, String.class);
                 DeleteMessageRequest deleteRequest = new DeleteMessageRequest(getQueueUrl(), receiptHandle);
 
-                LOG.trace("Deleting message with receipt handle {}...", receiptHandle);
+                log.trace("Deleting message with receipt handle {}...", receiptHandle);
 
                 getClient().deleteMessage(deleteRequest);
 
-                LOG.trace("Deleted message with receipt handle {}...", receiptHandle);
+                log.trace("Deleted message with receipt handle {}...", receiptHandle);
             }
         } catch (AmazonClientException e) {
             getExceptionHandler().handleException("Error occurred during deleting message. This exception is ignored.", exchange, e);
@@ -323,15 +320,15 @@ public class SqsConsumer extends ScheduledBatchPollingConsumer {
                     exchange.getIn().getHeader(SqsConstants.RECEIPT_HANDLE, String.class), repeatSeconds);
 
             try {
-                LOG.trace("Extending visibility window by {} seconds for exchange {}", this.repeatSeconds, this.exchange);
+                log.trace("Extending visibility window by {} seconds for exchange {}", this.repeatSeconds, this.exchange);
                 getEndpoint().getClient().changeMessageVisibility(request);
-                LOG.debug("Extended visibility window by {} seconds for exchange {}", this.repeatSeconds, this.exchange);
+                log.debug("Extended visibility window by {} seconds for exchange {}", this.repeatSeconds, this.exchange);
             } catch (ReceiptHandleIsInvalidException e) {
                 // Ignore.
             } catch (MessageNotInflightException e) {
                 // Ignore.
             } catch (Exception e) {
-                LOG.warn("Extending visibility window failed for exchange " + exchange
+                log.warn("Extending visibility window failed for exchange " + exchange
                         + ". Will not attempt to extend visibility further. This exception will be ignored.", e);
             }
         }
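
The consumer hunks (S3Consumer above, SqsConsumer here) make the same substitution as the producer classes earlier in the diff, including inside the guarded logging blocks: the isTraceEnabled()/isDebugEnabled() checks stay, only the field they call changes from the static LOG to the inherited log. A minimal polling consumer written in that style might look like the sketch below; the class name and the poll body are hypothetical and are not part of this commit:

    import org.apache.camel.Endpoint;
    import org.apache.camel.Exchange;
    import org.apache.camel.Processor;
    import org.apache.camel.impl.ScheduledPollConsumer;

    // Illustrative sketch only: a scheduled polling consumer that uses the
    // protected 'log' inherited from its Camel base classes instead of
    // declaring its own static SLF4J logger.
    public class ExamplePollingConsumer extends ScheduledPollConsumer {

        public ExamplePollingConsumer(Endpoint endpoint, Processor processor) {
            super(endpoint, processor);
        }

        @Override
        protected int poll() throws Exception {
            Exchange exchange = getEndpoint().createExchange();
            // same guard style as the hunks above: keep the check, switch the field
            if (log.isTraceEnabled()) {
                log.trace("Polling endpoint [{}]...", getEndpoint());
            }
            getProcessor().process(exchange);
            return 1;
        }
    }
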
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsEndpoint.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsEndpoint.java
index 1182f12..7c96496 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsEndpoint.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsEndpoint.java
@@ -52,8 +52,6 @@ import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
 import org.apache.camel.util.FileUtil;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The aws-sqs component is used for sending and receiving messages to Amazon's SQS service.
@@ -62,8 +60,6 @@ import org.slf4j.LoggerFactory;
     consumerClass = SqsConsumer.class, label = "cloud,messaging")
 public class SqsEndpoint extends ScheduledPollEndpoint implements HeaderFilterStrategyAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(SqsEndpoint.class);
-
     private AmazonSQS client;
     private String queueUrl;
 
@@ -143,7 +139,7 @@ public class SqsEndpoint extends ScheduledPollEndpoint implements HeaderFilterSt
                 for (String url : listQueuesResult.getQueueUrls()) {
                     if (url.endsWith("/" + configuration.getQueueName())) {
                         queueUrl = url;
-                        LOG.trace("Queue available at '{}'.", queueUrl);
+                        log.trace("Queue available at '{}'.", queueUrl);
                         break;
                     }
                 }
@@ -153,13 +149,13 @@ public class SqsEndpoint extends ScheduledPollEndpoint implements HeaderFilterSt
         if (queueUrl == null) {
             createQueue(client);
         } else {
-            LOG.debug("Using Amazon SQS queue url: {}", queueUrl);
+            log.debug("Using Amazon SQS queue url: {}", queueUrl);
             updateQueueAttributes(client);
         }
     }
 
     protected void createQueue(AmazonSQS client) {
-        LOG.trace("Queue '{}' doesn't exist. Will create it...", configuration.getQueueName());
+        log.trace("Queue '{}' doesn't exist. Will create it...", configuration.getQueueName());
 
         // creates a new queue, or returns the URL of an existing one
         CreateQueueRequest request = new CreateQueueRequest(configuration.getQueueName());
@@ -186,12 +182,12 @@ public class SqsEndpoint extends ScheduledPollEndpoint implements HeaderFilterSt
         if (getConfiguration().getRedrivePolicy() != null) {
             request.getAttributes().put(QueueAttributeName.RedrivePolicy.name(), getConfiguration().getRedrivePolicy());
         }
-        LOG.trace("Creating queue [{}] with request [{}]...", configuration.getQueueName(), request);
+        log.trace("Creating queue [{}] with request [{}]...", configuration.getQueueName(), request);
 
         CreateQueueResult queueResult = client.createQueue(request);
         queueUrl = queueResult.getQueueUrl();
 
-        LOG.trace("Queue created and available at: {}", queueUrl);
+        log.trace("Queue created and available at: {}", queueUrl);
     }
 
     private void updateQueueAttributes(AmazonSQS client) {
@@ -216,9 +212,9 @@ public class SqsEndpoint extends ScheduledPollEndpoint implements HeaderFilterSt
             request.getAttributes().put(QueueAttributeName.RedrivePolicy.name(), getConfiguration().getRedrivePolicy());
         }
         if (!request.getAttributes().isEmpty()) {
-            LOG.trace("Updating queue '{}' with the provided queue attributes...", configuration.getQueueName());
+            log.trace("Updating queue '{}' with the provided queue attributes...", configuration.getQueueName());
             client.setQueueAttributes(request);
-            LOG.trace("Queue '{}' updated and available at {}'", configuration.getQueueName(), queueUrl);
+            log.trace("Queue '{}' updated and available at {}'", configuration.getQueueName(), queueUrl);
         }
     }
 
diff --git a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsProducer.java b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsProducer.java
index a0e68c8..898c7a6 100644
--- a/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsProducer.java
+++ b/components/camel-aws/src/main/java/org/apache/camel/component/aws/sqs/SqsProducer.java
@@ -32,8 +32,6 @@ import org.apache.camel.NoFactoryAvailableException;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.spi.HeaderFilterStrategy;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageForResponse;
 
@@ -44,8 +42,6 @@ import static org.apache.camel.component.aws.common.AwsExchangeUtil.getMessageFo
  */
 public class SqsProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(SqsProducer.class);
-
     private transient String sqsProducerToString;
 
     public SqsProducer(SqsEndpoint endpoint) throws NoFactoryAvailableException {
@@ -59,11 +55,11 @@ public class SqsProducer extends DefaultProducer {
         addDelay(request, exchange);
         configureFifoAttributes(request, exchange);
 
-        LOG.trace("Sending request [{}] from exchange [{}]...", request, exchange);
+        log.trace("Sending request [{}] from exchange [{}]...", request, exchange);
 
         SendMessageResult result = getClient().sendMessage(request);
 
-        LOG.trace("Received result [{}]", result);
+        log.trace("Received result [{}]", result);
 
         Message message = getMessageForResponse(exchange);
         message.setHeader(SqsConstants.MESSAGE_ID, result.getMessageId());
@@ -88,13 +84,13 @@ public class SqsProducer extends DefaultProducer {
         Integer headerValue = exchange.getIn().getHeader(SqsConstants.DELAY_HEADER, Integer.class);
         Integer delayValue;
         if (headerValue == null) {
-            LOG.trace("Using the config delay");
+            log.trace("Using the config delay");
             delayValue = getEndpoint().getConfiguration().getDelaySeconds();
         } else {
-            LOG.trace("Using the header delay");
+            log.trace("Using the header delay");
             delayValue = headerValue;
         }
-        LOG.trace("found delay: {}", delayValue);
+        log.trace("found delay: {}", delayValue);
         request.setDelaySeconds(delayValue == null ? Integer.valueOf(0) : delayValue);
     }
 
@@ -169,7 +165,7 @@ public class SqsProducer extends DefaultProducer {
                     result.put(entry.getKey(), mav);
                 } else {
                     // cannot translate the message header to message attribute value
-                    LOG.warn("Cannot put the message header key={}, value={} into Sqs MessageAttribute", entry.getKey(), entry.getValue());
+                    log.warn("Cannot put the message header key={}, value={} into Sqs MessageAttribute", entry.getKey(), entry.getValue());
                 }
             }
         }
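
Every producer in this part of the diff (KMS, Lambda, MQ, SNS, SQS) gets the identical treatment: the org.slf4j imports and the private static final Logger field are removed, and the call sites switch from LOG to the inherited log. A minimal sketch of a producer written directly against that pattern follows; the class and its messages are illustrative only, not code from this commit:

    import org.apache.camel.Endpoint;
    import org.apache.camel.Exchange;
    import org.apache.camel.impl.DefaultProducer;

    // Illustrative sketch only: no logger field, no org.slf4j imports,
    // just the protected 'log' inherited via DefaultProducer.
    public class ExampleProducer extends DefaultProducer {

        public ExampleProducer(Endpoint endpoint) {
            super(endpoint);
        }

        @Override
        public void process(Exchange exchange) throws Exception {
            log.trace("Sending request from exchange [{}]...", exchange);
            // placeholder for the real send logic
            Object result = exchange.getIn().getBody();
            log.trace("Received result [{}]", result);
        }
    }

The net effect, visible in each hunk, is that the logger declaration and its two imports disappear while the log statements themselves only change the field they reference.
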
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceConsumer.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceConsumer.java
index d712d1d..ad8feed 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceConsumer.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceConsumer.java
@@ -21,8 +21,6 @@ import org.apache.camel.Exchange;
 import org.apache.camel.NoFactoryAvailableException;
 import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledPollConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Consumer of the blob content from the Azure Blob Service
@@ -30,8 +28,7 @@ import org.slf4j.LoggerFactory;
 // Extending DefaultConsumer is simpler if the blob must exist before this consumer is started,
 // polling makes it easier to get the consumer working if no blob exists yet.
 public class BlobServiceConsumer extends ScheduledPollConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(BlobServiceConsumer.class);
-    
+
     public BlobServiceConsumer(BlobServiceEndpoint endpoint, Processor processor) throws NoFactoryAvailableException {
         super(endpoint, processor);
     }
@@ -40,7 +37,7 @@ public class BlobServiceConsumer extends ScheduledPollConsumer {
     protected int poll() throws Exception {
         Exchange exchange = super.getEndpoint().createExchange();
         try {
-            LOG.trace("Getting the blob content");
+            log.trace("Getting the blob content");
             getBlob(exchange);
             super.getAsyncProcessor().process(exchange);
             return 1;
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceEndpoint.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceEndpoint.java
index a146aa5..6ee6703 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceEndpoint.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceEndpoint.java
@@ -25,8 +25,6 @@ import org.apache.camel.spi.Metadata;
 import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The azure-blob component is used for storing and retrieving blobs from Azure Storage Blob Service.
@@ -39,8 +37,6 @@ import org.slf4j.LoggerFactory;
              label = "cloud,database,nosql")
 public class BlobServiceEndpoint extends DefaultEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(BlobServiceEndpoint.class);
-    
     @UriPath(description = "Container or Blob compact Uri")
     @Metadata(required = "true")
     private String containerOrBlobUri; // to support component docs
@@ -53,7 +49,7 @@ public class BlobServiceEndpoint extends DefaultEndpoint {
     }
 
     public Consumer createConsumer(Processor processor) throws Exception {
-        LOG.trace("Creating a consumer");
+        log.trace("Creating a consumer");
         if (getConfiguration().getBlobName() == null) {
             throw new IllegalArgumentException("Blob name must be specified.");
         }
@@ -63,7 +59,7 @@ public class BlobServiceEndpoint extends DefaultEndpoint {
     }
 
     public Producer createProducer() throws Exception {
-        LOG.trace("Creating a producer");
+        log.trace("Creating a producer");
         if (getConfiguration().getBlobName() == null
             && getConfiguration().getOperation() != null 
             && BlobServiceOperations.listBlobs != configuration.getOperation()) {
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceProducer.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceProducer.java
index 8e4f6b8..a3b1403 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceProducer.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/blob/BlobServiceProducer.java
@@ -46,16 +46,12 @@ import org.apache.camel.component.azure.common.ExchangeUtil;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Producer which sends messages to the Azure Storage Blob Service
  */
 public class BlobServiceProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(BlobServiceProducer.class);
-
     public BlobServiceProducer(final Endpoint endpoint) {
         super(endpoint);
     }
@@ -118,7 +114,7 @@ public class BlobServiceProducer extends DefaultProducer {
     private void listBlobs(Exchange exchange) throws Exception {
         CloudBlobContainer client = BlobServiceUtil.createBlobContainerClient(getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
-        LOG.trace("Getting the blob list from the container [{}] from exchange [{}]...", 
+        log.trace("Getting the blob list from the container [{}] from exchange [{}]...",
                   getConfiguration().getContainerName(), exchange);
         BlobServiceConfiguration cfg = getConfiguration();
         EnumSet<BlobListingDetails> details = null;
@@ -143,7 +139,7 @@ public class BlobServiceProducer extends DefaultProducer {
         
         InputStream inputStream = getInputStreamFromExchange(exchange);
         
-        LOG.trace("Putting a block blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Putting a block blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         try {
             client.upload(inputStream, -1,
                           opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
@@ -169,7 +165,7 @@ public class BlobServiceProducer extends DefaultProducer {
         configureCloudBlobForWrite(client);
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
         
-        LOG.trace("Putting a blob [{}] from blocks from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Putting a blob [{}] from blocks from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         List<BlockEntry> blockEntries = new LinkedList<>();
         for (BlobBlock blobBlock : blobBlocks) {
             blockEntries.add(blobBlock.getBlockEntry());
@@ -200,7 +196,7 @@ public class BlobServiceProducer extends DefaultProducer {
         CloudBlockBlob client = BlobServiceUtil.createBlockBlobClient(getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
         
-        LOG.trace("Putting a blob [{}] block list from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Putting a blob [{}] block list from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         client.commitBlockList(blockEntries, 
                                opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
     }
@@ -228,7 +224,7 @@ public class BlobServiceProducer extends DefaultProducer {
     private void getBlobBlockList(Exchange exchange) throws Exception {
         CloudBlockBlob client = BlobServiceUtil.createBlockBlobClient(getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
-        LOG.trace("Getting the blob block list [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Getting the blob block list [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         BlockListingFilter filter = exchange.getIn().getBody(BlockListingFilter.class);
         if (filter == null) {
             filter = BlockListingFilter.COMMITTED;
@@ -255,7 +251,7 @@ public class BlobServiceProducer extends DefaultProducer {
     
     private void doCreateAppendBlob(CloudAppendBlob client, BlobServiceRequestOptions opts, Exchange exchange) 
         throws Exception {
-        LOG.trace("Creating an append blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Creating an append blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         try {
             client.createOrReplace(opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
         } catch (StorageException ex) {
@@ -309,7 +305,7 @@ public class BlobServiceProducer extends DefaultProducer {
     
     private void doCreatePageBlob(CloudPageBlob client, BlobServiceRequestOptions opts, Exchange exchange) 
         throws Exception {
-        LOG.trace("Creating a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Creating a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         Long pageSize = getPageBlobSize(exchange);
         try {
             client.create(pageSize,
@@ -325,7 +321,7 @@ public class BlobServiceProducer extends DefaultProducer {
     }
     
     private void uploadPageBlob(Exchange exchange) throws Exception {
-        LOG.trace("Updating a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Updating a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         
         CloudPageBlob client = BlobServiceUtil.createPageBlobClient(getConfiguration());
         configureCloudBlobForWrite(client);
@@ -346,7 +342,7 @@ public class BlobServiceProducer extends DefaultProducer {
     }
     
     private void resizePageBlob(Exchange exchange) throws Exception {
-        LOG.trace("Resizing a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Resizing a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         
         CloudPageBlob client = BlobServiceUtil.createPageBlobClient(getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
@@ -355,7 +351,7 @@ public class BlobServiceProducer extends DefaultProducer {
     }
     
     private void clearPageBlob(Exchange exchange) throws Exception {
-        LOG.trace("Clearing a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Clearing a page blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
                 
         CloudPageBlob client = BlobServiceUtil.createPageBlobClient(getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
@@ -400,7 +396,7 @@ public class BlobServiceProducer extends DefaultProducer {
         CloudPageBlob client = BlobServiceUtil.createPageBlobClient(getConfiguration());
         BlobServiceUtil.configureCloudBlobForRead(client, getConfiguration());
         BlobServiceRequestOptions opts = BlobServiceUtil.getRequestOptions(exchange);
-        LOG.trace("Getting the page blob ranges [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Getting the page blob ranges [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         List<PageRange> ranges = 
             client.downloadPageRanges(opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
         ExchangeUtil.getMessageForResponse(exchange).setBody(ranges);
@@ -421,7 +417,7 @@ public class BlobServiceProducer extends DefaultProducer {
 
     
     private void doDeleteBlock(CloudBlob client, Exchange exchange) throws Exception {
-        LOG.trace("Deleting a blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
+        log.trace("Deleting a blob [{}] from exchange [{}]...", getConfiguration().getBlobName(), exchange);
         client.delete();
     }
 
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceConsumer.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceConsumer.java
index a70ad46..32a41f2 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceConsumer.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceConsumer.java
@@ -21,15 +21,12 @@ import org.apache.camel.Exchange;
 import org.apache.camel.NoFactoryAvailableException;
 import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledPollConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Consumer of the queue content from the Azure Queue Service
  */
 public class QueueServiceConsumer extends ScheduledPollConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(QueueServiceConsumer.class);
-    
+
     public QueueServiceConsumer(QueueServiceEndpoint endpoint, Processor processor) throws NoFactoryAvailableException {
         super(endpoint, processor);
     }
@@ -38,7 +35,7 @@ public class QueueServiceConsumer extends ScheduledPollConsumer {
     protected int poll() throws Exception {
         Exchange exchange = super.getEndpoint().createExchange();
         try {
-            LOG.trace("Retrieving a message");
+            log.trace("Retrieving a message");
             retrieveMessage(exchange);
             super.getAsyncProcessor().process(exchange);
             return 1;
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceEndpoint.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceEndpoint.java
index 5173ada..c5c542e 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceEndpoint.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceEndpoint.java
@@ -25,8 +25,6 @@ import org.apache.camel.spi.Metadata;
 import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The azure-queue component is used for storing and retrieving messages from Azure Storage Queue Service.
@@ -39,8 +37,6 @@ import org.slf4j.LoggerFactory;
              label = "cloud,queue,azure")
 public class QueueServiceEndpoint extends DefaultEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(QueueServiceEndpoint.class);
-    
     @UriPath(description = "Container Queue compact Uri")
     @Metadata(required = "true")
     private String containerAndQueueUri; // to support component docs
@@ -53,14 +49,14 @@ public class QueueServiceEndpoint extends DefaultEndpoint {
     }
 
     public Consumer createConsumer(Processor processor) throws Exception {
-        LOG.trace("Creating a consumer");
+        log.trace("Creating a consumer");
         QueueServiceConsumer consumer = new QueueServiceConsumer(this, processor);
         configureConsumer(consumer);
         return consumer;
     }
 
     public Producer createProducer() throws Exception {
-        LOG.trace("Creating a producer");
+        log.trace("Creating a producer");
         return new QueueServiceProducer(this);
     }
 
diff --git a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceProducer.java b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceProducer.java
index ff15369..ca59d7b 100644
--- a/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceProducer.java
+++ b/components/camel-azure/src/main/java/org/apache/camel/component/azure/queue/QueueServiceProducer.java
@@ -29,16 +29,12 @@ import org.apache.camel.component.azure.common.ExchangeUtil;
 import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Producer which sends messages to the Azure Storage Queue Service
  */
 public class QueueServiceProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(QueueServiceProducer.class);
-
     public QueueServiceProducer(final Endpoint endpoint) {
         super(endpoint);
     }
@@ -101,7 +97,7 @@ public class QueueServiceProducer extends DefaultProducer {
     }
     
     private void doCreateQueue(CloudQueue client, QueueServiceRequestOptions opts, Exchange exchange) throws Exception {
-        LOG.trace("Creating the queue [{}] from exchange [{}]...", 
+        log.trace("Creating the queue [{}] from exchange [{}]...",
                   getConfiguration().getQueueName(), exchange);
         client.createIfNotExists(opts.getRequestOpts(), opts.getOpContext());
         ExchangeUtil.getMessageForResponse(exchange)
@@ -109,7 +105,7 @@ public class QueueServiceProducer extends DefaultProducer {
     }
     
     private void deleteQueue(Exchange exchange) throws Exception {
-        LOG.trace("Deleting the queue [{}] from exchange [{}]...", 
+        log.trace("Deleting the queue [{}] from exchange [{}]...",
                   getConfiguration().getQueueName(), exchange);
         CloudQueue client = QueueServiceUtil.createQueueClient(getConfiguration());
         QueueServiceRequestOptions opts = QueueServiceUtil.getRequestOptions(exchange);
@@ -117,7 +113,7 @@ public class QueueServiceProducer extends DefaultProducer {
     }
     
     private void addMessage(Exchange exchange) throws Exception {
-        LOG.trace("Putting the message into the queue [{}] from exchange [{}]...", 
+        log.trace("Putting the message into the queue [{}] from exchange [{}]...",
                   getConfiguration().getQueueName(), exchange);
         CloudQueue client = QueueServiceUtil.createQueueClient(getConfiguration());
         QueueServiceRequestOptions opts = QueueServiceUtil.getRequestOptions(exchange);
@@ -139,7 +135,7 @@ public class QueueServiceProducer extends DefaultProducer {
         QueueServiceRequestOptions opts = QueueServiceUtil.getRequestOptions(exchange);
         
         CloudQueueMessage message = getCloudQueueMessage(exchange);
-        LOG.trace("Updating the message in the queue [{}] from exchange [{}]...", 
+        log.trace("Updating the message in the queue [{}] from exchange [{}]...",
                   getConfiguration().getQueueName(), exchange);
         
         EnumSet<MessageUpdateFields> fields = null;
@@ -158,7 +154,7 @@ public class QueueServiceProducer extends DefaultProducer {
     }
     
     private void deleteMessage(Exchange exchange) throws Exception {
-        LOG.trace("Deleting the message from the queue [{}] from exchange [{}]...", 
+        log.trace("Deleting the message from the queue [{}] from exchange [{}]...",
                   getConfiguration().getQueueName(), exchange);
         CloudQueue client = QueueServiceUtil.createQueueClient(getConfiguration());
         QueueServiceRequestOptions opts = QueueServiceUtil.getRequestOptions(exchange);
diff --git a/components/camel-barcode/src/main/java/org/apache/camel/dataformat/barcode/BarcodeDataFormat.java b/components/camel-barcode/src/main/java/org/apache/camel/dataformat/barcode/BarcodeDataFormat.java
index 8c16985..f5100a6 100644
--- a/components/camel-barcode/src/main/java/org/apache/camel/dataformat/barcode/BarcodeDataFormat.java
+++ b/components/camel-barcode/src/main/java/org/apache/camel/dataformat/barcode/BarcodeDataFormat.java
@@ -41,8 +41,6 @@ import org.apache.camel.spi.DataFormat;
 import org.apache.camel.spi.DataFormatName;
 import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.ExchangeHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * {@link DataFormat} to create (encode) and
@@ -54,11 +52,6 @@ import org.slf4j.LoggerFactory;
 public class BarcodeDataFormat extends ServiceSupport implements DataFormat, DataFormatName {
 
     /**
-     * Logger.
-     */
-    private static final Logger LOG = LoggerFactory.getLogger(BarcodeDataFormat.class);
-
-    /**
      * The bean for the default parameters.
      */
     private BarcodeParameters params;
@@ -239,7 +232,7 @@ public class BarcodeDataFormat extends ServiceSupport implements DataFormat, Dat
      */
     public final void addToHintMap(final EncodeHintType hintType, final Object value) {
         this.writerHintMap.put(hintType, value);
-        LOG.info(String.format("Added '%s' with value '%s' to writer hint map.", hintType.toString(), value.toString()));
+        log.info(String.format("Added '%s' with value '%s' to writer hint map.", hintType.toString(), value.toString()));
     }
 
     /**
@@ -255,9 +248,9 @@ public class BarcodeDataFormat extends ServiceSupport implements DataFormat, Dat
     public final void removeFromHintMap(final EncodeHintType hintType) {
         if (this.writerHintMap.containsKey(hintType)) {
             this.writerHintMap.remove(hintType);
-            LOG.info(String.format("Removed '%s' from writer hint map.", hintType.toString()));
+            log.info(String.format("Removed '%s' from writer hint map.", hintType.toString()));
         } else {
-            LOG.warn(String.format("Could not find encode hint type '%s' in writer hint map.", hintType.toString()));
+            log.warn(String.format("Could not find encode hint type '%s' in writer hint map.", hintType.toString()));
         }
     }
 
@@ -267,9 +260,9 @@ public class BarcodeDataFormat extends ServiceSupport implements DataFormat, Dat
     public final void removeFromHintMap(final DecodeHintType hintType) {
         if (this.readerHintMap.containsKey(hintType)) {
             this.readerHintMap.remove(hintType);
-            LOG.info(String.format("Removed '%s' from reader hint map.", hintType.toString()));
+            log.info(String.format("Removed '%s' from reader hint map.", hintType.toString()));
         } else {
-            LOG.warn(String.format("Could not find decode hint type '%s' in reader hint map.", hintType.toString()));
+            log.warn(String.format("Could not find decode hint type '%s' in reader hint map.", hintType.toString()));
         }
     }
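
BarcodeDataFormat extends ServiceSupport directly and, like the producers and consumers earlier in the diff, now logs through the inherited protected log field rather than a class-local static logger. A minimal data format built on the same base class could look like the sketch below; the class, its name and its (trivial) marshalling behaviour are hypothetical and not part of this commit:

    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.camel.Exchange;
    import org.apache.camel.spi.DataFormat;
    import org.apache.camel.spi.DataFormatName;
    import org.apache.camel.support.ServiceSupport;

    // Illustrative sketch only: a DataFormat extending ServiceSupport and
    // using the inherited 'log' field for its logging.
    public class ExampleDataFormat extends ServiceSupport implements DataFormat, DataFormatName {

        @Override
        public String getDataFormatName() {
            return "example";
        }

        @Override
        public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
            log.trace("Marshalling [{}] for exchange [{}]", graph, exchange);
            stream.write(String.valueOf(graph).getBytes());
        }

        @Override
        public Object unmarshal(Exchange exchange, InputStream stream) throws Exception {
            log.trace("Unmarshalling body for exchange [{}]", exchange);
            return exchange.getContext().getTypeConverter().convertTo(String.class, stream);
        }

        @Override
        protected void doStart() throws Exception {
            // nothing to initialize in this sketch
        }

        @Override
        protected void doStop() throws Exception {
            // nothing to clean up in this sketch
        }
    }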
 
diff --git a/components/camel-beanstalk/src/main/java/org/apache/camel/component/beanstalk/BeanstalkConsumer.java b/components/camel-beanstalk/src/main/java/org/apache/camel/component/beanstalk/BeanstalkConsumer.java
index 07a2e6b..8829b9a 100644
--- a/components/camel-beanstalk/src/main/java/org/apache/camel/component/beanstalk/BeanstalkConsumer.java
+++ b/components/camel-beanstalk/src/main/java/org/apache/camel/component/beanstalk/BeanstalkConsumer.java
@@ -33,8 +33,6 @@ import org.apache.camel.component.beanstalk.processors.DeleteCommand;
 import org.apache.camel.component.beanstalk.processors.ReleaseCommand;
 import org.apache.camel.impl.ScheduledPollConsumer;
 import org.apache.camel.spi.Synchronization;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * PollingConsumer to read Beanstalk jobs.
@@ -53,7 +51,7 @@ import org.slf4j.LoggerFactory;
  * The reaction on failures is configurable: possible variants are "bury", "release" or "delete"
  */
 public class BeanstalkConsumer extends ScheduledPollConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(BeanstalkConsumer.class);
+
     private static final String[] STATS_KEY_STR = new String[]{"tube", "state"};
     private static final String[] STATS_KEY_INT = new String[]{"age", "time-left", "timeouts", "releases", "buries", "kicks"};
 
@@ -86,8 +84,8 @@ public class BeanstalkConsumer extends ScheduledPollConsumer {
                     return null;
                 }
 
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug(String.format("Received job ID %d (data length %d)", job.getJobId(), job.getData().length));
+                if (log.isDebugEnabled()) {
+                    log.debug(String.format("Received job ID %d (data length %d)", job.getJobId(), job.getData().length));
                 }
 
                 final Exchange exchange = getEndpoint().createExchange(ExchangePattern.InOnly);
@@ -222,7 +220,7 @@ public class BeanstalkConsumer extends ScheduledPollConsumer {
             try {
                 executor.submit(new RunCommand(successCommand, exchange)).get();
             } catch (Exception e) {
-                LOG.error(String.format("Could not run completion of exchange %s", exchange), e);
+                log.error(String.format("Could not run completion of exchange %s", exchange), e);
             }
         }
 
@@ -231,7 +229,7 @@ public class BeanstalkConsumer extends ScheduledPollConsumer {
             try {
                 executor.submit(new RunCommand(failureCommand, exchange)).get();
             } catch (Exception e) {
-                LOG.error(String.format("%s could not run failure of exchange %s", failureCommand.getClass().getName(), exchange), e);
+                log.error(String.format("%s could not run failure of exchange %s", failureCommand.getClass().getName(), exchange), e);
             }
         }
 
@@ -250,12 +248,12 @@ public class BeanstalkConsumer extends ScheduledPollConsumer {
                     try {
                         command.act(client, exchange);
                     } catch (BeanstalkException e) {
-                        LOG.warn(String.format("Post-processing %s of exchange %s failed, retrying.", command.getClass().getName(), exchange), e);
+                        log.warn(String.format("Post-processing %s of exchange %s failed, retrying.", command.getClass().getName(), exchange), e);
                         resetClient();
                         command.act(client, exchange);
                     }
                 } catch (final Exception e) {
-                    LOG.error(String.format("%s could not post-process exchange %s", command.getClass().getName(), exchange), e);
+                    log.error(String.format("%s could not post-process exchange %s", command.getClass().getName(), exchange), e);
                     exchange.setException(e);
                 }
             }
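
The hunks above all make the same change: the per-class static LOG is removed and the code relies on the protected log field inherited from ServiceSupport (or one of its subclasses such as ScheduledPollConsumer or DefaultProducer). Below is a minimal sketch of what a class looks like after this change, assuming only what the hunks show (an org.slf4j.Logger named log on the base class); the class and method names are illustrative and not part of the patch:

    import org.apache.camel.support.ServiceSupport;

    public class SampleService extends ServiceSupport {

        public void register(String name, String value) {
            // no static LOG field is declared; 'log' comes from the base class.
            // SLF4J '{}' placeholders defer message formatting until the level
            // is actually enabled, unlike an eager String.format() call.
            log.info("Registered '{}' with value '{}'.", name, value);
        }

        @Override
        protected void doStart() throws Exception {
            log.debug("Starting {}", this);
        }

        @Override
        protected void doStop() throws Exception {
            log.debug("Stopping {}", this);
        }
    }

Using the '{}' placeholders, as several of the touched classes already do, also avoids the String.format() work seen in the barcode and beanstalk hunks when the log level is disabled.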
diff --git a/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvMandatoryFieldsUnmarshallTest.java b/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvMandatoryFieldsUnmarshallTest.java
index 1501aa8..e28c375 100644
--- a/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvMandatoryFieldsUnmarshallTest.java
+++ b/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvMandatoryFieldsUnmarshallTest.java
@@ -68,7 +68,7 @@ public class BindySimpleCsvMandatoryFieldsUnmarshallTest extends AbstractJUnit4S
             fail("Should have thrown an exception");
         } catch (CamelExecutionException e) {
             Assert.isInstanceOf(Exception.class, e.getCause());
-            // LOG.info(">> Error : " + e);
+            // log.info(">> Error : " + e);
         }
 
         resultEndpoint1.assertIsSatisfied();
@@ -183,7 +183,7 @@ public class BindySimpleCsvMandatoryFieldsUnmarshallTest extends AbstractJUnit4S
             template2.sendBody(header + record4);
             resultEndpoint2.assertIsSatisfied();
         } catch (CamelExecutionException e) {
-            // LOG.info(">> Error : " + e);
+            // log.info(">> Error : " + e);
         }
     }
 
diff --git a/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvUnmarshallTest.java b/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvUnmarshallTest.java
index aef74c2..b584dea 100644
--- a/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvUnmarshallTest.java
+++ b/components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvUnmarshallTest.java
@@ -73,7 +73,7 @@ public class BindySimpleCsvUnmarshallTest extends AbstractJUnit4SpringContextTes
         /*
          * List<Exchange> exchanges = resultEndpoint.getExchanges();
          * for(Exchange exchange : exchanges) { Object body =
-         * exchange.getOut().getBody(); LOG.debug("Body received : " +
+         * exchange.getOut().getBody(); log.debug("Body received : " +
          * body.toString()); }
          */
 
diff --git a/components/camel-blueprint/src/main/java/org/apache/camel/blueprint/BlueprintCamelContext.java b/components/camel-blueprint/src/main/java/org/apache/camel/blueprint/BlueprintCamelContext.java
index ad56a4b..56b4b4b 100644
--- a/components/camel-blueprint/src/main/java/org/apache/camel/blueprint/BlueprintCamelContext.java
+++ b/components/camel-blueprint/src/main/java/org/apache/camel/blueprint/BlueprintCamelContext.java
@@ -42,16 +42,12 @@ import org.osgi.framework.ServiceRegistration;
 import org.osgi.service.blueprint.container.BlueprintContainer;
 import org.osgi.service.blueprint.container.BlueprintEvent;
 import org.osgi.service.blueprint.container.BlueprintListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * OSGi Blueprint based {@link org.apache.camel.CamelContext}.
  */
 public class BlueprintCamelContext extends DefaultCamelContext implements ServiceListener, BlueprintListener {
 
-    private static final Logger LOG = LoggerFactory.getLogger(BlueprintCamelContext.class);
-    
     protected final AtomicBoolean routeDefinitionValid = new AtomicBoolean(true);
 
     private BundleContext bundleContext;
@@ -98,7 +94,7 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
     }
    
     public void init() throws Exception {
-        LOG.trace("init {}", this);
+        log.trace("init {}", this);
 
         // add service listener so we can be notified when blueprint container is done
         // and we would be ready to start CamelContext
@@ -109,19 +105,19 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
     }
 
     public void destroy() throws Exception {
-        LOG.trace("destroy {}", this);
+        log.trace("destroy {}", this);
 
         // remove listener and stop this CamelContext
         try {
             bundleContext.removeServiceListener(this);
         } catch (Exception e) {
-            LOG.warn("Error removing ServiceListener: " + this + ". This exception is ignored.", e);
+            log.warn("Error removing ServiceListener: " + this + ". This exception is ignored.", e);
         }
         if (registration != null) {
             try {
                 registration.unregister();
             } catch (Exception e) {
-                LOG.warn("Error unregistering service registration: " + registration + ". This exception is ignored.", e);
+                log.warn("Error unregistering service registration: " + registration + ". This exception is ignored.", e);
             }
             registration = null;
         }
@@ -137,7 +133,7 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
 
     @Override
     public void blueprintEvent(BlueprintEvent event) {
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
             String eventTypeString;
 
             switch (event.getType()) {
@@ -167,23 +163,23 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
                 break;
             }
 
-            LOG.debug("Received BlueprintEvent[replay={} type={} bundle={}] %s", event.isReplay(), eventTypeString, event.getBundle().getSymbolicName(), event);
+            log.debug("Received BlueprintEvent[replay={} type={} bundle={}] %s", event.isReplay(), eventTypeString, event.getBundle().getSymbolicName(), event);
         }
 
         if (!event.isReplay() && this.getBundleContext().getBundle().getBundleId() == event.getBundle().getBundleId()) {
             if (event.getType() == BlueprintEvent.CREATED) {
                 try {
-                    LOG.info("Attempting to start CamelContext: {}", this.getName());
+                    log.info("Attempting to start CamelContext: {}", this.getName());
                     this.maybeStart();
                 } catch (Exception startEx) {
-                    LOG.error("Error occurred during starting CamelContext: {}", this.getName(), startEx);
+                    log.error("Error occurred during starting CamelContext: {}", this.getName(), startEx);
                 }
             } else if (event.getType() == BlueprintEvent.DESTROYING) {
                 try {
-                    LOG.info("Stopping CamelContext: {}", this.getName());
+                    log.info("Stopping CamelContext: {}", this.getName());
                     this.stop();
                 } catch (Exception stopEx) {
-                    LOG.error("Error occurred during stopping CamelContext: {}", this.getName(), stopEx);
+                    log.error("Error occurred during stopping CamelContext: {}", this.getName(), stopEx);
                 }
             }
         }
@@ -191,7 +187,7 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
 
     @Override
     public void serviceChanged(ServiceEvent event) {
-        if (LOG.isTraceEnabled()) {
+        if (log.isTraceEnabled()) {
             String eventTypeString;
 
             switch (event.getType()) {
@@ -213,7 +209,7 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
             }
 
             // use trace logging as this is very noisy
-            LOG.trace("Service: {} changed to: {}", event, eventTypeString);
+            log.trace("Service: {} changed to: {}", event, eventTypeString);
         }
     }
 
@@ -250,10 +246,10 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
     }
 
     private void maybeStart() throws Exception {
-        LOG.trace("maybeStart: {}", this);
+        log.trace("maybeStart: {}", this);
 
         if (!routeDefinitionValid.get()) {
-            LOG.trace("maybeStart: {} is skipping since CamelRoute definition is not correct.", this);
+            log.trace("maybeStart: {} is skipping since CamelRoute definition is not correct.", this);
             return;
         }
 
@@ -274,16 +270,16 @@ public class BlueprintCamelContext extends DefaultCamelContext implements Servic
         // when blueprint loading the bundle
         boolean skip = "true".equalsIgnoreCase(System.getProperty("skipStartingCamelContext"));
         if (skip) {
-            LOG.trace("maybeStart: {} is skipping as System property skipStartingCamelContext is set", this);
+            log.trace("maybeStart: {} is skipping as System property skipStartingCamelContext is set", this);
             return;
         }
 
         if (!isStarted() && !isStarting()) {
-            LOG.debug("Starting {}", this);
+            log.debug("Starting {}", this);
             start();
         } else {
             // ignore as Camel is already started
-            LOG.trace("Ignoring maybeStart() as {} is already started", this);
+            log.trace("Ignoring maybeStart() as {} is already started", this);
         }
     }
 
diff --git a/components/camel-caffeine/src/main/java/org/apache/camel/component/caffeine/processor/aggregate/CaffeineAggregationRepository.java b/components/camel-caffeine/src/main/java/org/apache/camel/component/caffeine/processor/aggregate/CaffeineAggregationRepository.java
index 8c4624d..601cdb8 100644
--- a/components/camel-caffeine/src/main/java/org/apache/camel/component/caffeine/processor/aggregate/CaffeineAggregationRepository.java
+++ b/components/camel-caffeine/src/main/java/org/apache/camel/component/caffeine/processor/aggregate/CaffeineAggregationRepository.java
@@ -28,11 +28,8 @@ import org.apache.camel.impl.DefaultExchange;
 import org.apache.camel.impl.DefaultExchangeHolder;
 import org.apache.camel.spi.RecoverableAggregationRepository;
 import org.apache.camel.support.ServiceSupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class CaffeineAggregationRepository extends ServiceSupport implements RecoverableAggregationRepository {
-    private static final Logger LOG = LoggerFactory.getLogger(CaffeineAggregationRepository.class);
 
     private CamelContext camelContext;
     private Cache<String, DefaultExchangeHolder> cache;
@@ -126,7 +123,7 @@ public class CaffeineAggregationRepository extends ServiceSupport implements Rec
 
     @Override
     public Exchange add(final CamelContext camelContext, final String key, final Exchange exchange) {
-        LOG.trace("Adding an Exchange with ID {} for key {} in a thread-safe manner.", exchange.getExchangeId(), key);
+        log.trace("Adding an Exchange with ID {} for key {} in a thread-safe manner.", exchange.getExchangeId(), key);
 
         final DefaultExchangeHolder oldHolder = cache.getIfPresent(key);
         final DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(exchange, true, allowSerializedHeaders);
@@ -143,13 +140,13 @@ public class CaffeineAggregationRepository extends ServiceSupport implements Rec
 
     @Override
     public void remove(CamelContext camelContext, String key, Exchange exchange) {
-        LOG.trace("Removing an exchange with ID {} for key {}", exchange.getExchangeId(), key);
+        log.trace("Removing an exchange with ID {} for key {}", exchange.getExchangeId(), key);
         cache.invalidate(key);
     }
 
     @Override
     public void confirm(CamelContext camelContext, String exchangeId) {
-        LOG.trace("Confirming an exchange with ID {}.", exchangeId);
+        log.trace("Confirming an exchange with ID {}.", exchangeId);
         cache.invalidate(exchangeId);
     }
 
@@ -162,15 +159,15 @@ public class CaffeineAggregationRepository extends ServiceSupport implements Rec
 
     @Override
     public Set<String> scan(CamelContext camelContext) {
-        LOG.trace("Scanning for exchanges to recover in {} context", camelContext.getName());
+        log.trace("Scanning for exchanges to recover in {} context", camelContext.getName());
         Set<String> scanned = Collections.unmodifiableSet(getKeys());
-        LOG.trace("Found {} keys for exchanges to recover in {} context", scanned.size(), camelContext.getName());
+        log.trace("Found {} keys for exchanges to recover in {} context", scanned.size(), camelContext.getName());
         return scanned;
     }
 
     @Override
     public Exchange recover(CamelContext camelContext, String exchangeId) {
-        LOG.trace("Recovering an Exchange with ID {}.", exchangeId);
+        log.trace("Recovering an Exchange with ID {}.", exchangeId);
         return useRecovery ? unmarshallExchange(camelContext, cache.getIfPresent(exchangeId)) : null;
     }
 
diff --git a/components/camel-cassandraql/src/main/java/org/apache/camel/component/cassandra/CassandraProducer.java b/components/camel-cassandraql/src/main/java/org/apache/camel/component/cassandra/CassandraProducer.java
index 1b03c25..7a8d25f 100644
--- a/components/camel-cassandraql/src/main/java/org/apache/camel/component/cassandra/CassandraProducer.java
+++ b/components/camel-cassandraql/src/main/java/org/apache/camel/component/cassandra/CassandraProducer.java
@@ -41,7 +41,6 @@ import static org.apache.camel.utils.cassandra.CassandraUtils.isEmpty;
  */
 public class CassandraProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CassandraProducer.class);
     private PreparedStatement preparedStatement;
 
     public CassandraProducer(CassandraEndpoint endpoint) {
diff --git a/components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/CMComponent.java b/components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/CMComponent.java
index 5c77faf..2106cdb 100644
--- a/components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/CMComponent.java
+++ b/components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/CMComponent.java
@@ -27,16 +27,12 @@ import org.apache.camel.CamelContext;
 import org.apache.camel.Endpoint;
 import org.apache.camel.ResolveEndpointFailedException;
 import org.apache.camel.impl.DefaultComponent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Represents the component that manages {@link CMEndpoint}s.
  */
 public class CMComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CMComponent.class);
-
     private Validator validator;
 
     public CMComponent() {
@@ -54,7 +50,7 @@ public class CMComponent extends DefaultComponent {
         setProperties(config, parameters);
 
         // Validate configuration
-        LOG.debug("Validating uri based configuration");
+        log.debug("Validating uri based configuration");
         final Set<ConstraintViolation<CMConfiguration>> constraintViolations = getValidator().validate(config);
         if (constraintViolations.size() > 0) {
             final StringBuffer msg = new StringBuffer();
diff --git a/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISConsumer.java b/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISConsumer.java
index e51aaef..1c85d5b 100644
--- a/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISConsumer.java
+++ b/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISConsumer.java
@@ -23,14 +23,12 @@ import org.apache.camel.Exchange;
 import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledPollConsumer;
 import org.apache.chemistry.opencmis.client.api.OperationContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The CMIS consumer.
  */
 public class CMISConsumer extends ScheduledPollConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(CMISConsumer.class);
+
     private final CMISSessionFacadeFactory sessionFacadeFactory;
     private CMISSessionFacade sessionFacade;
 
@@ -59,7 +57,7 @@ public class CMISConsumer extends ScheduledPollConsumer {
         Exchange exchange = getEndpoint().createExchange();
         exchange.getIn().setHeaders(properties);
         exchange.getIn().setBody(inputStream);
-        LOG.debug("Polling node : {}", properties.get("cmis:name"));
+        log.debug("Polling node : {}", properties.get("cmis:name"));
         getProcessor().process(exchange);
         return 1;
     }
diff --git a/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISProducer.java b/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISProducer.java
index dc92702..79ec22d 100644
--- a/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISProducer.java
+++ b/components/camel-cmis/src/main/java/org/apache/camel/component/cmis/CMISProducer.java
@@ -36,14 +36,12 @@ import org.apache.chemistry.opencmis.commons.PropertyIds;
 import org.apache.chemistry.opencmis.commons.data.ContentStream;
 import org.apache.chemistry.opencmis.commons.enums.VersioningState;
 import org.apache.chemistry.opencmis.commons.exceptions.CmisObjectNotFoundException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The CMIS producer.
  */
 public class CMISProducer extends DefaultProducer {
-    private static final Logger LOG = LoggerFactory.getLogger(CMISProducer.class);
+
     private final CMISSessionFacadeFactory sessionFacadeFactory;
     private CMISSessionFacade sessionFacade;
 
@@ -60,7 +58,7 @@ public class CMISProducer extends DefaultProducer {
 
     public void process(Exchange exchange) throws Exception {
         CmisObject cmisObject = createNode(exchange);
-        LOG.debug("Created node with id: {}", cmisObject.getId());
+        log.debug("Created node with id: {}", cmisObject.getId());
 
         // copy the header of in message to the out message
         exchange.getOut().copyFrom(exchange.getIn());
@@ -152,7 +150,7 @@ public class CMISProducer extends DefaultProducer {
         if (!cmisProperties.containsKey(PropertyIds.OBJECT_TYPE_ID)) {
             cmisProperties.put(PropertyIds.OBJECT_TYPE_ID, CamelCMISConstants.CMIS_FOLDER);
         }
-        LOG.debug("Creating folder with properties: {}", cmisProperties);
+        log.debug("Creating folder with properties: {}", cmisProperties);
         return parentFolder.createFolder(cmisProperties);
     }
 
@@ -165,7 +163,7 @@ public class CMISProducer extends DefaultProducer {
         if (getSessionFacade().isObjectTypeVersionable((String) cmisProperties.get(PropertyIds.OBJECT_TYPE_ID))) {
             versioningState = VersioningState.MAJOR;
         }
-        LOG.debug("Creating document with properties: {}", cmisProperties);
+        log.debug("Creating document with properties: {}", cmisProperties);
         return parentFolder.createDocument(cmisProperties, contentStream, versioningState);
     }
 
diff --git a/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsComponent.java b/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsComponent.java
index fba33da..ddf9512 100644
--- a/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsComponent.java
+++ b/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsComponent.java
@@ -38,13 +38,9 @@ import org.apache.camel.impl.DefaultComponent;
 import org.apache.camel.spi.Metadata;
 import org.apache.camel.util.ObjectHelper;
 import org.bouncycastle.jce.provider.BouncyCastleProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class CryptoCmsComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CryptoCmsComponent.class);
-
     @Metadata(label = "advanced")
     private SignedDataVerifierConfiguration signedDataVerifierConfiguration;
 
@@ -110,7 +106,7 @@ public class CryptoCmsComponent extends DefaultComponent {
             processor = new EnvelopedDataDecryptor(config);
         } else {
             String error = "Endpoint uri " + uri + " is wrong configured. Operation " + scheme + " is not supported. Supported operations are: sign, verify, encrypt, decrypt";
-            LOG.error(error);
+            log.error(error);
             throw new IllegalStateException(error);
         }
         CryptoCmsEndpoint endpoint = new CryptoCmsEndpoint(uri, this, processor);
@@ -152,7 +148,7 @@ public class CryptoCmsComponent extends DefaultComponent {
     @Override
     protected void doStart() throws Exception { // NOPMD
         if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
-            LOG.debug("Adding BouncyCastleProvider as security provider");
+            log.debug("Adding BouncyCastleProvider as security provider");
             Security.addProvider(new BouncyCastleProvider());
         }
         super.doStart();
diff --git a/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsProducer.java b/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsProducer.java
index 072a3a3..bd68bdd 100644
--- a/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsProducer.java
+++ b/components/camel-crypto-cms/src/main/java/org/apache/camel/component/crypto/cms/CryptoCmsProducer.java
@@ -22,7 +22,7 @@ import org.apache.camel.Processor;
 import org.apache.camel.impl.DefaultProducer;
 
 public class CryptoCmsProducer extends DefaultProducer {
-    // private static final Logger LOG =
+    // private static final Logger log =
     // LoggerFactory.getLogger(CmsProducer.class);
 
     private Processor processor;
diff --git a/components/camel-crypto/src/main/java/org/apache/camel/converter/crypto/PGPDataFormat.java b/components/camel-crypto/src/main/java/org/apache/camel/converter/crypto/PGPDataFormat.java
index 6819440..fed56b3 100644
--- a/components/camel-crypto/src/main/java/org/apache/camel/converter/crypto/PGPDataFormat.java
+++ b/components/camel-crypto/src/main/java/org/apache/camel/converter/crypto/PGPDataFormat.java
@@ -46,7 +46,7 @@ public class PGPDataFormat extends PGPKeyAccessDataFormat implements PGPPublicKe
     public static final String SIGNATURE_KEY_RING = "CamelPGPDataFormatSignatureKeyRing";
     public static final String SIGNATURE_KEY_PASSWORD = "CamelPGPDataFormatSignatureKeyPassword";
 
-    //private static final Logger LOG = LoggerFactory.getLogger(PGPDataFormatChanged.class);
+    //private static final Logger log = LoggerFactory.getLogger(PGPDataFormatChanged.class);
 
     private String password; // only for decryption
     private String keyFileName;
diff --git a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfBlueprintEndpoint.java b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfBlueprintEndpoint.java
index a26b8d7..4233a23 100644
--- a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfBlueprintEndpoint.java
+++ b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfBlueprintEndpoint.java
@@ -22,13 +22,9 @@ import org.apache.camel.util.ObjectHelper;
 import org.apache.cxf.BusFactory;
 import org.osgi.framework.BundleContext;
 import org.osgi.service.blueprint.container.BlueprintContainer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class CxfBlueprintEndpoint extends CxfEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CxfBlueprintEndpoint.class);
-
     private BlueprintContainer blueprintContainer;
     private BundleContext bundleContext;
     private BlueprintCamelContext blueprintCamelContext;
@@ -56,7 +52,7 @@ public class CxfBlueprintEndpoint extends CxfEndpoint {
 
     protected void checkName(Object value, String name) {
         if (ObjectHelper.isEmpty(value)) {
-            LOG.warn("The " + name + " of " + this.getEndpointUri() + " is empty, cxf will try to load the first one in wsdl for you.");
+            log.warn("The " + name + " of " + this.getEndpointUri() + " is empty, cxf will try to load the first one in wsdl for you.");
         }
     }
 
diff --git a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfComponent.java b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfComponent.java
index 973e4e5..5c8833e 100644
--- a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfComponent.java
+++ b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfComponent.java
@@ -35,8 +35,6 @@ import org.slf4j.LoggerFactory;
  */
 public class CxfComponent extends HeaderFilterStrategyComponent implements SSLContextParametersAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CxfComponent.class);
-
     @Metadata(label = "advanced")
     private Boolean allowStreaming;
     @Metadata(label = "security", defaultValue = "false")
diff --git a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfConsumer.java b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfConsumer.java
index ff6741b..512c0f7 100644
--- a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfConsumer.java
+++ b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfConsumer.java
@@ -48,8 +48,6 @@ import org.apache.cxf.service.model.BindingOperationInfo;
 import org.apache.cxf.transport.MessageObserver;
 import org.apache.cxf.ws.addressing.ContextUtils;
 import org.apache.cxf.ws.addressing.EndpointReferenceType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A Consumer of exchanges for a service in CXF.  CxfConsumer acts a CXF
@@ -60,7 +58,7 @@ import org.slf4j.LoggerFactory;
  * @version
  */
 public class CxfConsumer extends DefaultConsumer implements Suspendable {
-    private static final Logger LOG = LoggerFactory.getLogger(CxfConsumer.class);
+
     private Server server;
     private CxfEndpoint cxfEndpoint;
 
@@ -154,14 +152,14 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
 
         // we receive a CXF request when this method is called
         public Object invoke(Exchange cxfExchange, Object o) {
-            LOG.trace("Received CXF Request: {}", cxfExchange);
+            log.trace("Received CXF Request: {}", cxfExchange);
             Continuation continuation;
             if (!endpoint.isSynchronous() && isAsyncInvocationSupported(cxfExchange)
                 && (continuation = getContinuation(cxfExchange)) != null) {
-                LOG.trace("Calling the Camel async processors.");
+                log.trace("Calling the Camel async processors.");
                 return asyncInvoke(cxfExchange, continuation);
             } else {
-                LOG.trace("Calling the Camel sync processors.");
+                log.trace("Calling the Camel sync processors.");
                 return syncInvoke(cxfExchange);
             }
         }
@@ -175,7 +173,7 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
                     final org.apache.camel.Exchange camelExchange = prepareCamelExchange(cxfExchange);
 
                     // Now we don't set up the timeout value
-                    LOG.trace("Suspending continuation of exchangeId: {}", camelExchange.getExchangeId());
+                    log.trace("Suspending continuation of exchangeId: {}", camelExchange.getExchangeId());
 
                     // The continuation could be called before the suspend is called
                     continuation.suspend(cxfEndpoint.getContinuationTimeout());
@@ -187,7 +185,7 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
                         public void done(boolean doneSync) {
                             // make sure the continuation resume will not be called before the suspend method in other thread
                             synchronized (continuation) {
-                                LOG.trace("Resuming continuation of exchangeId: {}", camelExchange.getExchangeId());
+                                log.trace("Resuming continuation of exchangeId: {}", camelExchange.getExchangeId());
                                 // resume processing after both, sync and async callbacks
                                 continuation.resume();
                             }
@@ -236,14 +234,14 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
             org.apache.camel.Exchange camelExchange = prepareCamelExchange(cxfExchange);
             try {
                 try {
-                    LOG.trace("Processing +++ START +++");
+                    log.trace("Processing +++ START +++");
                     // send Camel exchange to the target processor
                     getProcessor().process(camelExchange);
                 } catch (Exception e) {
                     throw new Fault(e);
                 }
 
-                LOG.trace("Processing +++ END +++");
+                log.trace("Processing +++ END +++");
                 setResponseBack(cxfExchange, camelExchange);
             }  catch (Exception ex) {
                 doneUoW(camelExchange);
@@ -274,7 +272,7 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
 
             if (boi != null) {
                 camelExchange.setProperty(BindingOperationInfo.class.getName(), boi);
-                LOG.trace("Set exchange property: BindingOperationInfo: {}", boi);
+                log.trace("Set exchange property: BindingOperationInfo: {}", boi);
                 // set the message exchange patter with the boi
                 if (boi.getOperationInfo().isOneWay()) {
                     camelExchange.setPattern(ExchangePattern.InOnly);
@@ -288,7 +286,7 @@ public class CxfConsumer extends DefaultConsumer implements Suspendable {
 
             // set data format mode in Camel exchange
             camelExchange.setProperty(CxfConstants.DATA_FORMAT_PROPERTY, dataFormat);
-            LOG.trace("Set Exchange property: {}={}", DataFormat.class.getName(), dataFormat);
+            log.trace("Set Exchange property: {}={}", DataFormat.class.getName(), dataFormat);
 
             camelExchange.setProperty(Message.MTOM_ENABLED, String.valueOf(endpoint.isMtomEnabled()));
 
diff --git a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfEndpoint.java b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfEndpoint.java
index e24dc6e..0c25856 100644
--- a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfEndpoint.java
+++ b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfEndpoint.java
@@ -113,8 +113,6 @@ import org.apache.cxf.service.model.MessagePartInfo;
 import org.apache.cxf.staxutils.StaxSource;
 import org.apache.cxf.staxutils.StaxUtils;
 import org.apache.cxf.wsdl.WSDLManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The cxf component is used for SOAP WebServices using Apache CXF.
@@ -122,8 +120,6 @@ import org.slf4j.LoggerFactory;
 @UriEndpoint(firstVersion = "1.0.0", scheme = "cxf", title = "CXF", syntax = "cxf:beanId:address", consumerClass = CxfConsumer.class, label = "soap,webservice")
 public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, HeaderFilterStrategyAware, Service, Cloneable {
 
-    private static final Logger LOG = LoggerFactory.getLogger(CxfEndpoint.class);
-
     @UriParam(label = "advanced")
     protected Bus bus;
 
@@ -323,7 +319,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
                 sfb.getFeatures().add(feature);
             }
         } else {
-            LOG.debug("Ignore DataFormat mode {} since SEI class is annotated with WebServiceProvider", getDataFormat());
+            log.debug("Ignore DataFormat mode {} since SEI class is annotated with WebServiceProvider", getDataFormat());
         }
 
         if (isLoggingFeatureEnabled()) {
@@ -351,7 +347,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
             } else {
                 sfb.setProperties(getProperties());
             }
-            LOG.debug("ServerFactoryBean: {} added properties: {}", sfb, getProperties());
+            log.debug("ServerFactoryBean: {} added properties: {}", sfb, getProperties());
         }
         if (this.isSkipPayloadMessagePartCheck()) {
             if (sfb.getProperties() == null) {
@@ -537,7 +533,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
             } else {
                 factoryBean.setProperties(getProperties());
             }
-            LOG.debug("ClientFactoryBean: {} added properties: {}", factoryBean, getProperties());
+            log.debug("ClientFactoryBean: {} added properties: {}", factoryBean, getProperties());
         }
 
         // setup the basic authentication property
@@ -641,7 +637,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
 
     void checkName(Object value, String name) {
         if (ObjectHelper.isEmpty(value)) {
-            LOG.warn("The " + name + " of " + this.getEndpointUri() + " is empty, cxf will try to load the first one in wsdl for you.");
+            log.warn("The " + name + " of " + this.getEndpointUri() + " is empty, cxf will try to load the first one in wsdl for you.");
         }
     }
 
@@ -952,12 +948,12 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
         if (bus == null) {
             bus = CxfEndpointUtils.createBus(getCamelContext());
             this.createBus = true;
-            LOG.debug("Using DefaultBus {}", bus);
+            log.debug("Using DefaultBus {}", bus);
         }
 
         if (!getBusHasBeenCalled.getAndSet(true) && defaultBus) {
             BusFactory.setDefaultBus(bus);
-            LOG.debug("Set bus {} as thread default bus", bus);
+            log.debug("Set bus {} as thread default bus", bus);
         }
         return bus;
     }
@@ -1025,7 +1021,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
                                              this.properties);
             } catch (Throwable e) {
                 // TODO: Why don't we rethrow this exception
-                LOG.warn("Error setting CamelContext. This exception will be ignored.", e);
+                log.warn("Error setting CamelContext. This exception will be ignored.", e);
             }
         }
     }
@@ -1050,7 +1046,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
                                              this.properties);
             } catch (Throwable e) {
                 // TODO: Why don't we rethrow this exception
-                LOG.warn("Error setting properties. This exception will be ignored.", e);
+                log.warn("Error setting properties. This exception will be ignored.", e);
             }
         }
     }
@@ -1083,7 +1079,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
     protected void doStop() throws Exception {
         // we should consider to shutdown the bus if the bus is created by cxfEndpoint
         if (createBus && bus != null) {
-            LOG.info("shutdown the bus ... {}", bus);
+            log.info("shutdown the bus ... {}", bus);
             getBus().shutdown(false);
             // clean up the bus to create a new one if the endpoint is started again
             bus = null;
@@ -1263,7 +1259,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
                     }
                 } catch (XMLStreamException e) {
                     //ignore
-                    LOG.warn("Error finding the start element.", e);
+                    log.warn("Error finding the start element.", e);
                     return null;
                 }
                 return r.getLocalName();
@@ -1458,7 +1454,7 @@ public class CxfEndpoint extends DefaultEndpoint implements AsyncEndpoint, Heade
         try {
             return new URI(uriString);
         } catch (URISyntaxException e) {
-            LOG.error("cannot determine request URI", e);
+            log.error("cannot determine request URI", e);
             return null;
         }
     }
diff --git a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfProducer.java b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfProducer.java
index 3e02d69..7f31f44 100644
--- a/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfProducer.java
+++ b/components/camel-cxf/src/main/java/org/apache/camel/component/cxf/CxfProducer.java
@@ -47,8 +47,6 @@ import org.apache.cxf.message.Message;
 import org.apache.cxf.service.model.BindingMessageInfo;
 import org.apache.cxf.service.model.BindingOperationInfo;
 import org.apache.cxf.transport.Conduit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * CxfProducer binds a Camel exchange to a CXF exchange, acts as a CXF 
@@ -58,7 +56,7 @@ import org.slf4j.LoggerFactory;
  * @version 
  */
 public class CxfProducer extends DefaultProducer implements AsyncProcessor {
-    private static final Logger LOG = LoggerFactory.getLogger(CxfProducer.class);
+
     private Client client;
     private CxfEndpoint endpoint;
 
@@ -112,7 +110,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
     // As the CXF client async and sync APIs are implemented differently,
     // we don't delegate the sync process call to the async process
     public boolean process(Exchange camelExchange, AsyncCallback callback) {
-        LOG.trace("Process exchange: {} in an async way.", camelExchange);
+        log.trace("Process exchange: {} in an async way.", camelExchange);
         
         try {
             // create CXF exchange
@@ -150,7 +148,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
      * invokes the CXF client.
      */
     public void process(Exchange camelExchange) throws Exception {
-        LOG.trace("Process exchange: {} in sync way.", camelExchange);
+        log.trace("Process exchange: {} in sync way.", camelExchange);
         
         // create CXF exchange
         ExchangeImpl cxfExchange = new ExchangeImpl();
@@ -182,7 +180,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
                         endpoint.getCookieHandler().storeCookies(camelExchange, endpoint.getRequestUri(camelExchange), cxfHeaders);
                     }
                 } catch (IOException e) {
-                    LOG.error("Cannot store cookies", e);
+                    log.error("Cannot store cookies", e);
                 }
             }
             // bind the CXF response to Camel exchange
@@ -203,7 +201,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
         // set data format mode in exchange
         DataFormat dataFormat = endpoint.getDataFormat();
         camelExchange.setProperty(CxfConstants.DATA_FORMAT_PROPERTY, dataFormat);   
-        LOG.trace("Set Camel Exchange property: {}={}", DataFormat.class.getName(), dataFormat);
+        log.trace("Set Camel Exchange property: {}={}", DataFormat.class.getName(), dataFormat);
         
         if (endpoint.getMergeProtocolHeaders()) {
             camelExchange.setProperty(CxfConstants.CAMEL_CXF_PROTOCOL_HEADERS_MERGED, Boolean.TRUE);
@@ -215,7 +213,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
         // don't let CXF ClientImpl close the input stream 
         if (dataFormat.dealias() == DataFormat.RAW) {
             cxfExchange.put(Client.KEEP_CONDUIT_ALIVE, true);
-            LOG.trace("Set CXF Exchange property: {}={}", Client.KEEP_CONDUIT_ALIVE, true);
+            log.trace("Set CXF Exchange property: {}={}", Client.KEEP_CONDUIT_ALIVE, true);
         }
      
         // bind the request CXF exchange
@@ -238,7 +236,7 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
                     requestContext.put(Message.PROTOCOL_HEADERS, transportHeaders);
                 }
             } catch (IOException e) {
-                LOG.warn("Cannot load cookies", e);
+                log.warn("Cannot load cookies", e);
             }
         }
 
@@ -264,13 +262,13 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
         
         // store the original boi in the exchange
         camelExchange.setProperty(BindingOperationInfo.class.getName(), boi);
-        LOG.trace("Set exchange property: BindingOperationInfo: {}", boi);
+        log.trace("Set exchange property: BindingOperationInfo: {}", boi);
 
         // Unwrap boi before passing it to make a client call
         if (endpoint.getDataFormat() != DataFormat.PAYLOAD && !endpoint.isWrapped() && boi != null) {
             if (boi.isUnwrappedCapable()) {
                 boi = boi.getUnwrappedOperation();
-                LOG.trace("Unwrapped BOI {}", boi);
+                log.trace("Unwrapped BOI {}", boi);
             }
         }
         return  boi;
@@ -367,10 +365,10 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
             params[0] = exchange.getIn().getBody();
         }
 
-        if (LOG.isTraceEnabled()) {
+        if (log.isTraceEnabled()) {
             if (params != null) {
                 for (int i = 0; i < params.length; i++) {
-                    LOG.trace("params[{}] = {}", i, params[i]);
+                    log.trace("params[{}] = {}", i, params[i]);
                 }
             }
         }
@@ -393,11 +391,11 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
         BindingOperationInfo answer = null;
         String lp = ex.getIn().getHeader(CxfConstants.OPERATION_NAME, String.class);
         if (lp == null) {
-            LOG.debug("CxfProducer cannot find the {} from message header, trying with defaultOperationName", CxfConstants.OPERATION_NAME);
+            log.debug("CxfProducer cannot find the {} from message header, trying with defaultOperationName", CxfConstants.OPERATION_NAME);
             lp = endpoint.getDefaultOperationName();
         }
         if (lp == null) {
-            LOG.debug("CxfProducer cannot find the {} from message header and there is no DefaultOperationName setting, CxfProducer will pick up the first available operation.",
+            log.debug("CxfProducer cannot find the {} from message header and there is no DefaultOperationName setting, CxfProducer will pick up the first available operation.",
                      CxfConstants.OPERATION_NAME);
             Collection<BindingOperationInfo> bois = 
                 client.getEndpoint().getEndpointInfo().getBinding().getOperations();
@@ -414,12 +412,12 @@ public class CxfProducer extends DefaultProducer implements AsyncProcessor {
             }
             if (ns == null) {
                 ns = client.getEndpoint().getService().getName().getNamespaceURI();
-                LOG.trace("Operation namespace not in header. Set it to: {}", ns);
+                log.trace("Operation namespace not in header. Set it to: {}", ns);
             }            
 
             QName qname = new QName(ns, lp);
 
-            LOG.trace("Operation qname = {}", qname);
+            log.trace("Operation qname = {}", qname);
             
             answer = client.getEndpoint().getEndpointInfo().getBinding().getOperation(qname);
             if (answer == null) {
diff --git a/components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/SedaDisruptorCompareTest.java b/components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/SedaDisruptorCompareTest.java
index b6fbb82..efac1b6 100644
--- a/components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/SedaDisruptorCompareTest.java
+++ b/components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/SedaDisruptorCompareTest.java
@@ -145,9 +145,9 @@ public class SedaDisruptorCompareTest extends CamelTestSupport {
         // It defines all parameters to the same values as the default, so the result should be the same as
         // 'seda:speedtest'. This shows that disruptor has a slight disadvantage as its name is longer than 'seda' :)
         // The reason why this test takes so long is because Camel has a SLF4J call in ProducerCache:
-        // LOG.debug(">>>> {} {}", endpoint, exchange);
+        // log.debug(">>>> {} {}", endpoint, exchange);
         // and the DefaultEndpoint.toString() method will use a Matcher to sanitize the URI.  There should be a guard
-        // before the debug() call to only evaluate the args when required: if(LOG.isDebugEnabled())...
+        // before the debug() call to only evaluate the args when required: if(log.isDebugEnabled())...
         if (SIZE_PARAMETER_VALUE == 0) {
             parameters
                 .add(new Object[] {"SEDA LONG {P=1, C=1, CCT=1, SIZE=0}",
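
The comment in the hunk above points at an unguarded debug call in ProducerCache; below is a minimal sketch of the guard it suggests, assuming only an SLF4J logger named log on the base class (the class, method and variables are placeholders, not code from the patch):

    import org.apache.camel.Endpoint;
    import org.apache.camel.Exchange;
    import org.apache.camel.support.ServiceSupport;

    public class GuardedDebugSketch extends ServiceSupport {

        void logSend(Endpoint endpoint, Exchange exchange) {
            // guard suggested by the comment above: nothing inside the block,
            // including any toString()/formatting work, runs unless DEBUG is enabled
            if (log.isDebugEnabled()) {
                log.debug(">>>> {} {}", endpoint, exchange);
            }
        }

        @Override
        protected void doStart() throws Exception {
            // nothing to start in this sketch
        }

        @Override
        protected void doStop() throws Exception {
            // nothing to stop in this sketch
        }
    }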
diff --git a/components/camel-docker/src/main/java/org/apache/camel/component/docker/DockerComponent.java b/components/camel-docker/src/main/java/org/apache/camel/component/docker/DockerComponent.java
index 75a169a..b3f2df9 100644
--- a/components/camel-docker/src/main/java/org/apache/camel/component/docker/DockerComponent.java
+++ b/components/camel-docker/src/main/java/org/apache/camel/component/docker/DockerComponent.java
@@ -31,7 +31,6 @@ import org.slf4j.LoggerFactory;
  * Represents the component that manages {@link DockerEndpoint}.
  */
 public class DockerComponent extends DefaultComponent {
-    private static final Logger LOG = LoggerFactory.getLogger(DockerComponent.class);
 
     @Metadata(label = "advanced")
     private DockerConfiguration configuration = new DockerConfiguration();
diff --git a/components/camel-docker/src/main/java/org/apache/camel/component/docker/consumer/DockerEventsConsumer.java b/components/camel-docker/src/main/java/org/apache/camel/component/docker/consumer/DockerEventsConsumer.java
index 7f28df7..09ff9e6 100644
--- a/components/camel-docker/src/main/java/org/apache/camel/component/docker/consumer/DockerEventsConsumer.java
+++ b/components/camel-docker/src/main/java/org/apache/camel/component/docker/consumer/DockerEventsConsumer.java
@@ -31,11 +31,8 @@ import org.apache.camel.component.docker.DockerConstants;
 import org.apache.camel.component.docker.DockerEndpoint;
 import org.apache.camel.component.docker.DockerHelper;
 import org.apache.camel.impl.DefaultConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class DockerEventsConsumer extends DefaultConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(DockerEventsConsumer.class);
 
     private DockerEndpoint endpoint;
     private DockerComponent component;
@@ -84,18 +81,18 @@ public class DockerEventsConsumer extends DefaultConsumer {
     protected class EventsCallback extends EventsResultCallback {
 
         public void onNext(Event event) {
-            LOG.debug("Received Docker Event: {}", event);
+            log.debug("Received Docker Event: {}", event);
 
             final Exchange exchange = getEndpoint().createExchange();
             Message message = exchange.getIn();
             message.setBody(event);
 
             try {
-                LOG.trace("Processing exchange [{}]...", exchange);
+                log.trace("Processing exchange [{}]...", exchange);
                 getAsyncProcessor().process(exchange, new AsyncCallback() {
                     @Override
                     public void done(boolean doneSync) {
-                        LOG.trace("Done processing exchange [{}]...", exchange);
+                        log.trace("Done processing exchange [{}]...", exchange);
                     }
                 });
             } catch (Exception e) {
diff --git a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerComponent.java b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerComponent.java
index 7fcf5e1..042938f 100644
--- a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerComponent.java
+++ b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerComponent.java
@@ -27,8 +27,6 @@ import org.slf4j.LoggerFactory;
 
 public class DozerComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DozerComponent.class);
-
     public DozerComponent() {
     }
 
diff --git a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerEndpoint.java b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerEndpoint.java
index 0ba1454..5561eb7 100644
--- a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerEndpoint.java
+++ b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerEndpoint.java
@@ -33,15 +33,12 @@ import org.apache.camel.impl.DefaultEndpoint;
 import org.apache.camel.spi.UriEndpoint;
 import org.apache.camel.spi.UriParam;
 import org.apache.camel.util.ResourceHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The dozer component provides the ability to map between Java beans using the Dozer mapping library.
  */
 @UriEndpoint(firstVersion = "2.15.0", scheme = "dozer", title = "Dozer", syntax = "dozer:name", producerOnly = true, label = "transformation")
 public class DozerEndpoint extends DefaultEndpoint {
-    private static final Logger LOG = LoggerFactory.getLogger(DozerEndpoint.class);
 
     // IDs for built-in custom converters used with the Dozer component
     private static final String CUSTOM_MAPPING_ID = "_customMapping";
@@ -117,7 +114,7 @@ public class DozerEndpoint extends DefaultEndpoint {
     }
 
     protected void initDozerBeanContainerAndMapper() throws Exception {
-        LOG.info("Configuring {}...", Mapper.class.getName());
+        log.info("Configuring {}...", Mapper.class.getName());
 
         if (mapper == null) {
             if (configuration.getMappingConfiguration() == null) {
diff --git a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerProducer.java b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerProducer.java
index 7050225..53e8402 100644
--- a/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerProducer.java
+++ b/components/camel-dozer/src/main/java/org/apache/camel/component/dozer/DozerProducer.java
@@ -23,16 +23,12 @@ import org.apache.camel.model.DataFormatDefinition;
 import org.apache.camel.processor.MarshalProcessor;
 import org.apache.camel.processor.UnmarshalProcessor;
 import org.apache.camel.spi.DataFormat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Producer class for Dozer endpoints.
  */
 public class DozerProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(DozerProducer.class);
-    
     private DozerEndpoint endpoint;
     private UnmarshalProcessor unmarshaller;
     private MarshalProcessor marshaller;
@@ -51,7 +47,7 @@ public class DozerProducer extends DefaultProducer {
         // Unmarshal the source content only if an unmarshaller is configured.
         String unmarshalId = endpoint.getConfiguration().getUnmarshalId();
         if (unmarshalId != null) {
-            LOG.debug("Unmarshalling input data using data format '{}'.", unmarshalId);
+            log.debug("Unmarshalling input data using data format '{}'.", unmarshalId);
             resolveUnmarshaller(exchange, unmarshalId).process(exchange);
             if (exchange.getException() != null) {
                 throw exchange.getException();
@@ -68,7 +64,7 @@ public class DozerProducer extends DefaultProducer {
         // Convert to source model, if specified
         String sourceType = endpoint.getConfiguration().getSourceModel();
         if (sourceType != null) {
-            LOG.debug("Converting to source model {}.", sourceType);
+            log.debug("Converting to source model {}.", sourceType);
             Class<?> sourceModel = endpoint.getCamelContext()
                     .getClassResolver().resolveClass(sourceType);
             if (sourceModel == null) {
@@ -78,7 +74,7 @@ public class DozerProducer extends DefaultProducer {
         }
         
         // Perform mappings
-        LOG.debug("Mapping to target model {}.", targetModel.getName());
+        log.debug("Mapping to target model {}.", targetModel.getName());
         Object targetObject = endpoint.getMapper().map(msg.getBody(), targetModel);
         // Second pass to process literal mappings
         endpoint.getMapper().map(endpoint.getVariableMapper(), targetObject);
@@ -96,7 +92,7 @@ public class DozerProducer extends DefaultProducer {
         // Marshal the source content only if a marshaller is configured.
         String marshalId = endpoint.getConfiguration().getMarshalId();
         if (marshalId != null) {
-            LOG.debug("Marshalling output data using data format '{}'.", marshalId);
+            log.debug("Marshalling output data using data format '{}'.", marshalId);
             resolveMarshaller(exchange, marshalId).process(exchange);
             if (exchange.getException() != null) {
                 throw exchange.getException();
diff --git a/components/camel-ehcache/src/main/java/org/apache/camel/component/ehcache/processor/aggregate/EhcacheAggregationRepository.java b/components/camel-ehcache/src/main/java/org/apache/camel/component/ehcache/processor/aggregate/EhcacheAggregationRepository.java
index 7c5af56..f9a1b90 100644
--- a/components/camel-ehcache/src/main/java/org/apache/camel/component/ehcache/processor/aggregate/EhcacheAggregationRepository.java
+++ b/components/camel-ehcache/src/main/java/org/apache/camel/component/ehcache/processor/aggregate/EhcacheAggregationRepository.java
@@ -30,11 +30,8 @@ import org.apache.camel.support.ServiceSupport;
 import org.apache.camel.util.ObjectHelper;
 import org.ehcache.Cache;
 import org.ehcache.CacheManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class EhcacheAggregationRepository extends ServiceSupport implements RecoverableAggregationRepository {
-    private static final Logger LOG = LoggerFactory.getLogger(EhcacheAggregationRepository.class);
 
     private CamelContext camelContext;
     private CacheManager cacheManager;
@@ -146,7 +143,7 @@ public class EhcacheAggregationRepository extends ServiceSupport implements Reco
 
     @Override
     public Exchange add(final CamelContext camelContext, final String key, final Exchange exchange) {
-        LOG.trace("Adding an Exchange with ID {} for key {} in a thread-safe manner.", exchange.getExchangeId(), key);
+        log.trace("Adding an Exchange with ID {} for key {} in a thread-safe manner.", exchange.getExchangeId(), key);
 
         final DefaultExchangeHolder oldHolder = cache.get(key);
         final DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(exchange, true, allowSerializedHeaders);
@@ -163,13 +160,13 @@ public class EhcacheAggregationRepository extends ServiceSupport implements Reco
 
     @Override
     public void remove(CamelContext camelContext, String key, Exchange exchange) {
-        LOG.trace("Removing an exchange with ID {} for key {}", exchange.getExchangeId(), key);
+        log.trace("Removing an exchange with ID {} for key {}", exchange.getExchangeId(), key);
         cache.remove(key);
     }
 
     @Override
     public void confirm(CamelContext camelContext, String exchangeId) {
-        LOG.trace("Confirming an exchange with ID {}.", exchangeId);
+        log.trace("Confirming an exchange with ID {}.", exchangeId);
         cache.remove(exchangeId);
     }
 
@@ -183,15 +180,15 @@ public class EhcacheAggregationRepository extends ServiceSupport implements Reco
 
     @Override
     public Set<String> scan(CamelContext camelContext) {
-        LOG.trace("Scanning for exchanges to recover in {} context", camelContext.getName());
+        log.trace("Scanning for exchanges to recover in {} context", camelContext.getName());
         Set<String> scanned = Collections.unmodifiableSet(getKeys());
-        LOG.trace("Found {} keys for exchanges to recover in {} context", scanned.size(), camelContext.getName());
+        log.trace("Found {} keys for exchanges to recover in {} context", scanned.size(), camelContext.getName());
         return scanned;
     }
 
     @Override
     public Exchange recover(CamelContext camelContext, String exchangeId) {
-        LOG.trace("Recovering an Exchange with ID {}.", exchangeId);
+        log.trace("Recovering an Exchange with ID {}.", exchangeId);
         return useRecovery ? unmarshallExchange(camelContext, cache.get(exchangeId)) : null;
     }
 
diff --git a/components/camel-elasticsearch-rest/src/main/java/org/apache/camel/component/elasticsearch/ElasticsearchProducer.java b/components/camel-elasticsearch-rest/src/main/java/org/apache/camel/component/elasticsearch/ElasticsearchProducer.java
index ac436da..475e298 100644
--- a/components/camel-elasticsearch-rest/src/main/java/org/apache/camel/component/elasticsearch/ElasticsearchProducer.java
+++ b/components/camel-elasticsearch-rest/src/main/java/org/apache/camel/component/elasticsearch/ElasticsearchProducer.java
@@ -44,8 +44,6 @@ import org.elasticsearch.client.sniff.Sniffer;
 import org.elasticsearch.client.sniff.SnifferBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 
 /**
@@ -53,8 +51,6 @@ import org.slf4j.LoggerFactory;
  */
 public class ElasticsearchProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchProducer.class);
-
     protected final ElasticsearchConfiguration configuration;
     private RestClient client;
     private Sniffer sniffer;
@@ -240,12 +236,12 @@ public class ElasticsearchProducer extends DefaultProducer {
 
     private void startClient() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException, UnknownHostException {
         if (client == null) {
-            LOG.info("Connecting to the ElasticSearch cluster: {}", configuration.getClusterName());
+            log.info("Connecting to the ElasticSearch cluster: {}", configuration.getClusterName());
             if (configuration.getHostAddressesList() != null
                 && !configuration.getHostAddressesList().isEmpty()) {
                 client = createClient();
             } else {
-                LOG.warn("Incorrect ip address and port parameters settings for ElasticSearch cluster");
+                log.warn("Incorrect ip address and port parameters settings for ElasticSearch cluster");
             }
         }
     }
@@ -277,7 +273,7 @@ public class ElasticsearchProducer extends DefaultProducer {
     @Override
     protected void doStop() throws Exception {
         if (client != null) {
-            LOG.info("Disconnecting from ElasticSearch cluster: {}", configuration.getClusterName());
+            log.info("Disconnecting from ElasticSearch cluster: {}", configuration.getClusterName());
             client.close();
             if (sniffer != null) {
                 sniffer.close();
diff --git a/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlEndpoint.java b/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlEndpoint.java
index 7b10470..6cc432e 100644
--- a/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlEndpoint.java
+++ b/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlEndpoint.java
@@ -39,9 +39,6 @@ import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.ResourceHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.core.namedparam.EmptySqlParameterSource;
 import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
 import org.springframework.jdbc.core.namedparam.SqlParameterSource;
 
@@ -52,8 +49,6 @@ import org.springframework.jdbc.core.namedparam.SqlParameterSource;
         label = "database,sql")
 public class ElsqlEndpoint extends DefaultSqlEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ElsqlEndpoint.class);
-
     private ElSql elSql;
     private final NamedParameterJdbcTemplate namedJdbcTemplate;
 
@@ -86,7 +81,7 @@ public class ElsqlEndpoint extends DefaultSqlEndpoint {
         final Exchange dummy = createExchange();
         final SqlParameterSource param = new ElsqlSqlMapSource(dummy, null);
         final String sql = elSql.getSql(elsqlName, new SpringSqlParams(param));
-        LOG.debug("ElsqlConsumer @{} using sql: {}", elsqlName, sql);
+        log.debug("ElsqlConsumer @{} using sql: {}", elsqlName, sql);
 
         final ElsqlConsumer consumer = new ElsqlConsumer(this, processor, namedJdbcTemplate, sql, param, preStategy, proStrategy);
         consumer.setMaxMessagesPerPoll(getMaxMessagesPerPoll());
diff --git a/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlProducer.java b/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlProducer.java
index 58feb1d..fae9239 100644
--- a/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlProducer.java
+++ b/components/camel-elsql/src/main/java/org/apache/camel/component/elsql/ElsqlProducer.java
@@ -33,8 +33,6 @@ import org.apache.camel.component.sql.SqlConstants;
 import org.apache.camel.component.sql.SqlOutputType;
 import org.apache.camel.component.sql.SqlPrepareStatementStrategy;
 import org.apache.camel.impl.DefaultProducer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.PreparedStatementCallback;
 import org.springframework.jdbc.core.PreparedStatementCreator;
@@ -51,7 +49,6 @@ import static org.springframework.jdbc.support.JdbcUtils.closeStatement;
 
 public class ElsqlProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(ElsqlProducer.class);
     private final ElSql elSql;
     private final String elSqlName;
     private final NamedParameterJdbcTemplate jdbcTemplate;
@@ -81,7 +78,7 @@ public class ElsqlProducer extends DefaultProducer {
 
         final SqlParameterSource param = new ElsqlSqlMapSource(exchange, data);
         final String sql = elSql.getSql(elSqlName, new SpringSqlParams(param));
-        LOG.debug("ElsqlProducer @{} using sql: {}", elSqlName, sql);
+        log.debug("ElsqlProducer @{} using sql: {}", elSqlName, sql);
 
         // special for processing stream list (batch not supported)
         final SqlOutputType outputType = getEndpoint().getOutputType();
diff --git a/components/camel-eventadmin/src/main/java/org/apache/camel/component/eventadmin/EventAdminConsumer.java b/components/camel-eventadmin/src/main/java/org/apache/camel/component/eventadmin/EventAdminConsumer.java
index a8d6b0f..86586c8 100644
--- a/components/camel-eventadmin/src/main/java/org/apache/camel/component/eventadmin/EventAdminConsumer.java
+++ b/components/camel-eventadmin/src/main/java/org/apache/camel/component/eventadmin/EventAdminConsumer.java
@@ -25,12 +25,9 @@ import org.osgi.framework.ServiceRegistration;
 import org.osgi.service.event.Event;
 import org.osgi.service.event.EventConstants;
 import org.osgi.service.event.EventHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class EventAdminConsumer extends DefaultConsumer implements EventHandler {
 
-    private static final Logger LOG = LoggerFactory.getLogger(EventAdminConsumer.class);
     private final EventAdminEndpoint endpoint;
     private ServiceRegistration<?> registration;
 
@@ -44,7 +41,7 @@ public class EventAdminConsumer extends DefaultConsumer implements EventHandler
         // TODO: populate exchange headers
         exchange.getIn().setBody(event);
 
-        LOG.trace("EventAdmin {} is firing", endpoint.getTopic());
+        log.trace("EventAdmin {} is firing", endpoint.getTopic());
         try {
             getProcessor().process(exchange);
             // log exception if an exception occurred and was not handled
diff --git a/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookConsumer.java b/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookConsumer.java
index a1d0020..3c2d03c 100644
--- a/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookConsumer.java
+++ b/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookConsumer.java
@@ -42,8 +42,7 @@ import org.apache.camel.component.facebook.data.FacebookPropertiesHelper;
 import org.apache.camel.component.facebook.data.ReadingBuilder;
 import org.apache.camel.impl.ScheduledPollConsumer;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
 import static org.apache.camel.component.facebook.FacebookConstants.FACEBOOK_DATE_FORMAT;
 import static org.apache.camel.component.facebook.FacebookConstants.READING_PREFIX;
 import static org.apache.camel.component.facebook.FacebookConstants.READING_PROPERTY;
@@ -57,7 +56,6 @@ import static org.apache.camel.component.facebook.data.FacebookMethodsTypeHelper
  */
 public class FacebookConsumer extends ScheduledPollConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(FacebookConsumer.class);
     private static final String SINCE_PREFIX = "since=";
 
     private final FacebookEndpoint endpoint;
@@ -96,10 +94,10 @@ public class FacebookConsumer extends ScheduledPollConsumer {
                 } catch (UnsupportedEncodingException e) {
                     throw new RuntimeCamelException(String.format("Error decoding %s.since with value %s due to: %s", READING_PREFIX, strSince, e.getMessage()), e);
                 }
-                LOG.debug("Using supplied property {}since value {}", READING_PREFIX, this.sinceTime);
+                log.debug("Using supplied property {}since value {}", READING_PREFIX, this.sinceTime);
             }
             if (queryString.contains("until=")) {
-                LOG.debug("Overriding configured property {}until", READING_PREFIX);
+                log.debug("Overriding configured property {}until", READING_PREFIX);
             }
         }
         this.endpointProperties = Collections.unmodifiableMap(properties);
@@ -135,7 +133,7 @@ public class FacebookConsumer extends ScheduledPollConsumer {
             result = filteredMethods.get(0);
         } else {
             result = getHighestPriorityMethod(filteredMethods);
-            LOG.warn("Using highest priority method {} from methods {}", method, filteredMethods);
+            log.warn("Using highest priority method {} from methods {}", method, filteredMethods);
         }
         return result;
     }
diff --git a/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookEndpoint.java b/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookEndpoint.java
index 38d291c..3ff4cd1 100644
--- a/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookEndpoint.java
+++ b/components/camel-facebook/src/main/java/org/apache/camel/component/facebook/FacebookEndpoint.java
@@ -38,8 +38,6 @@ import org.apache.camel.spi.UriParam;
 import org.apache.camel.spi.UriPath;
 import org.apache.camel.util.EndpointHelper;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.component.facebook.data.FacebookMethodsTypeHelper.convertToGetMethod;
 import static org.apache.camel.component.facebook.data.FacebookMethodsTypeHelper.convertToSearchMethod;
@@ -56,8 +54,6 @@ import static org.apache.camel.component.facebook.data.FacebookPropertiesHelper.
 @UriEndpoint(firstVersion = "2.14.0", scheme = "facebook", title = "Facebook", syntax = "facebook:methodName", consumerClass = FacebookConsumer.class, label = "social")
 public class FacebookEndpoint extends DefaultEndpoint implements FacebookConstants {
 
-    private static final Logger LOG = LoggerFactory.getLogger(FacebookEndpoint.class);
-
     private FacebookNameStyle nameStyle;
 
     @UriPath(name = "methodName", description = "What operation to perform") @Metadata(required = "true")
@@ -165,10 +161,10 @@ public class FacebookEndpoint extends DefaultEndpoint implements FacebookConstan
         }
 
         // log missing/extra properties for debugging
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
             final Set<String> missing = getMissingProperties(method, nameStyle, arguments);
             if (!missing.isEmpty()) {
-                LOG.debug("Method {} could use one or more properties from {}", method, missing);
+                log.debug("Method {} could use one or more properties from {}", method, missing);
             }
         }
     }
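
[Editorial sketch] The FacebookEndpoint hunk above keeps the existing isDebugEnabled() guard and only changes which field the calls go through. The guard still matters when assembling the log arguments is itself costly (here, computing the set of missing properties). Below is a standalone sketch of that pattern; the local log field stands in for the inherited one, and computeMissing() is a hypothetical placeholder for any expensive argument:

    import java.util.Set;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DebugGuardSketch {
        // stands in for the protected logger inherited from ServiceSupport
        private final Logger log = LoggerFactory.getLogger(getClass());

        void logMissing(String method, Set<String> candidates) {
            // Only pay for building the argument when DEBUG is actually enabled;
            // parameterized messages alone do not avoid that cost.
            if (log.isDebugEnabled()) {
                Set<String> missing = computeMissing(candidates); // hypothetical helper
                if (!missing.isEmpty()) {
                    log.debug("Method {} could use one or more properties from {}", method, missing);
                }
            }
        }

        private Set<String> computeMissing(Set<String> candidates) {
            return candidates; // placeholder body for the sketch
        }
    }
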
diff --git a/components/camel-git/src/main/java/org/apache/camel/component/git/consumer/AbstractGitConsumer.java b/components/camel-git/src/main/java/org/apache/camel/component/git/consumer/AbstractGitConsumer.java
index 615a994..7f8bb00 100644
--- a/components/camel-git/src/main/java/org/apache/camel/component/git/consumer/AbstractGitConsumer.java
+++ b/components/camel-git/src/main/java/org/apache/camel/component/git/consumer/AbstractGitConsumer.java
@@ -25,13 +25,9 @@ import org.apache.camel.impl.ScheduledPollConsumer;
 import org.eclipse.jgit.api.Git;
 import org.eclipse.jgit.lib.Repository;
 import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public abstract class AbstractGitConsumer extends ScheduledPollConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractGitConsumer.class);
-
     private final GitEndpoint endpoint;
 
     private Repository repo;
@@ -65,7 +61,7 @@ public abstract class AbstractGitConsumer extends ScheduledPollConsumer {
                     .findGitDir() // scan up the file system tree
                     .build();
         } catch (IOException e) {
-            LOG.error("There was an error, cannot open {} repository", endpoint.getLocalPath());
+            log.error("There was an error, cannot open {} repository", endpoint.getLocalPath());
             throw e;
         }
         return repo;
diff --git a/components/camel-git/src/main/java/org/apache/camel/component/git/producer/GitProducer.java b/components/camel-git/src/main/java/org/apache/camel/component/git/producer/GitProducer.java
index ccea8729..71f8b6a 100644
--- a/components/camel-git/src/main/java/org/apache/camel/component/git/producer/GitProducer.java
+++ b/components/camel-git/src/main/java/org/apache/camel/component/git/producer/GitProducer.java
@@ -47,14 +47,10 @@ import org.eclipse.jgit.transport.PushResult;
 import org.eclipse.jgit.transport.RemoteConfig;
 import org.eclipse.jgit.transport.URIish;
 import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 
 public class GitProducer extends DefaultProducer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(GitProducer.class);
-
     private final GitEndpoint endpoint;
 
     private Repository repo;
@@ -209,7 +205,7 @@ public class GitProducer extends DefaultProducer {
                 throw new IllegalArgumentException("The local repository directory already exists");
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         } finally {
             if (ObjectHelper.isNotEmpty(result)) {
@@ -226,7 +222,7 @@ public class GitProducer extends DefaultProducer {
         try {
             result = Git.init().setDirectory(new File(endpoint.getLocalPath(), "")).setBare(false).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         } finally {
             if (ObjectHelper.isNotEmpty(result)) {
@@ -248,7 +244,7 @@ public class GitProducer extends DefaultProducer {
             }
             git.add().addFilepattern(fileName).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -266,7 +262,7 @@ public class GitProducer extends DefaultProducer {
             }
             git.rm().addFilepattern(fileName).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -300,7 +296,7 @@ public class GitProducer extends DefaultProducer {
                 git.commit().setAllowEmpty(allowEmpty).setMessage(commitMessage).call();
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -334,7 +330,7 @@ public class GitProducer extends DefaultProducer {
                 git.commit().setAllowEmpty(allowEmpty).setAll(true).setMessage(commitMessage).call();
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -346,7 +342,7 @@ public class GitProducer extends DefaultProducer {
         try {
             git.branchCreate().setName(endpoint.getBranchName()).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -358,7 +354,7 @@ public class GitProducer extends DefaultProducer {
         try {
             git.branchDelete().setBranchNames(endpoint.getBranchName()).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -371,7 +367,7 @@ public class GitProducer extends DefaultProducer {
             }
             status = git.status().call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, status);
@@ -385,7 +381,7 @@ public class GitProducer extends DefaultProducer {
             }
             revCommit = git.log().call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, revCommit);
@@ -407,7 +403,7 @@ public class GitProducer extends DefaultProducer {
                 result = git.push().setRemote(endpoint.getRemoteName()).call();
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -429,7 +425,7 @@ public class GitProducer extends DefaultProducer {
                 result = git.push().setRemote(endpoint.getRemoteName()).add(Constants.R_TAGS + endpoint.getTagName()).call();
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -451,7 +447,7 @@ public class GitProducer extends DefaultProducer {
                 result = git.pull().setRemote(endpoint.getRemoteName()).call();
             }
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -468,7 +464,7 @@ public class GitProducer extends DefaultProducer {
             git.checkout().setName("master").call();
             result = git.merge().include(mergeBase).setFastForward(FastForwardMode.FF).setCommit(true).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -481,7 +477,7 @@ public class GitProducer extends DefaultProducer {
         try {
             git.tag().setName(endpoint.getTagName()).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -493,7 +489,7 @@ public class GitProducer extends DefaultProducer {
         try {
             git.tagDelete().setTags(endpoint.getTagName()).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
     }
@@ -503,7 +499,7 @@ public class GitProducer extends DefaultProducer {
         try {
             result = git.branchList().setListMode(ListMode.ALL).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -514,7 +510,7 @@ public class GitProducer extends DefaultProducer {
         try {
             result = git.tagList().call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -538,7 +534,7 @@ public class GitProducer extends DefaultProducer {
             }
             result = git.cherryPick().include(commit).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -552,7 +548,7 @@ public class GitProducer extends DefaultProducer {
             }
             result = git.clean().setCleanDirectories(true).call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -563,7 +559,7 @@ public class GitProducer extends DefaultProducer {
         try {
             result = git.gc().call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -583,7 +579,7 @@ public class GitProducer extends DefaultProducer {
             remoteAddCommand.setName(endpoint.getRemoteName());
             result = remoteAddCommand.call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -594,7 +590,7 @@ public class GitProducer extends DefaultProducer {
         try {
             result = git.remoteList().call();
         } catch (Exception e) {
-            LOG.error("There was an error in Git {} operation", operation);
+            log.error("There was an error in Git {} operation", operation);
             throw e;
         }
         updateExchange(exchange, result);
@@ -611,7 +607,7 @@ public class GitProducer extends DefaultProducer {
                 .findGitDir() // scan up the file system tree
                 .build();
         } catch (IOException e) {
-            LOG.error("There was an error, cannot open {} repository", endpoint.getLocalPath());
+            log.error("There was an error, cannot open {} repository", endpoint.getLocalPath());
             throw e;
         }
         return repo;
diff --git a/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamConsumer.java b/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamConsumer.java
index 5c31853..eb43131 100644
--- a/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamConsumer.java
+++ b/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamConsumer.java
@@ -34,15 +34,12 @@ import org.apache.camel.Processor;
 import org.apache.camel.impl.ScheduledBatchPollingConsumer;
 import org.apache.camel.util.CastUtils;
 import org.apache.camel.util.ObjectHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The GoogleCalendar consumer.
  */
 public class GoogleCalendarStreamConsumer extends ScheduledBatchPollingConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(GoogleCalendarStreamConsumer.class);
     private DateTime lastUpdate;
 
     public GoogleCalendarStreamConsumer(Endpoint endpoint, Processor processor) {
@@ -133,7 +130,7 @@ public class GoogleCalendarStreamConsumer extends ScheduledBatchPollingConsumer
             getAsyncProcessor().process(exchange, new AsyncCallback() {
                 @Override
                 public void done(boolean doneSync) {
-                    LOG.trace("Processing exchange done");
+                    log.trace("Processing exchange done");
                 }
             });
         }
diff --git a/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamEndpoint.java b/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamEndpoint.java
index eab297b..35e89d8 100644
--- a/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamEndpoint.java
+++ b/components/camel-google-calendar/src/main/java/org/apache/camel/component/google/calendar/stream/GoogleCalendarStreamEndpoint.java
@@ -48,8 +48,6 @@ import org.slf4j.LoggerFactory;
              label = "api,cloud")
 public class GoogleCalendarStreamEndpoint extends ScheduledPollEndpoint {
     
-    private static final Logger LOG = LoggerFactory.getLogger(GoogleCalendarStreamEndpoint.class);
-
     @UriParam
     private GoogleCalendarStreamConfiguration configuration;
 
diff --git a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
index 63afa93..174a34c 100644
--- a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
+++ b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraConsumer.java
@@ -27,8 +27,6 @@ import org.apache.gora.persistency.Persistent;
 import org.apache.gora.query.Query;
 import org.apache.gora.query.Result;
 import org.apache.gora.store.DataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of Camel-Gora {@link Consumer}.
@@ -36,11 +34,6 @@ import org.slf4j.LoggerFactory;
 public class GoraConsumer extends ScheduledPollConsumer {
 
     /**
-     * logger
-     */
-    private static final Logger LOG = LoggerFactory.getLogger(GoraConsumer.class);
-
-    /**
      * GORA datastore
      */
     private final DataStore<Object, Persistent> dataStore;
@@ -94,7 +87,7 @@ public class GoraConsumer extends ScheduledPollConsumer {
         //proceed with query
         final Result result = query.execute();
 
-        LOG.trace("Processing exchange [{}]...", exchange);
+        log.trace("Processing exchange [{}]...", exchange);
 
         try {
             getProcessor().process(exchange);
diff --git a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
index 80c4848..4eae170 100644
--- a/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
+++ b/components/camel-gora/src/main/java/org/apache/camel/component/gora/GoraProducer.java
@@ -35,11 +35,6 @@ import static org.apache.camel.component.gora.utils.GoraUtils.getValueFromExchan
 public class GoraProducer extends DefaultProducer {
 
     /**
-     * logger
-     */
-    private static final Logger LOG = LoggerFactory.getLogger(GoraProducer.class);
-
-    /**
      * Camel-Gora endpoint configuration
      */
     private final GoraConfiguration configuration;
diff --git a/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcConsumer.java b/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcConsumer.java
index dfe717f..7f53c15 100644
--- a/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcConsumer.java
+++ b/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcConsumer.java
@@ -41,14 +41,11 @@ import org.apache.camel.impl.DefaultConsumer;
 import org.apache.camel.spi.ClassResolver;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.ResourceHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Represents gRPC server consumer implementation
  */
 public class GrpcConsumer extends DefaultConsumer {
-    private static final Logger LOG = LoggerFactory.getLogger(GrpcConsumer.class);
 
     protected final GrpcConfiguration configuration;
     protected final GrpcEndpoint endpoint;
@@ -69,17 +66,17 @@ public class GrpcConsumer extends DefaultConsumer {
     protected void doStart() throws Exception {
         super.doStart();
         if (server == null) {
-            LOG.info("Starting the gRPC server");
+            log.info("Starting the gRPC server");
             initializeServer();
             server.start();
-            LOG.info("gRPC server started and listening on port: {}", server.getPort());
+            log.info("gRPC server started and listening on port: {}", server.getPort());
         }
     }
 
     @Override
     protected void doStop() throws Exception {
         if (server != null) {
-            LOG.debug("Terminating gRPC server");
+            log.debug("Terminating gRPC server");
             server.shutdown().shutdownNow();
             server = null;
         }
@@ -101,7 +98,7 @@ public class GrpcConsumer extends DefaultConsumer {
         }
         
         if (!ObjectHelper.isEmpty(configuration.getHost()) && !ObjectHelper.isEmpty(configuration.getPort())) {
-            LOG.debug("Building gRPC server on {}:{}", configuration.getHost(), configuration.getPort());
+            log.debug("Building gRPC server on {}:{}", configuration.getHost(), configuration.getPort());
             serverBuilder = NettyServerBuilder.forAddress(new InetSocketAddress(configuration.getHost(), configuration.getPort()));
         } else {
             throw new IllegalArgumentException("No server start properties (host, port) specified");
@@ -173,7 +170,7 @@ public class GrpcConsumer extends DefaultConsumer {
             });
             return false;
         } else {
-            LOG.warn("Consumer not ready to process exchanges. The exchange {} will be discarded", exchange);
+            log.warn("Consumer not ready to process exchanges. The exchange {} will be discarded", exchange);
             callback.done(true);
             return true;
         }
diff --git a/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcProducer.java b/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcProducer.java
index a411f8e..2e9867a 100644
--- a/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcProducer.java
+++ b/components/camel-grpc/src/main/java/org/apache/camel/component/grpc/GrpcProducer.java
@@ -40,14 +40,11 @@ import org.apache.camel.impl.DefaultProducer;
 import org.apache.camel.spi.ClassResolver;
 import org.apache.camel.util.ObjectHelper;
 import org.apache.camel.util.ResourceHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Represents asynchronous and synchronous gRPC producer implementations.
  */
 public class GrpcProducer extends DefaultProducer implements AsyncProcessor {
-    private static final Logger LOG = LoggerFactory.getLogger(GrpcProducer.class);
 
     protected final GrpcConfiguration configuration;
     protected final GrpcEndpoint endpoint;
@@ -110,10 +107,10 @@ public class GrpcProducer extends DefaultProducer implements AsyncProcessor {
             }
             
             if (endpoint.isSynchronous()) {
-                LOG.debug("Getting synchronous method stub from channel");
+                log.debug("Getting synchronous method stub from channel");
                 grpcStub = GrpcUtils.constructGrpcBlockingStub(endpoint.getServicePackage(), endpoint.getServiceName(), channel, callCreds, endpoint.getCamelContext());
             } else {
-                LOG.debug("Getting asynchronous method stub from channel");
+                log.debug("Getting asynchronous method stub from channel");
                 grpcStub = GrpcUtils.constructGrpcAsyncStub(endpoint.getServicePackage(), endpoint.getServiceName(), channel, callCreds, endpoint.getCamelContext());
             }
             forwarder = GrpcExchangeForwarderFactory.createExchangeForwarder(configuration, grpcStub);
@@ -130,7 +127,7 @@ public class GrpcProducer extends DefaultProducer implements AsyncProcessor {
             forwarder.shutdown();
             forwarder = null;
 
-            LOG.debug("Terminating channel to the remote gRPC server");
+            log.debug("Terminating channel to the remote gRPC server");
             channel.shutdown().shutdownNow();
             channel = null;
             grpcStub = null;
@@ -143,7 +140,7 @@ public class GrpcProducer extends DefaultProducer implements AsyncProcessor {
         NettyChannelBuilder channelBuilder = null;
         
         if (!ObjectHelper.isEmpty(configuration.getHost()) && !ObjectHelper.isEmpty(configuration.getPort())) {
-            LOG.info("Creating channel to the remote gRPC server {}:{}", configuration.getHost(), configuration.getPort());
+            log.info("Creating channel to the remote gRPC server {}:{}", configuration.getHost(), configuration.getPort());
             channelBuilder = NettyChannelBuilder.forAddress(configuration.getHost(), configuration.getPort());
         } else {
             throw new IllegalArgumentException("No connection properties (host or port) specified");
diff --git a/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/queue/HazelcastQueueConsumer.java b/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/queue/HazelcastQueueConsumer.java
index b2cb56c..5c71509 100644
--- a/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/queue/HazelcastQueueConsumer.java
+++ b/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/queue/HazelcastQueueConsumer.java
@@ -27,12 +27,9 @@ import org.apache.camel.Exchange;
 import org.apache.camel.Processor;
 import org.apache.camel.component.hazelcast.HazelcastDefaultConsumer;
 import org.apache.camel.component.hazelcast.listener.CamelItemListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class HazelcastQueueConsumer extends HazelcastDefaultConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HazelcastQueueConsumer.class);
     private final Processor processor;
     private ExecutorService executor;
     private QueueConsumerTask queueConsumerTask;
@@ -95,8 +92,8 @@ public class HazelcastQueueConsumer extends HazelcastDefaultConsumer {
                             getExceptionHandler().handleException("Error during processing", exchange, e);
                         }
                     } catch (InterruptedException e) {
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Hazelcast Queue Consumer Interrupted: {}", e, e);
+                        if (log.isDebugEnabled()) {
+                            log.debug("Hazelcast Queue Consumer Interrupted: {}", e, e);
                             continue;
                         }
                     }
diff --git a/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/seda/HazelcastSedaConsumer.java b/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/seda/HazelcastSedaConsumer.java
index 95183d7..0b6633a 100644
--- a/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/seda/HazelcastSedaConsumer.java
+++ b/components/camel-hazelcast/src/main/java/org/apache/camel/component/hazelcast/seda/HazelcastSedaConsumer.java
@@ -31,15 +31,12 @@ import org.apache.camel.Processor;
 import org.apache.camel.impl.DefaultConsumer;
 import org.apache.camel.impl.DefaultExchangeHolder;
 import org.apache.camel.util.AsyncProcessorConverterHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of Hazelcast SEDA {@link Consumer} component.
  */
 public class HazelcastSedaConsumer extends DefaultConsumer implements Runnable {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HazelcastSedaConsumer.class);
     private final HazelcastSedaEndpoint endpoint;
     private final AsyncProcessor processor;
     private ExecutorService executor;
@@ -114,7 +111,7 @@ public class HazelcastSedaConsumer extends DefaultConsumer implements Runnable {
                         }
 
                     } catch (Exception e) {
-                        LOG.error("Hzlq Exception caught: {}", e, e);
+                        log.error("Hzlq Exception caught: {}", e, e);
                         // Rollback
                         if (transactionCtx != null) {
                             log.trace("Rollback transaction: {}", transactionCtx.getTxnId());
@@ -128,8 +125,8 @@ public class HazelcastSedaConsumer extends DefaultConsumer implements Runnable {
                     transactionCtx.commitTransaction();
                 }
             } catch (InterruptedException e) {
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Hzlq Consumer Interrupted: {}", e, e);
+                if (log.isDebugEnabled()) {
+                    log.debug("Hzlq Consumer Interrupted: {}", e, e);
                 }
                 continue;
             } catch (Throwable e) {
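
[Editorial sketch] The Hazelcast hunks above carry over an existing idiom in which the caught exception is passed twice: once to fill the {} placeholder and once as the trailing argument. With SLF4J 1.6.0 and later, a trailing Throwable left over after placeholder substitution is logged with its stack trace, so such a call prints e.toString() inside the message and the full trace after it. The sketch below only illustrates that behaviour; it is not a change made by this diff:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class InterruptLoggingSketch {
        // stands in for the protected logger inherited from ServiceSupport
        private final Logger log = LoggerFactory.getLogger(getClass());

        void pollOnce() {
            try {
                Thread.sleep(1000); // stands in for the blocking queue poll
            } catch (InterruptedException e) {
                // As in the hunks above: e fills the placeholder, the trailing e is
                // treated as the throwable, so message text and stack trace both appear.
                log.debug("Consumer interrupted: {}", e, e);
                // Conventional equivalent without the duplicated argument:
                log.debug("Consumer interrupted", e);
            }
        }
    }
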
diff --git a/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/HBaseConsumer.java b/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/HBaseConsumer.java
index 10983c7..209e38d 100644
--- a/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/HBaseConsumer.java
+++ b/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/HBaseConsumer.java
@@ -41,16 +41,12 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.PageFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The HBase consumer.
  */
 public class HBaseConsumer extends ScheduledBatchPollingConsumer {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HBaseConsumer.class);
-
     private final HBaseEndpoint endpoint;
     private HBaseRow rowModel;
 
@@ -154,7 +150,7 @@ public class HBaseConsumer extends ScheduledBatchPollingConsumer {
 
         // limit if needed
         if (maxMessagesPerPoll > 0 && total > maxMessagesPerPoll) {
-            LOG.debug("Limiting to maximum messages to poll {} as there were {} messages in this poll.", maxMessagesPerPoll, total);
+            log.debug("Limiting to maximum messages to poll {} as there were {} messages in this poll.", maxMessagesPerPoll, total);
             total = maxMessagesPerPoll;
         }
 
@@ -169,7 +165,7 @@ public class HBaseConsumer extends ScheduledBatchPollingConsumer {
             // update pending number of exchanges
             pendingExchanges = total - index - 1;
 
-            LOG.trace("Processing exchange [{}]...", exchange);
+            log.trace("Processing exchange [{}]...", exchange);
             getProcessor().process(exchange);
             if (exchange.getException() != null) {
                 // if we failed then throw exception
diff --git a/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/processor/idempotent/HBaseIdempotentRepository.java b/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/processor/idempotent/HBaseIdempotentRepository.java
index 92d0198..f5527c9 100644
--- a/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/processor/idempotent/HBaseIdempotentRepository.java
+++ b/components/camel-hbase/src/main/java/org/apache/camel/component/hbase/processor/idempotent/HBaseIdempotentRepository.java
@@ -32,13 +32,9 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class HBaseIdempotentRepository extends ServiceSupport implements IdempotentRepository<Object> {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HBaseIdempotentRepository.class);
-
     private final String tableName;
     private final String family;
     private final String qualifier;
@@ -69,7 +65,7 @@ public class HBaseIdempotentRepository extends ServiceSupport implements Idempot
                 return true;
             }
         } catch (Exception e) {
-            LOG.warn("Error adding object {} to HBase repository.", o);
+            log.warn("Error adding object {} to HBase repository.", o);
             return false;
         }
     }
@@ -82,7 +78,7 @@ public class HBaseIdempotentRepository extends ServiceSupport implements Idempot
             get.addColumn(HBaseHelper.getHBaseFieldAsBytes(family), HBaseHelper.getHBaseFieldAsBytes(qualifier));
             return table.exists(get);
         } catch (Exception e) {
-            LOG.warn("Error reading object {} from HBase repository.", o);
+            log.warn("Error reading object {} from HBase repository.", o);
             return false;
         }
     }
@@ -99,7 +95,7 @@ public class HBaseIdempotentRepository extends ServiceSupport implements Idempot
                 return false;
             }
         } catch (Exception e) {
-            LOG.warn("Error removing object {} from HBase repository.", o);
+            log.warn("Error removing object {} from HBase repository.", o);
             return false;
         }
     }
@@ -120,7 +116,7 @@ public class HBaseIdempotentRepository extends ServiceSupport implements Idempot
                 table.delete(d);
             } 
         } catch (Exception e) {
-            LOG.warn("Error clear HBase repository {}", table);
+            log.warn("Error clear HBase repository {}", table);
         }
     }    
 
diff --git a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatComponent.java b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatComponent.java
index 3e3f835..71bc882 100644
--- a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatComponent.java
+++ b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatComponent.java
@@ -23,8 +23,6 @@ import org.apache.camel.CamelContext;
 import org.apache.camel.Endpoint;
 import org.apache.camel.impl.DefaultComponent;
 import org.apache.camel.util.URISupport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Represents the component that manages {@link HipchatEndpoint}. Hipchat is an Atlassian software for team chat.
@@ -36,8 +34,6 @@ import org.slf4j.LoggerFactory;
  */
 public class HipchatComponent extends DefaultComponent {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HipchatComponent.class);
-
     public HipchatComponent() {
     }
 
@@ -52,7 +48,7 @@ public class HipchatComponent extends DefaultComponent {
             throw new HipchatException("OAuth 2 auth token must be specified");
         }
         parseUri(remaining, endpoint);
-        LOG.debug("Using Hipchat API URL: {}", endpoint.getConfiguration().hipChatUrl());
+        log.debug("Using Hipchat API URL: {}", endpoint.getConfiguration().hipChatUrl());
         return endpoint;
     }
 
diff --git a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatConsumer.java b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatConsumer.java
index 8947729..e8c9535 100644
--- a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatConsumer.java
+++ b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatConsumer.java
@@ -32,15 +32,12 @@ import org.apache.camel.impl.ScheduledPollConsumer;
 import org.apache.camel.util.URISupport;
 import org.apache.http.client.methods.CloseableHttpResponse;
 import org.apache.http.client.methods.HttpGet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * The Hipchat consumer consumes messages from a list of users.
  */
 public class HipchatConsumer extends ScheduledPollConsumer {
     public static final long DEFAULT_CONSUMER_DELAY = 5 * 1000;
-    private static final Logger LOG = LoggerFactory.getLogger(HipchatConsumer.class);
     private static final MapType MAP_TYPE = TypeFactory.defaultInstance().constructMapType(Map.class, String.class, Object.class);
     private static final ObjectMapper MAPPER = new ObjectMapper();
     
@@ -63,7 +60,7 @@ public class HipchatConsumer extends ScheduledPollConsumer {
 
     private void processExchangeForUser(String user, Exchange exchange) throws Exception {
         String urlPath = String.format(getMostRecentMessageUrl(), user);
-        LOG.debug("Polling HipChat Api " + urlPath + " for new messages at " + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+        log.debug("Polling HipChat Api " + urlPath + " for new messages at " + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
         HttpGet httpGet = new HttpGet(getConfig().hipChatUrl() + urlPath);
         CloseableHttpResponse response = executeGet(httpGet);
         exchange.getIn().setHeader(HipchatConstants.FROM_USER, user);
@@ -73,7 +70,7 @@ public class HipchatConsumer extends ScheduledPollConsumer {
     private void processApiResponse(Exchange exchange, CloseableHttpResponse response) throws Exception {
         try {
             Map<String, Object> jsonMap = MAPPER.readValue(response.getEntity().getContent(), MAP_TYPE);
-            LOG.debug("Hipchat response " + response + ", json: " + MAPPER.writeValueAsString(jsonMap));
+            log.debug("Hipchat response " + response + ", json: " + MAPPER.writeValueAsString(jsonMap));
             if (jsonMap != null && jsonMap.size() > 0) {
                 List<Map<String, Object>> items = (List<Map<String, Object>>) jsonMap.get(HipchatApiConstants.API_ITEMS);
                 if (items != null && items.size() > 0) {
@@ -81,7 +78,7 @@ public class HipchatConsumer extends ScheduledPollConsumer {
                         Map<String, Object> item = items.get(0);
                         String date = (String) item.get(HipchatApiConstants.API_DATE);
                         String message = (String) item.get(HipchatApiConstants.API_MESSAGE);
-                        LOG.debug("Setting exchange body: " + message + ", header " + HipchatConstants.MESSAGE_DATE + ": " + date);
+                        log.debug("Setting exchange body: " + message + ", header " + HipchatConstants.MESSAGE_DATE + ": " + date);
                         exchange.getIn().setHeader(HipchatConstants.FROM_USER_RESPONSE_STATUS, response.getStatusLine());
                         exchange.getIn().setHeader(HipchatConstants.MESSAGE_DATE, date);
                         exchange.getIn().setBody(message);
diff --git a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatProducer.java b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatProducer.java
index 125980b..7200d8b 100644
--- a/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatProducer.java
+++ b/components/camel-hipchat/src/main/java/org/apache/camel/component/hipchat/HipchatProducer.java
@@ -32,8 +32,6 @@ import org.apache.http.client.methods.CloseableHttpResponse;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.camel.util.UnsafeUriCharactersEncoder.encodeHttpURI;
 
@@ -41,7 +39,7 @@ import static org.apache.camel.util.UnsafeUriCharactersEncoder.encodeHttpURI;
  * The Hipchat producer to send message to a user and/or a room.
  */
 public class HipchatProducer extends DefaultProducer {
-    private static final Logger LOG = LoggerFactory.getLogger(HipchatProducer.class);
+
     private static final ObjectMapper MAPPER = new ObjectMapper();
     
     private transient String hipchatProducerToString;
@@ -69,18 +67,18 @@ public class HipchatProducer extends DefaultProducer {
         if (backGroundColor != null) {
             jsonParam.put(HipchatApiConstants.API_MESSAGE_COLOR, backGroundColor);
         }
-        LOG.info("Sending message to room: " + room + ", " + MAPPER.writeValueAsString(jsonParam));
+        log.info("Sending message to room: " + room + ", " + MAPPER.writeValueAsString(jsonParam));
         StatusLine statusLine = post(encodeHttpURI(urlPath), jsonParam);
-        LOG.debug("Response status for send room message: {}", statusLine);
+        log.debug("Response status for send room message: {}", statusLine);
         return statusLine;
     }
 
     private StatusLine sendUserMessage(String user, Exchange exchange) throws IOException, InvalidPayloadException {
         String urlPath = String.format(getConfig().withAuthToken(HipchatApiConstants.URI_PATH_USER_MESSAGE), user);
         Map<String, String> jsonParam = getCommonHttpPostParam(exchange);
-        LOG.info("Sending message to user: " + user + ", " + MAPPER.writeValueAsString(jsonParam));
+        log.info("Sending message to user: " + user + ", " + MAPPER.writeValueAsString(jsonParam));
         StatusLine statusLine = post(urlPath, jsonParam);
-        LOG.debug("Response status for send user message: {}", statusLine);
+        log.debug("Response status for send user message: {}", statusLine);
         return statusLine;
     }
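
[Editorial sketch] Unlike most files in this diff, the Hipchat consumer and producer build their log messages by string concatenation, including a Jackson serialization of the payload. Switching the logger field does not change that; the strings are still built eagerly on every call. The standalone sketch below shows the parameterized style used elsewhere in the diff, as a possible follow-up rather than something this commit does; the json argument is a stand-in for the serialized payload:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class HipchatLoggingSketch {
        // stands in for the protected logger inherited from ServiceSupport
        private final Logger log = LoggerFactory.getLogger(getClass());

        void logSend(String room, String json) {
            // Parameterized form: the final message string is only built if INFO is enabled.
            log.info("Sending message to room: {}, {}", room, json);
            // Any expensive argument (e.g. serializing the payload to json) is still
            // evaluated eagerly, so a level guard remains useful in that case:
            if (log.isDebugEnabled()) {
                log.debug("Hipchat payload: {}", json);
            }
        }
    }
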
 
diff --git a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpComponent.java b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpComponent.java
index 45fe267..d13d15e 100644
--- a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpComponent.java
+++ b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpComponent.java
@@ -62,8 +62,6 @@ import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.ssl.SSLContexts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Defines the <a href="http://camel.apache.org/http4.html">HTTP4
@@ -74,8 +72,6 @@ import org.slf4j.LoggerFactory;
 @Metadata(label = "verifiers", enums = "parameters,connectivity")
 public class HttpComponent extends HttpCommonComponent implements RestProducerFactory, SSLContextParametersAware {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HttpComponent.class);
-
     @Metadata(label = "advanced", description = "To use the custom HttpClientConfigurer to perform configuration of the HttpClient that will be used.")
     protected HttpClientConfigurer httpClientConfigurer;
     @Metadata(label = "advanced", description = "To use a custom and shared HttpClientConnectionManager to manage connections."
@@ -275,7 +271,7 @@ public class HttpComponent extends HttpCommonComponent implements RestProducerFa
         // create the endpoint and set the http uri to be null
         String endpointUriString = endpointUri.toString();
 
-        LOG.debug("Creating endpoint uri {}", endpointUriString);
+        log.debug("Creating endpoint uri {}", endpointUriString);
         final HttpClientConnectionManager localConnectionManager = createConnectionManager(parameters, sslContextParameters);
         HttpEndpoint endpoint = new HttpEndpoint(endpointUriString, this, clientBuilder, localConnectionManager, configurer);
 
@@ -407,7 +403,7 @@ public class HttpComponent extends HttpCommonComponent implements RestProducerFa
         if (localConnectionsPerRoute > 0) {
             answer.setDefaultMaxPerRoute(localConnectionsPerRoute);
         }
-        LOG.info("Created ClientConnectionManager {}", answer);
+        log.info("Created ClientConnectionManager {}", answer);
 
         return answer;
     }
@@ -664,7 +660,7 @@ public class HttpComponent extends HttpCommonComponent implements RestProducerFa
     public void doStop() throws Exception {
         // shutdown connection manager
         if (clientConnectionManager != null) {
-            LOG.info("Shutting down ClientConnectionManager: {}", clientConnectionManager);
+            log.info("Shutting down ClientConnectionManager: {}", clientConnectionManager);
             clientConnectionManager.shutdown();
             clientConnectionManager = null;
         }
diff --git a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpEndpoint.java b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpEndpoint.java
index c0aa983..bd4d307 100644
--- a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpEndpoint.java
+++ b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpEndpoint.java
@@ -47,8 +47,6 @@ import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.pool.ConnPoolControl;
 import org.apache.http.pool.PoolStats;
 import org.apache.http.protocol.HttpContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * For calling out to external HTTP servers using Apache HTTP Client 4.x.
@@ -58,8 +56,6 @@ import org.slf4j.LoggerFactory;
 @ManagedResource(description = "Managed HttpEndpoint")
 public class HttpEndpoint extends HttpCommonEndpoint {
 
-    private static final Logger LOG = LoggerFactory.getLogger(HttpEndpoint.class);
-
     @UriParam(label = "advanced", description = "To use a custom HttpContext instance")
     private HttpContext httpContext;
     @UriParam(label = "advanced", description = "Register a custom configuration strategy for new HttpClient instances"
@@ -194,7 +190,7 @@ public class HttpEndpoint extends HttpCommonEndpoint {
                 if (scheme == null) {
                     scheme = HttpHelper.isSecureConnection(getEndpointUri()) ? "https" : "http";
                 }
-                LOG.debug("CamelContext properties http.proxyHost, http.proxyPort, and http.proxyScheme detected. Using http proxy host: {} port: {} scheme: {}", host, port, scheme);
+                log.debug("CamelContext properties http.proxyHost, http.proxyPort, and http.proxyScheme detected. Using http proxy host: {} port: {} scheme: {}", host, port, scheme);
                 HttpHost proxy = new HttpHost(host, port, scheme);
                 clientBuilder.setProxy(proxy);
             }
@@ -217,7 +213,7 @@ public class HttpEndpoint extends HttpCommonEndpoint {
             clientBuilder.setDefaultCookieStore(new NoopCookieStore());
         }
 
-        LOG.debug("Setup the HttpClientBuilder {}", clientBuilder);
+        log.debug("Setup the HttpClientBuilder {}", clientBuilder);
         return clientBuilder.build();
     }
 
diff --git a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpProducer.java b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpProducer.java
index ac9b059..3ba3ce0 100644
--- a/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpProducer.java
+++ b/components/camel-http4/src/main/java/org/apache/camel/component/http4/HttpProducer.java
@@ -71,14 +71,12 @@ import org.apache.http.protocol.BasicHttpContext;
 import org.apache.http.protocol.HTTP;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * @version
  */
 public class HttpProducer extends DefaultProducer {
-    private static final Logger LOG = LoggerFactory.getLogger(HttpProducer.class);
+
     private HttpClient httpClient;
     private HttpContext httpContext;
     private boolean throwException;
@@ -187,12 +185,12 @@ public class HttpProducer extends DefaultProducer {
         // lets store the result in the output message.
         HttpResponse httpResponse = null;
         try {
... 9027 lines suppressed ...