Posted to commits@camel.apache.org by da...@apache.org on 2022/03/04 09:30:21 UTC

[camel] branch main updated: CAMEL-17727: camel-kafka - Add readiness health check for kafka consumer (#7093)

This is an automated email from the ASF dual-hosted git repository.

davsclaus pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel.git


The following commit(s) were added to refs/heads/main by this push:
     new c187e95  CAMEL-17727: camel-kafka - Add readiness health check for kafka consumer (#7093)
c187e95 is described below

commit c187e95ec5a6c14f7dfeead9e15be0d1ce246fa4
Author: Claus Ibsen <cl...@gmail.com>
AuthorDate: Fri Mar 4 10:27:44 2022 +0100

    CAMEL-17727: camel-kafka - Add readiness health check for kafka consumer (#7093)
    
    CAMEL-17727: camel-kafka - Add readiness health check for kafka consumer
---
 components/camel-kafka/pom.xml                     |   4 +
 .../camel/component/kafka/KafkaConsumer.java       |  20 ++-
 .../component/kafka/KafkaConsumerHealthCheck.java  |  77 +++++++++
 .../camel/component/kafka/KafkaFetchRecords.java   |  32 ++++
 .../integration/KafkaConsumerHealthCheckIT.java    | 189 +++++++++++++++++++++
 .../camel/impl/health/AbstractHealthCheck.java     |   2 +
 6 files changed, 323 insertions(+), 1 deletion(-)
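
For a quick sense of what this change enables: once a Kafka consumer route is started, its readiness can be queried through Camel's health-check API (assuming a health-check registry is set up, as the new integration test below does manually). A minimal sketch, not part of the commit, using only HealthCheckHelper.invokeReadiness as the test does; the class name is just for illustration:

    import java.util.Collection;

    import org.apache.camel.CamelContext;
    import org.apache.camel.health.HealthCheck;
    import org.apache.camel.health.HealthCheckHelper;

    public final class ReadinessProbe {

        private ReadinessProbe() {
        }

        // Returns true when every readiness check, including the new per-consumer
        // kafka checks added by this commit, reports UP.
        public static boolean isReady(CamelContext context) {
            Collection<HealthCheck.Result> results = HealthCheckHelper.invokeReadiness(context);
            return results.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
        }
    }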

diff --git a/components/camel-kafka/pom.xml b/components/camel-kafka/pom.xml
index 115c6b7..ab10fab 100644
--- a/components/camel-kafka/pom.xml
+++ b/components/camel-kafka/pom.xml
@@ -38,6 +38,10 @@
             <groupId>org.apache.camel</groupId>
             <artifactId>camel-support</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.camel</groupId>
+            <artifactId>camel-health</artifactId>
+        </dependency>
 
         <!-- kafka java client -->
         <dependency>
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
index 692a505..185d8ac 100644
--- a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumer.java
@@ -24,9 +24,12 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
+import org.apache.camel.CamelContextAware;
 import org.apache.camel.Processor;
 import org.apache.camel.ResumeAware;
 import org.apache.camel.component.kafka.consumer.support.KafkaConsumerResumeStrategy;
+import org.apache.camel.health.HealthCheck;
+import org.apache.camel.health.HealthCheckAware;
 import org.apache.camel.spi.StateRepository;
 import org.apache.camel.support.BridgeExceptionHandlerToErrorHandler;
 import org.apache.camel.support.DefaultConsumer;
@@ -37,7 +40,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class KafkaConsumer extends DefaultConsumer implements ResumeAware<KafkaConsumerResumeStrategy> {
+public class KafkaConsumer extends DefaultConsumer implements ResumeAware<KafkaConsumerResumeStrategy>, HealthCheckAware {
 
     private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumer.class);
 
@@ -67,6 +70,17 @@ public class KafkaConsumer extends DefaultConsumer implements ResumeAware<KafkaC
     @Override
     protected void doBuild() throws Exception {
         super.doBuild();
+
+        // build health-check
+        String rid = getRouteId();
+        if (rid == null) {
+            // not from a route, so fall back to a generated uuid
+            rid = endpoint.getCamelContext().getUuidGenerator().generateUuid();
+        }
+        HealthCheck hc = new KafkaConsumerHealthCheck(this, rid);
+        CamelContextAware.trySetCamelContext(hc, endpoint.getCamelContext());
+        setHealthCheck(hc);
+
         if (endpoint.getComponent().getPollExceptionStrategy() != null) {
             pollExceptionStrategy = endpoint.getComponent().getPollExceptionStrategy();
         } else {
@@ -101,6 +115,10 @@ public class KafkaConsumer extends DefaultConsumer implements ResumeAware<KafkaC
         return props;
     }
 
+    List<KafkaFetchRecords> getTasks() {
+        return tasks;
+    }
+
     @Override
     protected void doStart() throws Exception {
         LOG.info("Starting Kafka consumer on topic: {} with breakOnFirstError: {}", endpoint.getConfiguration().getTopic(),
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumerHealthCheck.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumerHealthCheck.java
new file mode 100644
index 0000000..b20115f
--- /dev/null
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaConsumerHealthCheck.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.kafka;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.camel.health.HealthCheckResultBuilder;
+import org.apache.camel.impl.health.AbstractHealthCheck;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+
+/**
+ * Kafka consumer readiness health-check
+ */
+public class KafkaConsumerHealthCheck extends AbstractHealthCheck {
+
+    private final KafkaConsumer kafkaConsumer;
+    private final String routeId;
+
+    public KafkaConsumerHealthCheck(KafkaConsumer kafkaConsumer, String routeId) {
+        super("camel", "kafka-consumer-" + routeId);
+        this.kafkaConsumer = kafkaConsumer;
+        this.routeId = routeId;
+    }
+
+    @Override
+    public boolean isLiveness() {
+        // this health check is only readiness
+        return false;
+    }
+
+    @Override
+    protected void doCall(HealthCheckResultBuilder builder, Map<String, Object> options) {
+        List<KafkaFetchRecords> tasks = kafkaConsumer.getTasks();
+        for (KafkaFetchRecords task : tasks) {
+            if (!task.isReady()) {
+                builder.down();
+                builder.message("KafkaConsumer is not ready");
+
+                KafkaConfiguration cfg = kafkaConsumer.getEndpoint().getConfiguration();
+                Properties props = task.getKafkaProps();
+
+                builder.detail("bootstrap.servers", props.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
+                String cid = props.getProperty(ConsumerConfig.CLIENT_ID_CONFIG);
+                if (cid != null) {
+                    builder.detail("client.id", cid);
+                }
+                String gid = props.getProperty(ConsumerConfig.GROUP_ID_CONFIG);
+                if (gid != null) {
+                    builder.detail("group.id", gid);
+                }
+                if (routeId != null) {
+                    // camel route id
+                    builder.detail("route.id", routeId);
+                }
+                builder.detail("topic", cfg.getTopic());
+
+                return; // break on first DOWN
+            }
+        }
+    }
+}
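
Side note on the class above: because it extends AbstractHealthCheck, the check can also be invoked directly once the consumer has built it in doBuild(). A rough sketch, assuming HealthCheckAware exposes a getHealthCheck() getter matching the setHealthCheck() call used in doBuild():

    import java.util.Map;

    import org.apache.camel.component.kafka.KafkaConsumer;
    import org.apache.camel.health.HealthCheck;

    public final class KafkaCheckInspector {

        private KafkaCheckInspector() {
        }

        // Prints why the consumer's readiness check is DOWN, using the message
        // and details populated by doCall() above.
        public static void report(KafkaConsumer consumer) {
            // getHealthCheck() is assumed to be the HealthCheckAware counterpart
            // of the setHealthCheck() used in KafkaConsumer.doBuild()
            HealthCheck check = consumer.getHealthCheck();
            HealthCheck.Result result = check.call();
            if (result.getState() == HealthCheck.State.DOWN) {
                String message = result.getMessage().orElse("unknown");
                Map<String, Object> details = result.getDetails();
                System.out.println(message + " (topic=" + details.get("topic") + ")");
            }
        }
    }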
diff --git a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaFetchRecords.java b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaFetchRecords.java
index 91728e3..6bbb135 100644
--- a/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaFetchRecords.java
+++ b/components/camel-kafka/src/main/java/org/apache/camel/component/kafka/KafkaFetchRecords.java
@@ -35,7 +35,9 @@ import org.apache.camel.component.kafka.consumer.support.ProcessingResult;
 import org.apache.camel.component.kafka.consumer.support.ResumeStrategyFactory;
 import org.apache.camel.support.BridgeExceptionHandlerToErrorHandler;
 import org.apache.camel.util.IOHelper;
+import org.apache.camel.util.ReflectionHelper;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.errors.WakeupException;
@@ -413,4 +415,34 @@ class KafkaFetchRecords implements Runnable {
     public void setConnected(boolean connected) {
         this.connected = connected;
     }
+
+    public boolean isReady() {
+        if (!connected) {
+            return false;
+        }
+
+        boolean ready = true;
+        try {
+            if (consumer instanceof org.apache.kafka.clients.consumer.KafkaConsumer) {
+                // need to use reflection to access the network client, which has an API to check whether
+                // the client has ready connections
+                org.apache.kafka.clients.consumer.KafkaConsumer kc = (org.apache.kafka.clients.consumer.KafkaConsumer) consumer;
+                ConsumerNetworkClient nc
+                        = (ConsumerNetworkClient) ReflectionHelper.getField(kc.getClass().getDeclaredField("client"), kc);
+                LOG.trace(
+                        "Health-Check calling org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.hasReadyNodes");
+                ready = nc.hasReadyNodes(System.currentTimeMillis());
+            }
+        } catch (Exception e) {
+            // ignore
+            LOG.debug("Cannot check hasReadyNodes on KafkaConsumer client (ConsumerNetworkClient) due to "
+                      + e.getMessage() + ". This exception is ignored.",
+                    e);
+        }
+        return ready;
+    }
+
+    Properties getKafkaProps() {
+        return kafkaProps;
+    }
 }
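
Worth highlighting in the hunk above: isReady() uses reflection to reach Kafka's internal ConsumerNetworkClient, since the public consumer API offers no way to ask whether any broker connection is ready. The private field name "client" is a Kafka implementation detail that may change between client versions, which is why any failure is swallowed and the task still reports ready. A stripped-down sketch of the same lookup with plain java.lang.reflect, for illustration only:

    import java.lang.reflect.Field;

    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;

    public final class NetworkClientProbe {

        private NetworkClientProbe() {
        }

        // Returns true if the consumer has at least one ready broker connection,
        // or true when the internal field cannot be read (mirroring the fallback above).
        public static boolean hasReadyNodes(KafkaConsumer<?, ?> consumer) {
            try {
                // "client" is a private field of Kafka's KafkaConsumer (implementation detail)
                Field field = KafkaConsumer.class.getDeclaredField("client");
                field.setAccessible(true);
                ConsumerNetworkClient client = (ConsumerNetworkClient) field.get(consumer);
                return client.hasReadyNodes(System.currentTimeMillis());
            } catch (Exception e) {
                return true;
            }
        }
    }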
diff --git a/components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaConsumerHealthCheckIT.java b/components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaConsumerHealthCheckIT.java
new file mode 100644
index 0000000..d5e973c
--- /dev/null
+++ b/components/camel-kafka/src/test/java/org/apache/camel/component/kafka/integration/KafkaConsumerHealthCheckIT.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.camel.component.kafka.integration;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.StreamSupport;
+
+import org.apache.camel.BindToRegistry;
+import org.apache.camel.CamelContext;
+import org.apache.camel.Endpoint;
+import org.apache.camel.EndpointInject;
+import org.apache.camel.builder.RouteBuilder;
+import org.apache.camel.component.kafka.KafkaConstants;
+import org.apache.camel.component.kafka.MockConsumerInterceptor;
+import org.apache.camel.component.kafka.serde.DefaultKafkaHeaderDeserializer;
+import org.apache.camel.component.mock.MockEndpoint;
+import org.apache.camel.health.HealthCheck;
+import org.apache.camel.health.HealthCheckHelper;
+import org.apache.camel.health.HealthCheckRegistry;
+import org.apache.camel.impl.health.DefaultHealthCheckRegistry;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.header.internals.RecordHeader;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.testcontainers.shaded.org.awaitility.Awaitility.await;
+
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+public class KafkaConsumerHealthCheckIT extends BaseEmbeddedKafkaTestSupport {
+    public static final String TOPIC = "test-health";
+
+    private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerHealthCheckIT.class);
+
+    @BindToRegistry("myHeaderDeserializer")
+    private MyKafkaHeaderDeserializer deserializer = new MyKafkaHeaderDeserializer();
+
+    @EndpointInject("kafka:" + TOPIC
+                    + "?groupId=group1&autoOffsetReset=earliest&keyDeserializer=org.apache.kafka.common.serialization.StringDeserializer&"
+                    + "valueDeserializer=org.apache.kafka.common.serialization.StringDeserializer"
+                    + "&autoCommitIntervalMs=1000&sessionTimeoutMs=30000&autoCommitEnable=true&interceptorClasses=org.apache.camel.component.kafka.MockConsumerInterceptor")
+    private Endpoint from;
+
+    @EndpointInject("mock:result")
+    private MockEndpoint to;
+
+    private org.apache.kafka.clients.producer.KafkaProducer<String, String> producer;
+
+    @BeforeEach
+    public void before() {
+        Properties props = getDefaultProperties();
+        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
+        MockConsumerInterceptor.recordsCaptured.clear();
+    }
+
+    @AfterEach
+    public void after() {
+        if (producer != null) {
+            producer.close();
+        }
+        // clean all test topics
+        kafkaAdminClient.deleteTopics(Collections.singletonList(TOPIC)).all();
+    }
+
+    @Override
+    protected CamelContext createCamelContext() throws Exception {
+        CamelContext context = super.createCamelContext();
+
+        // install health check manually (yes a bit cumbersome)
+        HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
+        registry.setCamelContext(context);
+        Object hc = registry.resolveById("context");
+        registry.register(hc);
+        hc = registry.resolveById("routes");
+        registry.register(hc);
+        hc = registry.resolveById("consumers");
+        registry.register(hc);
+        context.setExtension(HealthCheckRegistry.class, registry);
+
+        return context;
+    }
+
+    @Override
+    protected RouteBuilder createRouteBuilder() throws Exception {
+        return new RouteBuilder() {
+
+            @Override
+            public void configure() {
+                from(from).process(exchange -> LOG.trace("Captured on the processor: {}", exchange.getMessage().getBody()))
+                        .routeId("test-health-it").to(to);
+            }
+        };
+    }
+
+    @Order(1)
+    @Test
+    public void kafkaConsumerHealthCheck() throws InterruptedException, IOException {
+        // health-check liveness should be UP
+        Collection<HealthCheck.Result> res = HealthCheckHelper.invokeLiveness(context);
+        boolean up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
+        Assertions.assertTrue(up, "liveness check");
+
+        // health-check readiness should be ready
+        await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> {
+            Collection<HealthCheck.Result> res2 = HealthCheckHelper.invokeReadiness(context);
+            boolean up2 = res2.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
+            Assertions.assertTrue(up2, "readiness check");
+        });
+
+        String propagatedHeaderKey = "PropagatedCustomHeader";
+        byte[] propagatedHeaderValue = "propagated header value".getBytes();
+        String skippedHeaderKey = "CamelSkippedHeader";
+        to.expectedMessageCount(5);
+        to.expectedBodiesReceivedInAnyOrder("message-0", "message-1", "message-2", "message-3", "message-4");
+        // The LAST_RECORD_BEFORE_COMMIT header should not be configured on any
+        // exchange because autoCommitEnable=true
+        to.expectedHeaderValuesReceivedInAnyOrder(KafkaConstants.LAST_RECORD_BEFORE_COMMIT, null, null, null, null, null);
+        to.expectedHeaderReceived(propagatedHeaderKey, propagatedHeaderValue);
+
+        for (int k = 0; k < 5; k++) {
+            String msg = "message-" + k;
+            ProducerRecord<String, String> data = new ProducerRecord<>(TOPIC, "1", msg);
+            data.headers().add(new RecordHeader("CamelSkippedHeader", "skipped header value".getBytes()));
+            data.headers().add(new RecordHeader(propagatedHeaderKey, propagatedHeaderValue));
+            producer.send(data);
+        }
+
+        to.assertIsSatisfied(3000);
+
+        assertEquals(5, StreamSupport.stream(MockConsumerInterceptor.recordsCaptured.get(0).records(TOPIC).spliterator(), false)
+                .count());
+
+        Map<String, Object> headers = to.getExchanges().get(0).getIn().getHeaders();
+        assertFalse(headers.containsKey(skippedHeaderKey), "Should not receive skipped header");
+        assertTrue(headers.containsKey(propagatedHeaderKey), "Should receive propagated header");
+
+        // and shutdown kafka which will make readiness report as DOWN
+        service.shutdown();
+
+        // health-check liveness should be UP
+        res = HealthCheckHelper.invokeLiveness(context);
+        up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
+        Assertions.assertTrue(up, "liveness check");
+        // but health-check readiness should NOT be ready
+        await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> {
+            Collection<HealthCheck.Result> res2 = HealthCheckHelper.invoke(context);
+            Optional<HealthCheck.Result> down
+                    = res2.stream().filter(r -> r.getState().equals(HealthCheck.State.DOWN)).findFirst();
+            Assertions.assertTrue(down.isPresent());
+            String msg = down.get().getMessage().get();
+            Assertions.assertEquals("KafkaConsumer is not ready", msg);
+            Map<String, Object> map = down.get().getDetails();
+            Assertions.assertEquals(TOPIC, map.get("topic"));
+            Assertions.assertEquals("test-health-it", map.get("route.id"));
+        });
+    }
+
+    private static class MyKafkaHeaderDeserializer extends DefaultKafkaHeaderDeserializer {
+    }
+}
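
One more usage note on the test above: it asserts over all health-check results. If only the per-consumer Kafka checks are of interest, they can be narrowed down by the "kafka-consumer-" id prefix chosen in the KafkaConsumerHealthCheck constructor. A hedged sketch, assuming Result.getCheck() exposes the originating check:

    import java.util.Collection;
    import java.util.List;
    import java.util.stream.Collectors;

    import org.apache.camel.CamelContext;
    import org.apache.camel.health.HealthCheck;
    import org.apache.camel.health.HealthCheckHelper;

    public final class KafkaReadinessFilter {

        private KafkaReadinessFilter() {
        }

        // Keeps only results produced by the per-route kafka consumer checks
        // registered in KafkaConsumer.doBuild() above.
        public static List<HealthCheck.Result> kafkaConsumerResults(CamelContext context) {
            Collection<HealthCheck.Result> all = HealthCheckHelper.invokeReadiness(context);
            return all.stream()
                    // r.getCheck() is assumed to return the check that produced this result
                    .filter(r -> r.getCheck().getId().startsWith("kafka-consumer-"))
                    .collect(Collectors.toList());
        }
    }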
diff --git a/core/camel-health/src/main/java/org/apache/camel/impl/health/AbstractHealthCheck.java b/core/camel-health/src/main/java/org/apache/camel/impl/health/AbstractHealthCheck.java
index 79a8b5d..860a371 100644
--- a/core/camel-health/src/main/java/org/apache/camel/impl/health/AbstractHealthCheck.java
+++ b/core/camel-health/src/main/java/org/apache/camel/impl/health/AbstractHealthCheck.java
@@ -156,9 +156,11 @@ public abstract class AbstractHealthCheck implements HealthCheck, CamelContextAw
         if (builder.state() == State.DOWN) {
             // reset success since it failed
             successCount = 0;
+            failureCount++;
         } else if (builder.state() == State.UP) {
             // reset failure since it is ok
             failureCount = 0;
+            successCount++;
         }
 
         meta.put(INVOCATION_TIME, invocationTime);