Posted to jira@kafka.apache.org by GitBox <gi...@apache.org> on 2021/06/21 07:22:14 UTC

[GitHub] [kafka] skaundinya15 commented on a change in pull request #10743: KIP-699: Update FindCoordinator to resolve multiple Coordinators at a time

skaundinya15 commented on a change in pull request #10743:
URL: https://github.com/apache/kafka/pull/10743#discussion_r655133582



##########
File path: clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients.admin.internals;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.message.OffsetCommitRequestData;
+import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestPartition;
+import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic;
+import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition;
+import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.requests.AbstractResponse;
+import org.apache.kafka.common.requests.OffsetCommitRequest;
+import org.apache.kafka.common.requests.OffsetCommitResponse;
+import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
+import org.apache.kafka.common.utils.LogContext;
+import org.slf4j.Logger;
+
+public class AlterConsumerGroupOffsetsHandler implements AdminApiHandler<CoordinatorKey, Map<TopicPartition, Errors>> {
+
+    private final CoordinatorKey groupId;
+    private final Map<TopicPartition, OffsetAndMetadata> offsets;
+    private final Logger log;
+    private final AdminApiLookupStrategy<CoordinatorKey> lookupStrategy;
+
+    public AlterConsumerGroupOffsetsHandler(
+        String groupId,
+        Map<TopicPartition, OffsetAndMetadata> offsets,
+        LogContext logContext
+    ) {
+        this.groupId = CoordinatorKey.byGroupId(groupId);
+        this.offsets = offsets;
+        this.log = logContext.logger(AlterConsumerGroupOffsetsHandler.class);
+        this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext);
+    }
+
+    @Override
+    public String apiName() {
+        return "offsetCommit";
+    }
+
+    @Override
+    public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
+        return lookupStrategy;
+    }
+
+    public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> newFuture(
+            String groupId
+    ) {
+        return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId)));
+    }
+
+    @Override
+    public OffsetCommitRequest.Builder buildRequest(int brokerId, Set<CoordinatorKey> keys) {
+        List<OffsetCommitRequestTopic> topics = new ArrayList<>();
+        Map<String, List<OffsetCommitRequestPartition>> offsetData = new HashMap<>();
+        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
+            String topic = entry.getKey().topic();
+            OffsetAndMetadata oam = entry.getValue();
+            offsetData.compute(topic, (key, value) -> {
+                if (value == null) {
+                    value = new ArrayList<>();
+                }
+                OffsetCommitRequestPartition partition = new OffsetCommitRequestPartition()
+                        .setCommittedOffset(oam.offset())
+                        .setCommittedLeaderEpoch(oam.leaderEpoch().orElse(-1))
+                        .setCommittedMetadata(oam.metadata())
+                        .setPartitionIndex(entry.getKey().partition());
+                value.add(partition);
+                return value;
+            });
+        }
+        for (Map.Entry<String, List<OffsetCommitRequestPartition>> entry : offsetData.entrySet()) {
+            OffsetCommitRequestTopic topic = new OffsetCommitRequestTopic()
+                    .setName(entry.getKey())
+                    .setPartitions(entry.getValue());
+            topics.add(topic);
+        }
+        OffsetCommitRequestData data = new OffsetCommitRequestData()
+            .setGroupId(groupId.idValue)
+            .setTopics(topics);
+        return new OffsetCommitRequest.Builder(data);
+    }
+
+    @Override
+    public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(Node broker, Set<CoordinatorKey> groupIds,
+            AbstractResponse abstractResponse) {
+
+        final OffsetCommitResponse response = (OffsetCommitResponse) abstractResponse;
+        Map<CoordinatorKey, Map<TopicPartition, Errors>> completed = new HashMap<>();
+        Map<CoordinatorKey, Throwable> failed = new HashMap<>();
+        List<CoordinatorKey> unmapped = new ArrayList<>();
+
+        Map<TopicPartition, Errors> partitions = new HashMap<>();
+        for (OffsetCommitResponseTopic topic : response.data().topics()) {
+            for (OffsetCommitResponsePartition partition : topic.partitions()) {
+                TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
+                Errors error = Errors.forCode(partition.errorCode());
+                if (error != Errors.NONE) {
+                    handleError(groupId, error, failed, unmapped);
+                } else {
+                    partitions.put(tp, error);
+                }
+            }
+        }
+        if (failed.isEmpty() && unmapped.isEmpty())
+            completed.put(groupId, partitions);
+
+        return new ApiResult<>(completed, failed, unmapped);
+    }
+
+    private void handleError(CoordinatorKey groupId, Errors error, Map<CoordinatorKey, Throwable> failed, List<CoordinatorKey> unmapped) {
+        switch (error) {
+            case GROUP_AUTHORIZATION_FAILED:
+                log.error("Received authorization failure for group {} in `DeleteConsumerGroupOffsets` response", groupId,
+                        error.exception());
+                failed.put(groupId, error.exception());
+                break;
+            case COORDINATOR_LOAD_IN_PROGRESS:
+            case COORDINATOR_NOT_AVAILABLE:
+            case NOT_COORDINATOR:
+                log.debug("DeleteConsumerGroupOffsets request for group {} returned error {}. Will retry", groupId, error);
+                unmapped.add(groupId);
+                break;
+            default:
+                log.error("Received unexpected error for group {} in `DeleteConsumerGroupOffsets` response", 
+                        groupId, error.exception());
+                failed.put(groupId, error.exception(
+                        "Unexpected error during DeleteConsumerGroupOffsets lookup for " + groupId));

Review comment:
       @mimaison Makes sense, it would be good to file a JIRA for this so we can address it in a future PR and ensure we have consistent error handling across all of the consumer group related handlers.
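       As a rough illustration of what that follow-up could look like, here is a minimal sketch of a shared helper that the coordinator-based handlers might delegate to. The CoordinatorErrors class and its handleGroupError signature are hypothetical names for this sketch only, not code from this PR.

       // Hypothetical sketch: a shared per-group error classifier that each
       // coordinator-based handler could call from its handleResponse(), so that
       // the split between failed groups and retriable coordinator errors stays
       // identical across the consumer group admin APIs.
       package org.apache.kafka.clients.admin.internals;

       import java.util.List;
       import java.util.Map;

       import org.apache.kafka.common.protocol.Errors;
       import org.slf4j.Logger;

       final class CoordinatorErrors {

           private CoordinatorErrors() {
           }

           static void handleGroupError(
               String apiName,
               CoordinatorKey groupId,
               Errors error,
               Map<CoordinatorKey, Throwable> failed,
               List<CoordinatorKey> groupsToUnmap,
               Logger log
           ) {
               switch (error) {
                   case GROUP_AUTHORIZATION_FAILED:
                       // Fatal for this group: surface the authorization exception to the caller.
                       log.error("`{}` request for group {} failed due to authorization failure", apiName, groupId);
                       failed.put(groupId, error.exception());
                       break;
                   case COORDINATOR_LOAD_IN_PROGRESS:
                   case COORDINATOR_NOT_AVAILABLE:
                   case NOT_COORDINATOR:
                       // Retriable: re-resolve the group coordinator and retry, mirroring the
                       // switch in the handler above.
                       log.debug("`{}` request for group {} returned error {}. Will retry", apiName, groupId, error);
                       groupsToUnmap.add(groupId);
                       break;
                   default:
                       // Anything else is unexpected; fail this group's future.
                       log.error("`{}` request for group {} failed due to unexpected error {}", apiName, groupId, error);
                       failed.put(groupId, error.exception());
                       break;
               }
           }
       }

       With something like this in place, the handleError method above could reduce to a one-line call such as CoordinatorErrors.handleGroupError("OffsetCommit", groupId, error, failed, unmapped, log), and the other consumer group handlers could do the same with their own API name.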




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org