You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@inlong.apache.org by GitBox <gi...@apache.org> on 2022/09/07 09:45:16 UTC

[GitHub] [inlong] vernedeng commented on a diff in pull request #5800: [INLONG-5796][SDK] Optimize fetcher builders

vernedeng commented on code in PR #5800:
URL: https://github.com/apache/inlong/pull/5800#discussion_r964626383


##########
inlong-sdk/sort-sdk/src/main/java/org/apache/inlong/sdk/sort/api/SortClientConfig.java:
##########
@@ -59,9 +59,10 @@ public class SortClientConfig implements Serializable {
     private int maxEmptyPollSleepMs = 500;
     private int emptyPollTimes = 10;
     private int cleanOldConsumerIntervalSec = 60;
+    private int maxConsumerSize = 5;
 
     public SortClientConfig(String sortTaskId, String sortClusterName, InLongTopicChangeListener assignmentsListener,
-            ConsumeStrategy consumeStrategy, String localIp) {
+                            ConsumeStrategy consumeStrategy, String localIp) {

Review Comment:
   fixed, thx



##########
inlong-sdk/sort-sdk/src/main/java/org/apache/inlong/sdk/sort/fetcher/kafka/AckOffsetOnRebalance.java:
##########
@@ -18,79 +18,151 @@
 
 package org.apache.inlong.sdk.sort.fetcher.kafka;
 
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.inlong.sdk.sort.api.Seeker;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 public class AckOffsetOnRebalance implements ConsumerRebalanceListener {
 
     private static final Logger LOGGER = LoggerFactory.getLogger(AckOffsetOnRebalance.class);
+    private static final long DEFAULT_MAX_WAIT_FOR_ACK_TIME = 15000L;
+    private final long maxWaitForAckTime;
     private final String clusterId;
     private final Seeker seeker;
     private final ConcurrentHashMap<TopicPartition, OffsetAndMetadata> commitOffsetMap;
     private final ConcurrentHashMap<TopicPartition, ConcurrentSkipListMap<Long, Boolean>> ackOffsetMap;
+    private final KafkaConsumer<byte[], byte[]> consumer;
+    private final AtomicLong revokedNum = new AtomicLong(0);
+    private final AtomicLong assignedNum = new AtomicLong(0);
 
     public AckOffsetOnRebalance(String clusterId, Seeker seeker,
                                 ConcurrentHashMap<TopicPartition, OffsetAndMetadata> commitOffsetMap) {
-        this(clusterId, seeker, commitOffsetMap, null);
+        this(clusterId, seeker, commitOffsetMap, null, null);
     }
 
     public AckOffsetOnRebalance(
             String clusterId,
             Seeker seeker,
             ConcurrentHashMap<TopicPartition, OffsetAndMetadata> commitOffsetMap,
-            ConcurrentHashMap<TopicPartition, ConcurrentSkipListMap<Long, Boolean>> ackOffsetMap) {
+            ConcurrentHashMap<TopicPartition, ConcurrentSkipListMap<Long, Boolean>> ackOffsetMap,
+            KafkaConsumer<byte[], byte[]> consumer) {
+        this(clusterId, seeker, commitOffsetMap, ackOffsetMap, consumer, DEFAULT_MAX_WAIT_FOR_ACK_TIME);
+    }
+
+    public AckOffsetOnRebalance(
+            String clusterId,
+            Seeker seeker,
+            ConcurrentHashMap<TopicPartition, OffsetAndMetadata> commitOffsetMap,
+            ConcurrentHashMap<TopicPartition, ConcurrentSkipListMap<Long, Boolean>> ackOffsetMap,
+            KafkaConsumer<byte[], byte[]> consumer,
+            long maxWaitForAckTime) {
         this.clusterId = clusterId;
         this.seeker = seeker;
         this.commitOffsetMap = commitOffsetMap;
         this.ackOffsetMap = ackOffsetMap;
+        this.consumer = consumer;
+        this.maxWaitForAckTime = maxWaitForAckTime;
     }
 
     @Override
     public void onPartitionsRevoked(Collection<TopicPartition> collection) {
-        LOGGER.debug("*- in re-balance:onPartitionsRevoked");
+        LOGGER.info("*- in re-balance:onPartitionsRevoked, it's the {} time", revokedNum.incrementAndGet());
         collection.forEach((v) -> {
-            LOGGER.info("clusterId:{},onPartitionsRevoked:{}", clusterId, v.toString());
+            LOGGER.debug("clusterId:{},onPartitionsRevoked:{}, position is {}",
+                    clusterId, v.toString(), consumer.position(v));
         });
-        if (Objects.nonNull(ackOffsetMap) && Objects.nonNull(commitOffsetMap)) {
-            ackRevokedPartitions(collection);
+
+        try {
+            if (Objects.nonNull(ackOffsetMap) && Objects.nonNull(commitOffsetMap)) {
+                // sleep 10s to wait for un-acked messages

Review Comment:
   fixed, thx



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@inlong.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org