Posted to commits@pulsar.apache.org by pe...@apache.org on 2022/04/28 13:04:04 UTC

[pulsar] branch branch-2.10 updated (09819b2458e -> f7128614c1d)

This is an automated email from the ASF dual-hosted git repository.

penghui pushed a change to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git


    from 09819b2458e [fix][broker] Fix broker LoadBalance uneffective (#15314)
     new 702d21d8eba Support shrink in ConcurrentLongHashMap (#14497)
     new aeb09298297 Optimize memory usage: support to  shrink for pendingAcks map (#14515)
     new 14d9b8492d6 support shrink for map or set (#14663)
     new 3ebc23e4bff Reduce unnecessary expansions for ConcurrentLong map and set (#14562)
     new 84a08942dd2 [fix][txn] Fix potentially unfinishable future. (#15208)
     new d39c6551196 Upgrade Netty to 4.1.76.Final, Netty Tcnative, grpc and protobuf (#15212)
     new 98849cd5243 Skip unnecessary DNS resolution when creating AuthenticationDataHttp instance (#15221)
     new bb52721b88c TableView should cache created readers (#15178)
     new 663ebe071a4 Improve skipping of DNS resolution when creating AuthenticationDataHttp instance (#15228)
     new dfe0d0d4bf9 [Build] Use grpc-bom to align grpc library versions (#15234)
     new c6c7d6dd41c Fix duplicate validateTopicOwnershipAsync (#15120)
     new 4639b15c1de [Functions] Check executor null when closing the FileSource (#15247)
     new c2cfad82db1 [Fix][Broker] Fix race condition in `OpAddEntry` (#15233)
     new 642159c8866 Pulsar SQL support for Decimal data type (#15153)
     new 90caa1c621a Put `validateTopicOwnershipAsync` before `validateTopicOperationAsync` (#15265)
     new 83b68332af0 [C++] Remove the flaky and meaningless tests (#15271)
     new 9122f93ea94 Fix typo in checkPermissionsAsync (#15273)
     new bdb620369b8 [fix][tools] Only apply maxPendingMessagesAcrossPartitions if it presents (#15283)
     new 4ad4bd85544 Fix typo and doc in TopicPolies client api (#15293)
     new efa28d74a45 [fix] [broker] Fix problem at RateLimiter#tryAcquire (#15306)
     new 536c8919824 [fix][client] Fix negative ack not redelivery. (#15312)
     new 6d365c995a2 [C++] Wait until event loop terminates when closing the Client (#15316)
     new aef4dd202ca [fix][broker] fix resource group does not report usage (#15292)
     new 9352feb7a1c [improve][broker] Use shrink map for message redelivery. (#15342)
     new 0f15d122fbd [fix][broker] Fix MessageDeduplication#inactiveProducers may not be persistence correctly (#15206)
     new f7128614c1d [improve][broker] Support shrink for ConcurrentSortedLongPairSet  (#15354)

The 26 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
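
A recurring theme in this batch (#14497, #14515, #14663, #14562, #15342, #15354) is adding optional shrinking to the concurrent open-hash collections in pulsar-common, so that maps which grow under a burst of pending work can release memory afterwards. As a hedged sketch only, the new builder API looks roughly like this; the builder methods and defaults are taken from the ConcurrentLongLongPairHashMap diff quoted later in this mail, while the put/remove signatures are assumptions based on the BookKeeper collection the class is ported from:

    import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;

    public class ShrinkableMapExample {
        public static void main(String[] args) {
            // Builder methods and defaults as they appear in the #14515 diff:
            // expectedItems=256, concurrencyLevel=16, autoShrink=false unless overridden.
            ConcurrentLongLongPairHashMap pendingAcks = ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(256)   // initial capacity hint
                    .concurrencyLevel(1)  // single section, as Consumer.java configures it
                    .autoShrink(true)     // allow the table to rehash down when mostly idle
                    .build();

            // put/remove signatures assumed from the BookKeeper original; keys and
            // values must be >= 0 because negative sentinels mark empty/deleted slots.
            pendingAcks.put(1L, 0L, 1L, 0L);
            pendingAcks.remove(1L, 0L); // with autoShrink, removals can trigger a shrink
        }
    }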


Summary of changes:
 buildtools/pom.xml                                 |   2 +-
 distribution/server/src/assemble/LICENSE.bin.txt   |  94 +--
 .../bookkeeper/mledger/impl/ManagedLedgerImpl.java |  10 +-
 .../mledger/impl/ManagedLedgerOfflineBacklog.java  |   3 +-
 pom.xml                                            |  79 +--
 .../apache/pulsar/broker/ServiceConfiguration.java |   8 +
 .../authentication/AuthenticationDataHttp.java     |   2 +-
 .../broker/TransactionMetadataStoreService.java    |  11 +-
 .../broker/admin/impl/PersistentTopicsBase.java    | 284 ++++-----
 .../broker/loadbalance/impl/LoadManagerShared.java |  20 +-
 .../loadbalance/impl/ModularLoadManagerImpl.java   |  18 +-
 .../loadbalance/impl/SimpleLoadManagerImpl.java    |  18 +-
 .../pulsar/broker/namespace/NamespaceService.java  |  13 +-
 .../resourcegroup/ResourceQuotaCalculatorImpl.java |   4 +-
 .../org/apache/pulsar/broker/rest/TopicsBase.java  |   3 +-
 .../pulsar/broker/service/BrokerService.java       |  42 +-
 .../org/apache/pulsar/broker/service/Consumer.java |  13 +-
 .../apache/pulsar/broker/service/ServerCnx.java    |  10 +-
 .../service/nonpersistent/NonPersistentTopic.java  |  12 +-
 .../service/persistent/MessageDeduplication.java   |  25 +-
 .../persistent/MessageRedeliveryController.java    |  11 +-
 .../broker/service/persistent/PersistentTopic.java |  20 +-
 .../broker/stats/ClusterReplicationMetrics.java    |   3 +-
 .../AntiAffinityNamespaceGroupTest.java            |  15 +-
 .../loadbalance/impl/LoadManagerSharedTest.java    |  13 +-
 .../ResourceQuotaCalculatorImplTest.java           |  10 +
 .../pulsar/broker/service/PersistentTopicTest.java |  24 +-
 .../service/persistent/MessageDuplicationTest.java |  29 +-
 .../MessageRedeliveryControllerTest.java           |   2 +-
 .../pulsar/client/impl/NegativeAcksTest.java       |   3 +
 .../apache/pulsar/client/admin/TopicPolicies.java  |  20 +-
 pulsar-client-cpp/lib/ClientImpl.cc                |  37 +-
 pulsar-client-cpp/lib/ExecutorService.cc           |  33 +-
 pulsar-client-cpp/lib/ExecutorService.h            |  11 +-
 pulsar-client-cpp/lib/TimeUtils.h                  |  48 ++
 pulsar-client-cpp/tests/ClientTest.cc              |   2 +-
 pulsar-client-cpp/tests/CustomLoggerTest.cc        |  26 +-
 pulsar-client-cpp/tests/ProducerTest.cc            |  83 ---
 .../org/apache/pulsar/client/impl/ClientCnx.java   |  22 +-
 .../apache/pulsar/client/impl/ConsumerBase.java    |   3 +-
 .../apache/pulsar/client/impl/ConsumerImpl.java    |   3 +-
 .../pulsar/client/impl/NegativeAcksTracker.java    |  28 +-
 .../client/impl/PartitionedProducerImpl.java       |   3 +-
 .../apache/pulsar/client/impl/ProducerBase.java    |   3 +-
 .../apache/pulsar/client/impl/TableViewImpl.java   |  14 +
 .../client/impl/TransactionMetaStoreHandler.java   |   5 +-
 .../TransactionCoordinatorClientImpl.java          |   6 +-
 .../impl/AcknowledgementsGroupingTrackerTest.java  |   3 +-
 .../org/apache/pulsar/common/util/RateLimiter.java |   3 +-
 .../util/collections/ConcurrentLongHashMap.java    | 149 ++++-
 .../collections/ConcurrentLongLongPairHashMap.java | 673 +++++++++++++++++++++
 .../util/collections/ConcurrentLongPairSet.java    | 174 +++++-
 .../util/collections/ConcurrentOpenHashMap.java    | 159 ++++-
 .../util/collections/ConcurrentOpenHashSet.java    | 158 ++++-
 .../collections/ConcurrentSortedLongPairSet.java   |  32 +-
 .../common/util/collections/LongPairSet.java       |   7 +
 .../apache/pulsar/common/util/RateLimiterTest.java |  20 +-
 .../collections/ConcurrentLongHashMapTest.java     | 141 ++++-
 .../ConcurrentLongLongPairHashMapTest.java         | 427 +++++++++++++
 .../collections/ConcurrentLongPairSetTest.java     | 130 +++-
 .../collections/ConcurrentOpenHashMapTest.java     | 144 ++++-
 .../collections/ConcurrentOpenHashSetTest.java     |  93 ++-
 .../ConcurrentSortedLongPairSetTest.java           |  43 ++
 .../java/org/apache/pulsar/io/file/FileSource.java |  12 +-
 pulsar-sql/presto-distribution/LICENSE             |  63 +-
 .../pulsar/sql/presto/PulsarRecordCursor.java      |   3 +-
 .../decoder/avro/PulsarAvroColumnDecoder.java      |  19 +-
 .../decoder/avro/PulsarAvroRowDecoderFactory.java  |  10 +-
 .../decoder/json/PulsarJsonRowDecoderFactory.java  |   6 +
 .../pulsar/sql/presto/TestPulsarConnector.java     |   8 +-
 .../pulsar/sql/presto/TestPulsarRecordCursor.java  |  15 +
 .../sql/presto/decoder/AbstractDecoderTester.java  |   5 +
 .../sql/presto/decoder/DecoderTestMessage.java     |   6 +-
 .../pulsar/sql/presto/decoder/DecoderTestUtil.java |  20 +
 .../sql/presto/decoder/avro/TestAvroDecoder.java   |  11 +
 .../pulsar/testclient/PerformanceProducer.java     |   4 +-
 .../pulsar/testclient/PerformanceProducerTest.java |  21 +
 .../apache/pulsar/websocket/WebSocketService.java  |  23 +-
 .../apache/pulsar/websocket/stats/ProxyStats.java  |   4 +-
 79 files changed, 3089 insertions(+), 645 deletions(-)
 create mode 100644 pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java
 create mode 100644 pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java


[pulsar] 18/26: [fix][tools] Only apply maxPendingMessagesAcrossPartitions if it presents (#15283)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit bdb620369b8ff419c5d27dbb36d530bef472c2fc
Author: lipenghui <pe...@apache.org>
AuthorDate: Sun Apr 24 21:01:23 2022 +0800

    [fix][tools] Only apply maxPendingMessagesAcrossPartitions if it presents (#15283)
    
    (cherry picked from commit 188d4f4942e549e101757a73aa8785f2f3a2dbd4)
---
 .../pulsar/testclient/PerformanceProducer.java      |  4 +++-
 .../pulsar/testclient/PerformanceProducerTest.java  | 21 +++++++++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
index d297eb7d7bd..f18f4a84e13 100644
--- a/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
+++ b/pulsar-testclient/src/main/java/org/apache/pulsar/testclient/PerformanceProducer.java
@@ -597,10 +597,12 @@ public class PerformanceProducer {
                     .sendTimeout(arguments.sendTimeout, TimeUnit.SECONDS) //
                     .compressionType(arguments.compression) //
                     .maxPendingMessages(arguments.maxOutstanding) //
-                    .maxPendingMessagesAcrossPartitions(arguments.maxPendingMessagesAcrossPartitions)
                     .accessMode(arguments.producerAccessMode)
                     // enable round robin message routing if it is a partitioned topic
                     .messageRoutingMode(MessageRoutingMode.RoundRobinPartition);
+            if (arguments.maxPendingMessagesAcrossPartitions > 0) {
+                producerBuilder.maxPendingMessagesAcrossPartitions(arguments.maxPendingMessagesAcrossPartitions);
+            }
 
             AtomicReference<Transaction> transactionAtomicReference;
             if (arguments.isEnableTransaction) {
diff --git a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceProducerTest.java b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceProducerTest.java
index 50174ed4b70..99b615678da 100644
--- a/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceProducerTest.java
+++ b/pulsar-testclient/src/test/java/org/apache/pulsar/testclient/PerformanceProducerTest.java
@@ -190,4 +190,25 @@ public class PerformanceProducerTest extends MockedPulsarServiceBaseTest {
         Assert.assertTrue(msgFormatter instanceof DefaultMessageFormatter);
     }
 
+    @Test
+    public void testMaxOutstanding() throws Exception {
+        String argString = "%s -r 10 -u %s -au %s -m 5 -o 10000";
+        String topic = testTopic + UUID.randomUUID().toString();
+        String args = String.format(argString, topic, pulsar.getBrokerServiceUrl(), pulsar.getWebServiceAddress());
+        Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub")
+                .subscriptionType(SubscriptionType.Key_Shared).subscribe();
+        new Thread(() -> {
+            try {
+                PerformanceProducer.main(args.split(" "));
+            } catch (Exception e) {
+                log.error("Failed to start perf producer");
+            }
+        }).start();
+        Awaitility.await()
+                .untilAsserted(() -> {
+                    Message<byte[]> message = consumer.receive(3, TimeUnit.SECONDS);
+                    assertNotNull(message);
+                });
+        consumer.close();
+    }
 }
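
The change itself is small: the perf tool previously forwarded its maxPendingMessagesAcrossPartitions argument to the producer builder unconditionally, even when the flag was left at its default. The fix guards the call so the cross-partition limit is applied only when a positive value is present, leaving the Pulsar client's own behavior untouched otherwise. A minimal sketch of the same guard, with the service URL and topic as placeholder assumptions:

    import org.apache.pulsar.client.api.Producer;
    import org.apache.pulsar.client.api.ProducerBuilder;
    import org.apache.pulsar.client.api.PulsarClient;

    public class GuardedProducerConfig {
        public static void main(String[] args) throws Exception {
            PulsarClient client = PulsarClient.builder()
                    .serviceUrl("pulsar://localhost:6650") // placeholder URL
                    .build();

            int maxPendingAcrossPartitions = 0; // 0 here means "not set by the user"

            ProducerBuilder<byte[]> builder = client.newProducer()
                    .topic("test-topic") // placeholder topic
                    .maxPendingMessages(1000);

            // Only apply the cross-partition cap when it was explicitly provided,
            // keeping the client default in place otherwise -- the pattern of #15283.
            if (maxPendingAcrossPartitions > 0) {
                builder.maxPendingMessagesAcrossPartitions(maxPendingAcrossPartitions);
            }

            Producer<byte[]> producer = builder.create();
            producer.close();
            client.close();
        }
    }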


[pulsar] 11/26: Fix duplicate validateTopicOwnershipAsync (#15120)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit c6c7d6dd41cb2480408df35e29d652282f477587
Author: gaozhangmin <ga...@qq.com>
AuthorDate: Thu Apr 21 09:59:37 2022 +0800

    Fix duplicate validateTopicOwnershipAsync (#15120)
    
    Co-authored-by: gavingaozhangmin <ga...@didiglobal.com>
    (cherry picked from commit 151f1d1d3e14df9166547d1aed829c774ccce99d)
---
 .../broker/admin/impl/PersistentTopicsBase.java    | 259 ++++++++++-----------
 1 file changed, 129 insertions(+), 130 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
index 3c3f622266c..0b92e1ab981 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
@@ -1039,11 +1039,13 @@ public class PersistentTopicsBase extends AdminResource {
         } else {
             future = CompletableFuture.completedFuture(null);
         }
-        future.thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
-                .thenAccept(unused -> {
+        future.thenCompose(__ ->
+                validateTopicOperationAsync(topicName, TopicOperation.GET_SUBSCRIPTIONS)
+                .thenCompose(unused -> validateTopicOwnershipAsync(topicName, authoritative))
+                .thenAccept(unused1 -> {
                     // If the topic name is a partition name, no need to get partition topic metadata again
                     if (topicName.isPartitioned()) {
-                        internalGetSubscriptionsForNonPartitionedTopic(asyncResponse, authoritative);
+                        internalGetSubscriptionsForNonPartitionedTopic(asyncResponse);
                     } else {
                         getPartitionedTopicMetadataAsync(topicName, authoritative, false)
                                 .thenAccept(partitionMetadata -> {
@@ -1061,7 +1063,7 @@ public class PersistentTopicsBase extends AdminResource {
                                                     topicResources().persistentTopicExists(topicName.getPartition(i)));
                                         }
                                         FutureUtil.waitForAll(Lists.newArrayList(existsFutures.values()))
-                                                .thenApply(__ ->
+                                                .thenApply(unused2 ->
                                                 existsFutures.entrySet().stream().filter(e -> e.getValue().join())
                                                         .map(item -> topicName.getPartition(item.getKey()).toString())
                                                         .collect(Collectors.toList())
@@ -1080,7 +1082,7 @@ public class PersistentTopicsBase extends AdminResource {
                                                     throw new RestException(e);
                                                 }
                                             });
-                                        }).thenAccept(__ -> resumeAsyncResponse(asyncResponse,
+                                        }).thenAccept(unused3 -> resumeAsyncResponse(asyncResponse,
                                                         subscriptions, subscriptionFutures));
                                     } else {
                                         for (int i = 0; i < partitionMetadata.partitions; i++) {
@@ -1098,7 +1100,7 @@ public class PersistentTopicsBase extends AdminResource {
                                     asyncResponse.resume(e);
                                 }
                             } else {
-                                internalGetSubscriptionsForNonPartitionedTopic(asyncResponse, authoritative);
+                                internalGetSubscriptionsForNonPartitionedTopic(asyncResponse);
                             }
                         }).exceptionally(ex -> {
                             // If the exception is not redirect exception we need to log it.
@@ -1118,7 +1120,8 @@ public class PersistentTopicsBase extends AdminResource {
                     }
                     resumeAsyncResponseExceptionally(asyncResponse, ex);
                     return null;
-                });
+                })
+        );
     }
 
     private void resumeAsyncResponse(AsyncResponse asyncResponse, Set<String> subscriptions,
@@ -1147,10 +1150,8 @@ public class PersistentTopicsBase extends AdminResource {
         });
     }
 
-    private void internalGetSubscriptionsForNonPartitionedTopic(AsyncResponse asyncResponse, boolean authoritative) {
-        validateTopicOwnershipAsync(topicName, authoritative)
-                .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.GET_SUBSCRIPTIONS))
-                .thenCompose(__ -> getTopicReferenceAsync(topicName))
+    private void internalGetSubscriptionsForNonPartitionedTopic(AsyncResponse asyncResponse) {
+        getTopicReferenceAsync(topicName)
                 .thenAccept(topic -> asyncResponse.resume(Lists.newArrayList(topic.getSubscriptions().keys())))
                 .exceptionally(ex -> {
                     // If the exception is not redirect exception we need to log it.
@@ -1725,11 +1726,7 @@ public class PersistentTopicsBase extends AdminResource {
     private CompletableFuture<Void> internalSkipAllMessagesForNonPartitionedTopicAsync(AsyncResponse asyncResponse,
                                                                                        String subName,
                                                                                        boolean authoritative) {
-        return validateTopicOwnershipAsync(topicName, authoritative)
-            .thenCompose(__ ->
-                validateTopicOperationAsync(topicName, TopicOperation.SKIP, subName))
-            .thenCompose(__ ->
-                getTopicReferenceAsync(topicName).thenCompose(t -> {
+        return getTopicReferenceAsync(topicName).thenCompose(t -> {
                     PersistentTopic topic = (PersistentTopic) t;
                     BiConsumer<Void, Throwable> biConsumer = (v, ex) -> {
                         if (ex != null) {
@@ -1758,8 +1755,7 @@ public class PersistentTopicsBase extends AdminResource {
                         }
                         return sub.clearBacklog().whenComplete(biConsumer);
                     }
-                })
-                .exceptionally(ex -> {
+                }).exceptionally(ex -> {
                     // If the exception is not redirect exception we need to log it.
                     if (!isRedirectException(ex)) {
                         log.error("[{}] Failed to skip all messages for subscription {} on topic {}",
@@ -1767,7 +1763,7 @@ public class PersistentTopicsBase extends AdminResource {
                     }
                     resumeAsyncResponseExceptionally(asyncResponse, ex);
                     return null;
-                }));
+                });
     }
 
     protected void internalSkipMessages(AsyncResponse asyncResponse, String subName, int numMessages,
@@ -1930,7 +1926,7 @@ public class PersistentTopicsBase extends AdminResource {
                     for (int i = 0; i < subNames.size(); i++) {
                         try {
                             futures.add(internalExpireMessagesByTimestampForSinglePartitionAsync(partitionMetadata,
-                                    subNames.get(i), expireTimeInSeconds, authoritative));
+                                    subNames.get(i), expireTimeInSeconds));
                         } catch (Exception e) {
                             log.error("[{}] Failed to expire messages for all subscription up to {} on {}",
                                     clientAppId(), expireTimeInSeconds, topicName, e);
@@ -3441,61 +3437,68 @@ public class PersistentTopicsBase extends AdminResource {
             future = CompletableFuture.completedFuture(null);
         }
         future.thenCompose(__ ->
-            // If the topic name is a partition name, no need to get partition topic metadata again
-            getPartitionedTopicMetadataAsync(topicName, authoritative, false)
-                    .thenCompose(partitionMetadata -> {
-                        if (topicName.isPartitioned()) {
-                            return internalExpireMessagesByTimestampForSinglePartitionAsync(partitionMetadata, subName,
-                                    expireTimeInSeconds, authoritative)
-                                    .thenAccept(unused -> asyncResponse.resume(Response.noContent().build()));
-                        } else {
-                            if (partitionMetadata.partitions > 0) {
-                                return CompletableFuture.completedFuture(null).thenAccept(unused -> {
-                                    final List<CompletableFuture<Void>> futures = Lists.newArrayList();
-
-                                    // expire messages for each partition topic
-                                    for (int i = 0; i < partitionMetadata.partitions; i++) {
-                                        TopicName topicNamePartition = topicName.getPartition(i);
-                                        try {
-                                            futures.add(pulsar()
-                                                    .getAdminClient()
-                                                    .topics()
-                                                    .expireMessagesAsync(topicNamePartition.toString(),
-                                                            subName, expireTimeInSeconds));
-                                        } catch (Exception e) {
-                                            log.error("[{}] Failed to expire messages up to {} on {}", clientAppId(),
-                                                    expireTimeInSeconds, topicNamePartition, e);
-                                            asyncResponse.resume(new RestException(e));
-                                            return;
+            validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES)
+                .thenCompose(unused -> validateTopicOwnershipAsync(topicName, authoritative))
+                .thenCompose(unused2 ->
+                        // If the topic name is a partition name, no need to get partition topic metadata again
+                        getPartitionedTopicMetadataAsync(topicName, authoritative, false)
+                                .thenCompose(partitionMetadata -> {
+                                    if (topicName.isPartitioned()) {
+                                        return internalExpireMessagesByTimestampForSinglePartitionAsync
+                                                (partitionMetadata, subName, expireTimeInSeconds)
+                                                .thenAccept(unused3 ->
+                                                        asyncResponse.resume(Response.noContent().build()));
+                                    } else {
+                                        if (partitionMetadata.partitions > 0) {
+                                            return CompletableFuture.completedFuture(null).thenAccept(unused -> {
+                                                final List<CompletableFuture<Void>> futures = Lists.newArrayList();
+
+                                                // expire messages for each partition topic
+                                                for (int i = 0; i < partitionMetadata.partitions; i++) {
+                                                    TopicName topicNamePartition = topicName.getPartition(i);
+                                                    try {
+                                                        futures.add(pulsar()
+                                                                .getAdminClient()
+                                                                .topics()
+                                                                .expireMessagesAsync(topicNamePartition.toString(),
+                                                                        subName, expireTimeInSeconds));
+                                                    } catch (Exception e) {
+                                                        log.error("[{}] Failed to expire messages up to {} on {}",
+                                                                clientAppId(),
+                                                                expireTimeInSeconds, topicNamePartition, e);
+                                                        asyncResponse.resume(new RestException(e));
+                                                        return;
+                                                    }
+                                                }
+
+                                                FutureUtil.waitForAll(futures).handle((result, exception) -> {
+                                                    if (exception != null) {
+                                                        Throwable t = exception.getCause();
+                                                        if (t instanceof NotFoundException) {
+                                                            asyncResponse.resume(new RestException(Status.NOT_FOUND,
+                                                                    "Subscription not found"));
+                                                            return null;
+                                                        } else {
+                                                            log.error("[{}] Failed to expire messages up "
+                                                                            + "to {} on {}", clientAppId(),
+                                                                    expireTimeInSeconds, topicName, t);
+                                                            asyncResponse.resume(new RestException(t));
+                                                            return null;
+                                                        }
+                                                    }
+                                                    asyncResponse.resume(Response.noContent().build());
+                                                    return null;
+                                                });
+                                            });
+                                        } else {
+                                            return internalExpireMessagesByTimestampForSinglePartitionAsync
+                                                    (partitionMetadata, subName, expireTimeInSeconds)
+                                                    .thenAccept(unused ->
+                                                            asyncResponse.resume(Response.noContent().build()));
                                         }
                                     }
+                                }))
 
-                                    FutureUtil.waitForAll(futures).handle((result, exception) -> {
-                                        if (exception != null) {
-                                            Throwable t = exception.getCause();
-                                            if (t instanceof NotFoundException) {
-                                                asyncResponse.resume(new RestException(Status.NOT_FOUND,
-                                                        "Subscription not found"));
-                                                return null;
-                                            } else {
-                                                log.error("[{}] Failed to expire messages up to {} on {}",
-                                                        clientAppId(), expireTimeInSeconds,
-                                                        topicName, t);
-                                                asyncResponse.resume(new RestException(t));
-                                                return null;
-                                            }
-                                        }
-                                        asyncResponse.resume(Response.noContent().build());
-                                        return null;
-                                    });
-                                });
-                            } else {
-                                return internalExpireMessagesByTimestampForSinglePartitionAsync(partitionMetadata,
-                                        subName, expireTimeInSeconds, authoritative)
-                                        .thenAccept(unused -> asyncResponse.resume(Response.noContent().build()));
-                            }
-                        }
-                    })
         ).exceptionally(ex -> {
             // If the exception is not redirect exception we need to log it.
             if (!isRedirectException(ex)) {
@@ -3508,69 +3511,65 @@ public class PersistentTopicsBase extends AdminResource {
     }
 
     private CompletableFuture<Void> internalExpireMessagesByTimestampForSinglePartitionAsync(
-            PartitionedTopicMetadata partitionMetadata, String subName, int expireTimeInSeconds,
-            boolean authoritative) {
+            PartitionedTopicMetadata partitionMetadata, String subName, int expireTimeInSeconds) {
         if (!topicName.isPartitioned() && partitionMetadata.partitions > 0) {
             String msg = "This method should not be called for partitioned topic";
             return FutureUtil.failedFuture(new IllegalStateException(msg));
         } else {
             final CompletableFuture<Void> resultFuture = new CompletableFuture<>();
-            validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES)
-                    .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
-                    .thenCompose(__ -> getTopicReferenceAsync(topicName).thenAccept(t -> {
-                         if (t == null) {
-                             resultFuture.completeExceptionally(new RestException(Status.NOT_FOUND, "Topic not found"));
-                             return;
-                         }
-                        if (!(t instanceof PersistentTopic)) {
-                            resultFuture.completeExceptionally(new RestException(Status.METHOD_NOT_ALLOWED,
-                                    "Expire messages on a non-persistent topic is not allowed"));
-                            return;
-                        }
-                        PersistentTopic topic = (PersistentTopic) t;
-
-                        boolean issued;
-                        if (subName.startsWith(topic.getReplicatorPrefix())) {
-                            String remoteCluster = PersistentReplicator.getRemoteCluster(subName);
-                            PersistentReplicator repl = (PersistentReplicator) topic
-                                    .getPersistentReplicator(remoteCluster);
-                            if (repl == null) {
-                                resultFuture.completeExceptionally(
-                                        new RestException(Status.NOT_FOUND, "Replicator not found"));
-                                return;
-                            }
-                            issued = repl.expireMessages(expireTimeInSeconds);
-                        } else {
-                            PersistentSubscription sub = topic.getSubscription(subName);
-                            if (sub == null) {
-                                resultFuture.completeExceptionally(
-                                        new RestException(Status.NOT_FOUND, "Subscription not found"));
-                                return;
-                            }
-                            issued = sub.expireMessages(expireTimeInSeconds);
-                        }
-                        if (issued) {
-                            log.info("[{}] Message expire started up to {} on {} {}", clientAppId(),
-                                    expireTimeInSeconds, topicName, subName);
-                            resultFuture.complete(__);
-                        } else {
-                            if (log.isDebugEnabled()) {
-                                log.debug("Expire message by timestamp not issued on topic {} for subscription {} "
-                                        + "due to ongoing message expiration not finished or subscription almost"
-                                        + " catch up. If it's performed on a partitioned topic operation might "
-                                        + "succeeded on other partitions, please check stats of individual "
-                                        + "partition.", topicName, subName);
-                            }
-                            resultFuture.completeExceptionally(new RestException(Status.CONFLICT, "Expire message "
-                                    + "by timestamp not issued on topic " + topicName + " for subscription "
-                                    + subName + " due to ongoing message expiration not finished or subscription "
-                                    + "almost catch  up. If it's performed on a partitioned topic operation might"
-                                    + " succeeded on other partitions, please check stats of individual partition."
-                            ));
-                            return;
-                        }
-                            })
-                    ).exceptionally(e -> {
+            getTopicReferenceAsync(topicName).thenAccept(t -> {
+                 if (t == null) {
+                     resultFuture.completeExceptionally(new RestException(Status.NOT_FOUND, "Topic not found"));
+                     return;
+                 }
+                if (!(t instanceof PersistentTopic)) {
+                    resultFuture.completeExceptionally(new RestException(Status.METHOD_NOT_ALLOWED,
+                            "Expire messages on a non-persistent topic is not allowed"));
+                    return;
+                }
+                PersistentTopic topic = (PersistentTopic) t;
+
+                boolean issued;
+                if (subName.startsWith(topic.getReplicatorPrefix())) {
+                    String remoteCluster = PersistentReplicator.getRemoteCluster(subName);
+                    PersistentReplicator repl = (PersistentReplicator) topic
+                            .getPersistentReplicator(remoteCluster);
+                    if (repl == null) {
+                        resultFuture.completeExceptionally(
+                                new RestException(Status.NOT_FOUND, "Replicator not found"));
+                        return;
+                    }
+                    issued = repl.expireMessages(expireTimeInSeconds);
+                } else {
+                    PersistentSubscription sub = topic.getSubscription(subName);
+                    if (sub == null) {
+                        resultFuture.completeExceptionally(
+                                new RestException(Status.NOT_FOUND, "Subscription not found"));
+                        return;
+                    }
+                    issued = sub.expireMessages(expireTimeInSeconds);
+                }
+                if (issued) {
+                    log.info("[{}] Message expire started up to {} on {} {}", clientAppId(),
+                            expireTimeInSeconds, topicName, subName);
+                    resultFuture.complete(null);
+                } else {
+                    if (log.isDebugEnabled()) {
+                        log.debug("Expire message by timestamp not issued on topic {} for subscription {} "
+                                + "due to ongoing message expiration not finished or subscription almost"
+                                + " catch up. If it's performed on a partitioned topic operation might "
+                                + "succeeded on other partitions, please check stats of individual "
+                                + "partition.", topicName, subName);
+                    }
+                    resultFuture.completeExceptionally(new RestException(Status.CONFLICT, "Expire message "
+                            + "by timestamp not issued on topic " + topicName + " for subscription "
+                            + subName + " due to ongoing message expiration not finished or subscription "
+                            + "almost catch  up. If it's performed on a partitioned topic operation might"
+                            + " succeeded on other partitions, please check stats of individual partition."
+                    ));
+                    return;
+                }
+            }).exceptionally(e -> {
                 resultFuture.completeExceptionally(FutureUtil.unwrapCompletionException(e));
                 return null;
             });
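
Net effect of the patch: the GET_SUBSCRIPTIONS, SKIP, and EXPIRE_MESSAGES entry points validate permissions and ownership once, and the internal helpers they delegate to no longer repeat those checks (their authoritative parameters are dropped accordingly). A self-contained sketch of the resulting shape, with stand-in validators; the names below are illustrative, not the broker's:

    import java.util.concurrent.CompletableFuture;

    public class ValidateOnceSketch {
        // Stand-ins for validateTopicOperationAsync / validateTopicOwnershipAsync.
        static CompletableFuture<Void> validateOperation() {
            return CompletableFuture.completedFuture(null);
        }
        static CompletableFuture<Void> validateOwnership() {
            return CompletableFuture.completedFuture(null);
        }
        // Stand-in for a helper such as internalGetSubscriptionsForNonPartitionedTopic,
        // which after the patch assumes validation has already happened.
        static CompletableFuture<String> doTheWork() {
            return CompletableFuture.completedFuture("sub-1, sub-2");
        }

        public static void main(String[] args) {
            validateOperation()                        // checked once, at the entry point
                    .thenCompose(__ -> validateOwnership())
                    .thenCompose(__ -> doTheWork())    // helper no longer re-validates
                    .thenAccept(System.out::println)
                    .join();
        }
    }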


[pulsar] 13/26: [Fix][Broker] Fix race condition in `OpAddEntry` (#15233)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit c2cfad82db1ee0c039c2ce1b1c42593d4aea46f9
Author: Qiang Zhao <74...@users.noreply.github.com>
AuthorDate: Thu Apr 21 14:58:01 2022 +0800

    [Fix][Broker] Fix race condition in `OpAddEntry` (#15233)
    
    (cherry picked from commit b083e9a72227a3360d1ec33b5f239d82f0804e65)
---
 .../java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java     | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
index 453e77be0c8..0879c48de7d 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
@@ -3813,12 +3813,13 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback {
         }
         OpAddEntry opAddEntry = pendingAddEntries.peek();
         if (opAddEntry != null) {
+            final long finalAddOpCount = opAddEntry.addOpCount;
             boolean isTimedOut = opAddEntry.lastInitTime != -1
                     && TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - opAddEntry.lastInitTime) >= timeoutSec;
             if (isTimedOut) {
                 log.error("Failed to add entry for ledger {} in time-out {} sec",
                         (opAddEntry.ledger != null ? opAddEntry.ledger.getId() : -1), timeoutSec);
-                opAddEntry.handleAddTimeoutFailure(opAddEntry.ledger, opAddEntry.addOpCount);
+                opAddEntry.handleAddTimeoutFailure(opAddEntry.ledger, finalAddOpCount);
             }
         }
     }
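
The fix is a read-once: OpAddEntry instances are pooled and can be recycled concurrently, so addOpCount may change between the timeout check and the failure callback. Capturing the field into a local first ensures handleAddTimeoutFailure receives the same value the timeout decision was based on. A generic, runnable sketch of the pattern (field and method names are illustrative, not Pulsar's):

    import java.util.concurrent.atomic.AtomicLong;

    public class ReadOnceSketch {
        private final AtomicLong opCount = new AtomicLong(7);

        void checkTimeout() {
            // Capture the shared counter exactly once; a concurrent recycle of the
            // op can no longer slip a different value into the failure path.
            final long observedCount = opCount.get();
            if (isTimedOut()) {
                handleTimeoutFailure(observedCount);
            }
        }

        boolean isTimedOut() { return true; }      // stand-in for the elapsed-time check
        void handleTimeoutFailure(long count) {    // stand-in for handleAddTimeoutFailure
            System.out.println("failing op with count " + count);
        }

        public static void main(String[] args) {
            new ReadOnceSketch().checkTimeout();
        }
    }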


[pulsar] 15/26: Put `validateTopicOwnershipAsync` before `validateTopicOperationAsync` (#15265)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 90caa1c621a93c51a12e6d29e0ae213a985174e7
Author: Jiwei Guo <te...@apache.org>
AuthorDate: Fri Apr 22 19:46:11 2022 +0800

    Put `validateTopicOwnershipAsync` before `validateTopicOperationAsync` (#15265)
    
    (cherry picked from commit 41f40f06c4c4d74939bca07a9b83bda020147346)
---
 .../broker/admin/impl/PersistentTopicsBase.java    | 33 +++++++++++-----------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
index 0b92e1ab981..b286a6000dc 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/impl/PersistentTopicsBase.java
@@ -572,8 +572,9 @@ public class PersistentTopicsBase extends AdminResource {
 
     protected void internalDeletePartitionedTopic(AsyncResponse asyncResponse, boolean authoritative,
                                                   boolean force, boolean deleteSchema) {
-        validateNamespaceOperationAsync(topicName.getNamespaceObject(), NamespaceOperation.DELETE_TOPIC)
-                .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+        validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(__ -> validateNamespaceOperationAsync(topicName.getNamespaceObject(),
+                        NamespaceOperation.DELETE_TOPIC))
                 .thenCompose(__ -> pulsar().getBrokerService()
                         .fetchPartitionedTopicMetadataAsync(topicName)
                         .thenCompose(partitionedMeta -> {
@@ -963,8 +964,8 @@ public class PersistentTopicsBase extends AdminResource {
     }
 
     private void internalUnloadNonPartitionedTopicAsync(AsyncResponse asyncResponse, boolean authoritative) {
-        validateTopicOperationAsync(topicName, TopicOperation.UNLOAD)
-                .thenCompose(unused -> validateTopicOwnershipAsync(topicName, authoritative)
+        validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(unused -> validateTopicOperationAsync(topicName, TopicOperation.UNLOAD)
                         .thenCompose(__ -> getTopicReferenceAsync(topicName))
                         .thenCompose(topic -> topic.close(false))
                         .thenRun(() -> {
@@ -982,8 +983,8 @@ public class PersistentTopicsBase extends AdminResource {
     }
 
     private void internalUnloadTransactionCoordinatorAsync(AsyncResponse asyncResponse, boolean authoritative) {
-        validateTopicOperationAsync(topicName, TopicOperation.UNLOAD)
-                .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative)
+        validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.UNLOAD)
                         .thenCompose(v -> pulsar()
                                 .getTransactionMetadataStoreService()
                                 .removeTransactionMetadataStore(
@@ -1040,8 +1041,8 @@ public class PersistentTopicsBase extends AdminResource {
             future = CompletableFuture.completedFuture(null);
         }
         future.thenCompose(__ ->
-                validateTopicOperationAsync(topicName, TopicOperation.GET_SUBSCRIPTIONS)
-                .thenCompose(unused -> validateTopicOwnershipAsync(topicName, authoritative))
+                validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(unused -> validateTopicOperationAsync(topicName, TopicOperation.GET_SUBSCRIPTIONS))
                 .thenAccept(unused1 -> {
                     // If the topic name is a partition name, no need to get partition topic metadata again
                     if (topicName.isPartitioned()) {
@@ -1774,8 +1775,8 @@ public class PersistentTopicsBase extends AdminResource {
         } else {
             future = CompletableFuture.completedFuture(null);
         }
-        future.thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.SKIP))
-                .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+        future.thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+                .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.SKIP))
                 .thenCompose(__ -> getPartitionedTopicMetadataAsync(topicName, authoritative, false)
                      .thenCompose(partitionMetadata -> {
                          if (partitionMetadata.partitions > 0) {
@@ -1902,8 +1903,8 @@ public class PersistentTopicsBase extends AdminResource {
                                                                                  int expireTimeInSeconds,
                                                                                  boolean authoritative) {
         // validate ownership and redirect if current broker is not owner
-        validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES)
-                .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+        validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES))
                 .thenCompose(__ -> getTopicReferenceAsync(topicName).thenAccept(t -> {
                      if (t == null) {
                          resumeAsyncResponseExceptionally(asyncResponse, new RestException(Status.NOT_FOUND,
@@ -3437,8 +3438,8 @@ public class PersistentTopicsBase extends AdminResource {
             future = CompletableFuture.completedFuture(null);
         }
         future.thenCompose(__ ->
-            validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES)
-                .thenCompose(unused -> validateTopicOwnershipAsync(topicName, authoritative))
+                validateTopicOwnershipAsync(topicName, authoritative)
+                .thenCompose(unused -> validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES))
                 .thenCompose(unused2 ->
                         // If the topic name is a partition name, no need to get partition topic metadata again
                         getPartitionedTopicMetadataAsync(topicName, authoritative, false)
@@ -3586,8 +3587,8 @@ public class PersistentTopicsBase extends AdminResource {
             future = CompletableFuture.completedFuture(null);
         }
 
-        future.thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES))
-                .thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+        future.thenCompose(__ -> validateTopicOwnershipAsync(topicName, authoritative))
+                .thenCompose(__ -> validateTopicOperationAsync(topicName, TopicOperation.EXPIRE_MESSAGES))
                 .thenCompose(__ -> {
                     log.info("[{}][{}] received expire messages on subscription {} to position {}", clientAppId(),
                             topicName, subName, messageId);
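
The reordering is applied uniformly: every touched chain now runs validateTopicOwnershipAsync first, so a request that lands on a broker which does not own the topic is redirected before any permission check executes. A runnable sketch with simulated stand-ins for the two checks (the redirect is modeled as a failed future here):

    import java.util.concurrent.CompletableFuture;

    public class OwnershipFirstSketch {
        // Stand-in for validateTopicOwnershipAsync: fails (in the broker, redirects)
        // when this broker is not the owner.
        static CompletableFuture<Void> checkOwnership(boolean isOwner) {
            return isOwner
                    ? CompletableFuture.completedFuture(null)
                    : CompletableFuture.failedFuture(new IllegalStateException("redirect to owner broker"));
        }
        // Stand-in for validateTopicOperationAsync.
        static CompletableFuture<Void> checkPermission() {
            System.out.println("permission check ran");
            return CompletableFuture.completedFuture(null);
        }

        public static void main(String[] args) {
            checkOwnership(false)                         // ownership first ...
                    .thenCompose(__ -> checkPermission()) // ... so this never runs here
                    .exceptionally(ex -> {
                        System.out.println(ex.getCause().getMessage());
                        return null;
                    })
                    .join();
        }
    }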


[pulsar] 23/26: [fix][broker] fix resource group does not report usage (#15292)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit aef4dd202ca5692e07734ca28bc23ec901703b66
Author: WangJialing <65...@users.noreply.github.com>
AuthorDate: Wed Apr 27 15:29:05 2022 +0800

    [fix][broker] fix resource group does not report usage (#15292)
    
    * fix resource group does not report usage
    
    * fix checkstyle
    
    * fix mistake
    
    Co-authored-by: wangjialing <wa...@cmss.chinamobile.com>
    (cherry picked from commit 4560737bf9c0a8f419c37f6e2cb3a230dcfd4352)
---
 .../broker/resourcegroup/ResourceQuotaCalculatorImpl.java      |  4 ++--
 .../broker/resourcegroup/ResourceQuotaCalculatorImplTest.java  | 10 ++++++++++
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java
index ca83cae91c5..5dc50f2a255 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImpl.java
@@ -108,7 +108,7 @@ public class ResourceQuotaCalculatorImpl implements ResourceQuotaCalculator {
         final float toleratedDriftPercentage = ResourceGroupService.UsageReportSuppressionTolerancePercentage;
         if (currentBytesUsed > 0) {
             long diff = abs(currentBytesUsed - lastReportedBytes);
-            float diffPercentage = (diff / currentBytesUsed) * 100;
+            float diffPercentage = (float) diff * 100 / lastReportedBytes;
             if (diffPercentage > toleratedDriftPercentage) {
                 return true;
             }
@@ -116,7 +116,7 @@ public class ResourceQuotaCalculatorImpl implements ResourceQuotaCalculator {
 
         if (currentMessagesUsed > 0) {
             long diff = abs(currentMessagesUsed - lastReportedMessages);
-            float diffPercentage = (diff / currentMessagesUsed) * 100;
+            float diffPercentage = (float) diff * 100 / lastReportedMessages;
             if (diffPercentage > toleratedDriftPercentage) {
                 return true;
             }
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java
index f16f960831e..13e17d7c682 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/resourcegroup/ResourceQuotaCalculatorImplTest.java
@@ -111,5 +111,15 @@ public class ResourceQuotaCalculatorImplTest extends MockedPulsarServiceBaseTest
         Assert.assertEquals(newQuota, config);
     }
 
+    @Test
+    public void testNeedToReportLocalUsage() {
+        // If the percentage change (increase or decrease) in usage is more than 5% for
+        // either bytes or messages, send a report.
+        Assert.assertFalse(rqCalc.needToReportLocalUsage(1040, 1000, 104, 100, System.currentTimeMillis()));
+        Assert.assertFalse(rqCalc.needToReportLocalUsage(950, 1000, 95, 100, System.currentTimeMillis()));
+        Assert.assertTrue(rqCalc.needToReportLocalUsage(1060, 1000, 106, 100, System.currentTimeMillis()));
+        Assert.assertTrue(rqCalc.needToReportLocalUsage(940, 1000, 94, 100, System.currentTimeMillis()));
+    }
+
     private ResourceQuotaCalculatorImpl rqCalc;
 }
\ No newline at end of file
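
The bug is a classic integer-division truncation: with diff and currentBytesUsed both declared long, (diff / currentBytesUsed) evaluates in long arithmetic and is 0 whenever the drift is under 100%, so the usage report was almost always suppressed. The fix promotes to float before dividing and measures the drift against the last reported value, which is what the 5% tolerance in the new test expects (roughly 4% drift suppressed, 6% reported). A tiny worked example:

    public class IntegerDivisionPitfall {
        public static void main(String[] args) {
            long current = 1060, lastReported = 1000;
            long diff = Math.abs(current - lastReported);    // 60

            float broken = (diff / current) * 100;           // 60 / 1060 == 0 in long math -> 0.0
            float fixed = (float) diff * 100 / lastReported; // -> 6.0 (% drift vs last report)

            System.out.println("broken = " + broken + "%, fixed = " + fixed + "%");
        }
    }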


[pulsar] 02/26: Optimize memory usage: support to shrink for pendingAcks map (#14515)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit aeb092982975c42b5b3595c81df3d3f99f4d252f
Author: lin chen <15...@qq.com>
AuthorDate: Sun Mar 6 04:58:14 2022 +0800

    Optimize memory usage: support to  shrink for pendingAcks map (#14515)
    
    (cherry picked from commit e747b8f16b0b660231ff27a8c2100d67ad7c79a6)
---
 .../apache/pulsar/broker/ServiceConfiguration.java |   8 +
 .../org/apache/pulsar/broker/service/Consumer.java |  11 +-
 .../collections/ConcurrentLongLongPairHashMap.java | 673 +++++++++++++++++++++
 .../ConcurrentLongLongPairHashMapTest.java         | 427 +++++++++++++
 4 files changed, 1116 insertions(+), 3 deletions(-)

diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java
index cbcd4034718..61aca76c199 100644
--- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java
+++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/ServiceConfiguration.java
@@ -703,6 +703,14 @@ public class ServiceConfiguration implements PulsarConfiguration {
     )
     private boolean isAllowAutoUpdateSchemaEnabled = true;
 
+    @FieldContext(
+            category = CATEGORY_SERVER,
+            doc = "Whether to enable the automatic shrink of pendingAcks map, "
+                    + "the default is false, which means it is not enabled. "
+                    + "When there are a large number of share or key share consumers in the cluster, "
+                    + "it can be enabled to reduce the memory consumption caused by pendingAcks.")
+    private boolean autoShrinkForConsumerPendingAcksMap = false;
+
     @FieldContext(
         category = CATEGORY_SERVER,
         dynamic = true,
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
index dbc425d9dfa..0f8a6712676 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
@@ -40,8 +40,6 @@ import lombok.Setter;
 import org.apache.bookkeeper.mledger.Entry;
 import org.apache.bookkeeper.mledger.Position;
 import org.apache.bookkeeper.mledger.impl.PositionImpl;
-import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap;
-import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.commons.lang3.tuple.MutablePair;
 import org.apache.pulsar.broker.service.persistent.PersistentSubscription;
@@ -62,6 +60,8 @@ import org.apache.pulsar.common.stats.Rate;
 import org.apache.pulsar.common.util.DateFormatter;
 import org.apache.pulsar.common.util.FutureUtil;
 import org.apache.pulsar.common.util.collections.BitSetRecyclable;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair;
 import org.apache.pulsar.transaction.common.exception.TransactionConflictException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -186,7 +186,12 @@ public class Consumer {
         stats.metadata = this.metadata;
 
         if (Subscription.isIndividualAckMode(subType)) {
-            this.pendingAcks = new ConcurrentLongLongPairHashMap(256, 1);
+            this.pendingAcks = ConcurrentLongLongPairHashMap.newBuilder()
+                    .autoShrink(subscription.getTopic().getBrokerService()
+                            .getPulsar().getConfiguration().isAutoShrinkForConsumerPendingAcksMap())
+                    .expectedItems(256)
+                    .concurrencyLevel(1)
+                    .build();
         } else {
             // We don't need to keep track of pending acks if the subscription is not shared
             this.pendingAcks = null;
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java
new file mode 100644
index 00000000000..eac7268ba67
--- /dev/null
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMap.java
@@ -0,0 +1,673 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.common.util.collections;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.locks.StampedLock;
+
+/**
+ * Concurrent hash map where both keys and values are composed of pairs of longs.
+ *
+ * <p>(long,long) --&gt; (long,long)
+ *
+ * <p>Provides similar methods as a {@code ConcurrentMap<K,V>} but since it's an open hash map with linear probing,
+ * no node allocations are required to store the keys and values, and no boxing is required.
+ *
+ * <p>Keys <strong>MUST</strong> be &gt;= 0.
+ */
+public class ConcurrentLongLongPairHashMap {
+
+    private static final long EmptyKey = -1L;
+    private static final long DeletedKey = -2L;
+
+    private static final long ValueNotFound = -1L;
+
+    private static final int DefaultExpectedItems = 256;
+    private static final int DefaultConcurrencyLevel = 16;
+
+    private static final float DefaultMapFillFactor = 0.66f;
+    private static final float DefaultMapIdleFactor = 0.15f;
+
+    private static final float DefaultExpandFactor = 2;
+    private static final float DefaultShrinkFactor = 2;
+
+    private static final boolean DefaultAutoShrink = false;
+
+    private final Section[] sections;
+
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    /**
+     * Builder of ConcurrentLongLongPairHashMap.
+     */
+    public static class Builder {
+        int expectedItems = DefaultExpectedItems;
+        int concurrencyLevel = DefaultConcurrencyLevel;
+        float mapFillFactor = DefaultMapFillFactor;
+        float mapIdleFactor = DefaultMapIdleFactor;
+        float expandFactor = DefaultExpandFactor;
+        float shrinkFactor = DefaultShrinkFactor;
+        boolean autoShrink = DefaultAutoShrink;
+
+        public Builder expectedItems(int expectedItems) {
+            this.expectedItems = expectedItems;
+            return this;
+        }
+
+        public Builder concurrencyLevel(int concurrencyLevel) {
+            this.concurrencyLevel = concurrencyLevel;
+            return this;
+        }
+
+        public Builder mapFillFactor(float mapFillFactor) {
+            this.mapFillFactor = mapFillFactor;
+            return this;
+        }
+
+        public Builder mapIdleFactor(float mapIdleFactor) {
+            this.mapIdleFactor = mapIdleFactor;
+            return this;
+        }
+
+        public Builder expandFactor(float expandFactor) {
+            this.expandFactor = expandFactor;
+            return this;
+        }
+
+        public Builder shrinkFactor(float shrinkFactor) {
+            this.shrinkFactor = shrinkFactor;
+            return this;
+        }
+
+        public Builder autoShrink(boolean autoShrink) {
+            this.autoShrink = autoShrink;
+            return this;
+        }
+
+        public ConcurrentLongLongPairHashMap build() {
+            return new ConcurrentLongLongPairHashMap(expectedItems, concurrencyLevel,
+                    mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
+    /**
+     * A BiConsumer Long pair.
+     */
+    public interface BiConsumerLongPair {
+        void accept(long key1, long key2, long value1, long value2);
+    }
+
+    /**
+     * A Long pair function.
+     */
+    public interface LongLongPairFunction {
+        long apply(long key1, long key2);
+    }
+
+    /**
+     * A Long pair predicate.
+     */
+    public interface LongLongPairPredicate {
+        boolean test(long key1, long key2, long value1, long value2);
+    }
+
+    private ConcurrentLongLongPairHashMap(int expectedItems, int concurrencyLevel,
+                                          float mapFillFactor, float mapIdleFactor,
+                                         boolean autoShrink, float expandFactor, float shrinkFactor) {
+        checkArgument(expectedItems > 0);
+        checkArgument(concurrencyLevel > 0);
+        checkArgument(expectedItems >= concurrencyLevel);
+        checkArgument(mapFillFactor > 0 && mapFillFactor < 1);
+        checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1);
+        checkArgument(mapFillFactor > mapIdleFactor);
+        checkArgument(expandFactor > 1);
+        checkArgument(shrinkFactor > 1);
+
+        int numSections = concurrencyLevel;
+        int perSectionExpectedItems = expectedItems / numSections;
+        int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor);
+        this.sections = new Section[numSections];
+
+        for (int i = 0; i < numSections; i++) {
+            sections[i] = new Section(perSectionCapacity, mapFillFactor, mapIdleFactor,
+                    autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
+    public long size() {
+        long size = 0;
+        for (Section s : sections) {
+            size += s.size;
+        }
+        return size;
+    }
+
+    public long capacity() {
+        long capacity = 0;
+        for (Section s : sections) {
+            capacity += s.capacity;
+        }
+        return capacity;
+    }
+
+    public boolean isEmpty() {
+        for (Section s : sections) {
+            if (s.size != 0) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    long getUsedBucketCount() {
+        long usedBucketCount = 0;
+        for (Section s : sections) {
+            usedBucketCount += s.usedBuckets;
+        }
+        return usedBucketCount;
+    }
+
+    /**
+     * Returns the value pair to which the given key pair is mapped.
+     *
+     * @param key1 first component of the key
+     * @param key2 second component of the key
+     * @return the value pair, or {@code null} if the key was not present
+     */
+    public LongPair get(long key1, long key2) {
+        checkBiggerEqualZero(key1);
+        long h = hash(key1, key2);
+        return getSection(h).get(key1, key2, (int) h);
+    }
+
+    public boolean containsKey(long key1, long key2) {
+        return get(key1, key2) != null;
+    }
+
+    public boolean put(long key1, long key2, long value1, long value2) {
+        checkBiggerEqualZero(key1);
+        checkBiggerEqualZero(value1);
+        long h = hash(key1, key2);
+        return getSection(h).put(key1, key2, value1, value2, (int) h, false);
+    }
+
+    public boolean putIfAbsent(long key1, long key2, long value1, long value2) {
+        checkBiggerEqualZero(key1);
+        checkBiggerEqualZero(value1);
+        long h = hash(key1, key2);
+        return getSection(h).put(key1, key2, value1, value2, (int) h, true);
+    }
+
+    /**
+     * Remove an existing entry if found.
+     *
+     * @param key1 first component of the key
+     * @param key2 second component of the key
+     * @return true if the entry was found and removed, false otherwise
+     */
+    public boolean remove(long key1, long key2) {
+        checkBiggerEqualZero(key1);
+        long h = hash(key1, key2);
+        return getSection(h).remove(key1, key2, ValueNotFound, ValueNotFound, (int) h);
+    }
+
+    public boolean remove(long key1, long key2, long value1, long value2) {
+        checkBiggerEqualZero(key1);
+        checkBiggerEqualZero(value1);
+        long h = hash(key1, key2);
+        return getSection(h).remove(key1, key2, value1, value2, (int) h);
+    }
+
+    private Section getSection(long hash) {
+        // Use 32 msb out of long to get the section
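+        // (The low bits of the same hash later select the bucket within the
+        // section, so section choice and bucket choice stay independent. The
+        // mask below assumes sections.length is a power of two.)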
+        final int sectionIdx = (int) (hash >>> 32) & (sections.length - 1);
+        return sections[sectionIdx];
+    }
+
+    public void clear() {
+        for (Section s : sections) {
+            s.clear();
+        }
+    }
+
+    public void forEach(BiConsumerLongPair processor) {
+        for (Section s : sections) {
+            s.forEach(processor);
+        }
+    }
+
+    /**
+     * @return a new list of all keys (makes a copy).
+     */
+    public List<LongPair> keys() {
+        List<LongPair> keys = Lists.newArrayListWithExpectedSize((int) size());
+        forEach((key1, key2, value1, value2) -> keys.add(new LongPair(key1, key2)));
+        return keys;
+    }
+
+    public List<LongPair> values() {
+        List<LongPair> values = Lists.newArrayListWithExpectedSize((int) size());
+        forEach((key1, key2, value1, value2) -> values.add(new LongPair(value1, value2)));
+        return values;
+    }
+
+    public Map<LongPair, LongPair> asMap() {
+        Map<LongPair, LongPair> map = Maps.newHashMapWithExpectedSize((int) size());
+        forEach((key1, key2, value1, value2) -> map.put(new LongPair(key1, key2), new LongPair(value1, value2)));
+        return map;
+    }
+
+    // A section is a portion of the hash map that is covered by a single lock.
+    @SuppressWarnings("serial")
+    private static final class Section extends StampedLock {
+        // Keys and values are stored interleaved in the table array
+        private volatile long[] table;
+
+        private volatile int capacity;
+        private final int initCapacity;
+        private static final AtomicIntegerFieldUpdater<Section> SIZE_UPDATER =
+                AtomicIntegerFieldUpdater.newUpdater(Section.class, "size");
+
+        private volatile int size;
+        private int usedBuckets;
+        private int resizeThresholdUp;
+        private int resizeThresholdBelow;
+        private final float mapFillFactor;
+        private final float mapIdleFactor;
+        private final float expandFactor;
+        private final float shrinkFactor;
+        private final boolean autoShrink;
+
+        Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink,
+                float expandFactor, float shrinkFactor) {
+            this.capacity = alignToPowerOfTwo(capacity);
+            this.initCapacity = this.capacity;
+            this.table = new long[4 * this.capacity];
+            this.size = 0;
+            this.usedBuckets = 0;
+            this.autoShrink = autoShrink;
+            this.mapFillFactor = mapFillFactor;
+            this.mapIdleFactor = mapIdleFactor;
+            this.expandFactor = expandFactor;
+            this.shrinkFactor = shrinkFactor;
+            this.resizeThresholdUp = (int) (this.capacity * mapFillFactor);
+            this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor);
+            Arrays.fill(table, EmptyKey);
+        }
+
+        LongPair get(long key1, long key2, int keyHash) {
+            long stamp = tryOptimisticRead();
+            boolean acquiredLock = false;
+            int bucket = signSafeMod(keyHash, capacity);
+
+            try {
+                while (true) {
+                    // First try optimistic locking
+                    long storedKey1 = table[bucket];
+                    long storedKey2 = table[bucket + 1];
+                    long storedValue1 = table[bucket + 2];
+                    long storedValue2 = table[bucket + 3];
+
+                    if (!acquiredLock && validate(stamp)) {
+                        // The values we have read are consistent
+                        if (key1 == storedKey1 && key2 == storedKey2) {
+                            return new LongPair(storedValue1, storedValue2);
+                        } else if (storedKey1 == EmptyKey) {
+                            // Not found
+                            return null;
+                        }
+                    } else {
+                        // Fallback to acquiring read lock
+                        if (!acquiredLock) {
+                            stamp = readLock();
+                            acquiredLock = true;
+
+                            bucket = signSafeMod(keyHash, capacity);
+                            storedKey1 = table[bucket];
+                            storedKey2 = table[bucket + 1];
+                            storedValue1 = table[bucket + 2];
+                            storedValue2 = table[bucket + 3];
+                        }
+
+                        if (key1 == storedKey1 && key2 == storedKey2) {
+                            return new LongPair(storedValue1, storedValue2);
+                        } else if (storedKey1 == EmptyKey) {
+                            // Not found
+                            return null;
+                        }
+                    }
+
+                    bucket = (bucket + 4) & (table.length - 1);
+                }
+            } finally {
+                if (acquiredLock) {
+                    unlockRead(stamp);
+                }
+            }
+        }
+
+        boolean put(long key1, long key2, long value1, long value2, int keyHash, boolean onlyIfAbsent) {
+            long stamp = writeLock();
+            int bucket = signSafeMod(keyHash, capacity);
+
+            // Remember where we find the first available spot
+            int firstDeletedKey = -1;
+
+            try {
+                while (true) {
+                    long storedKey1 = table[bucket];
+                    long storedKey2 = table[bucket + 1];
+
+                    if (key1 == storedKey1 && key2 == storedKey2) {
+                        if (!onlyIfAbsent) {
+                            // Overwrite the old value for the same key
+                            table[bucket + 2] = value1;
+                            table[bucket + 3] = value2;
+                            return true;
+                        } else {
+                            return false;
+                        }
+                    } else if (storedKey1 == EmptyKey) {
+                        // Found an empty bucket. This means the key is not in the map. If we've already seen a deleted
+                        // key, we should write at that position
+                        if (firstDeletedKey != -1) {
+                            bucket = firstDeletedKey;
+                        } else {
+                            ++usedBuckets;
+                        }
+
+                        table[bucket] = key1;
+                        table[bucket + 1] = key2;
+                        table[bucket + 2] = value1;
+                        table[bucket + 3] = value2;
+                        SIZE_UPDATER.incrementAndGet(this);
+                        return true;
+                    } else if (storedKey1 == DeletedKey) {
+                        // The bucket contained a different deleted key
+                        if (firstDeletedKey == -1) {
+                            firstDeletedKey = bucket;
+                        }
+                    }
+
+                    bucket = (bucket + 4) & (table.length - 1);
+                }
+            } finally {
+                if (usedBuckets > resizeThresholdUp) {
+                    try {
+                        // Expand the hashmap
+                        int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor));
+                        rehash(newCapacity);
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
+            }
+        }
+
+        private boolean remove(long key1, long key2, long value1, long value2, int keyHash) {
+            long stamp = writeLock();
+            int bucket = signSafeMod(keyHash, capacity);
+
+            try {
+                while (true) {
+                    long storedKey1 = table[bucket];
+                    long storedKey2 = table[bucket + 1];
+                    long storedValue1 = table[bucket + 2];
+                    long storedValue2 = table[bucket + 3];
+                    if (key1 == storedKey1 && key2 == storedKey2) {
+                        if (value1 == ValueNotFound || (value1 == storedValue1 && value2 == storedValue2)) {
+                            SIZE_UPDATER.decrementAndGet(this);
+
+                            cleanBucket(bucket);
+                            return true;
+                        } else {
+                            return false;
+                        }
+                    } else if (storedKey1 == EmptyKey) {
+                        // Key wasn't found
+                        return false;
+                    }
+
+                    bucket = (bucket + 4) & (table.length - 1);
+                }
+
+            } finally {
+                if (autoShrink && size < resizeThresholdBelow) {
+                    try {
+                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                        if (newCapacity < capacity && newResizeThresholdUp > size) {
+                            // shrink the hashmap
+                            rehash(newCapacity);
+                        }
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
+            }
+        }
+
+        private void cleanBucket(int bucket) {
+            int nextInArray = (bucket + 4) & (table.length - 1);
+            if (table[nextInArray] == EmptyKey) {
+                table[bucket] = EmptyKey;
+                table[bucket + 1] = EmptyKey;
+                table[bucket + 2] = ValueNotFound;
+                table[bucket + 3] = ValueNotFound;
+                --usedBuckets;
+
+                // Cleanup all the buckets that were in `DeletedKey` state, so that we can reduce unnecessary expansions
+                bucket = (bucket - 4) & (table.length - 1);
+                while (table[bucket] == DeletedKey) {
+                    table[bucket] = EmptyKey;
+                    table[bucket + 1] = EmptyKey;
+                    table[bucket + 2] = ValueNotFound;
+                    table[bucket + 3] = ValueNotFound;
+                    --usedBuckets;
+
+                    bucket = (bucket - 4) & (table.length - 1);
+                }
+            } else {
+                table[bucket] = DeletedKey;
+                table[bucket + 1] = DeletedKey;
+                table[bucket + 2] = ValueNotFound;
+                table[bucket + 3] = ValueNotFound;
+            }
+        }
+
+        void clear() {
+            long stamp = writeLock();
+
+            try {
+                Arrays.fill(table, EmptyKey);
+                this.size = 0;
+                this.usedBuckets = 0;
+                if (autoShrink) {
+                    rehash(initCapacity);
+                }
+            } finally {
+                unlockWrite(stamp);
+            }
+        }
+
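+        // Iterates all buckets under an optimistic stamp, snapshotting the table
+        // reference first; falls back to the read lock whenever a concurrent
+        // rehash invalidates the stamp.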
+        public void forEach(BiConsumerLongPair processor) {
+            long stamp = tryOptimisticRead();
+
+            long[] table = this.table;
+            boolean acquiredReadLock = false;
+
+            try {
+
+                // Validate no rehashing
+                if (!validate(stamp)) {
+                    // Fallback to read lock
+                    stamp = readLock();
+                    acquiredReadLock = true;
+                    table = this.table;
+                }
+
+                // Go through all the buckets for this section
+                for (int bucket = 0; bucket < table.length; bucket += 4) {
+                    long storedKey1 = table[bucket];
+                    long storedKey2 = table[bucket + 1];
+                    long storedValue1 = table[bucket + 2];
+                    long storedValue2 = table[bucket + 3];
+
+                    if (!acquiredReadLock && !validate(stamp)) {
+                        // Fallback to acquiring read lock
+                        stamp = readLock();
+                        acquiredReadLock = true;
+
+                        storedKey1 = table[bucket];
+                        storedKey2 = table[bucket + 1];
+                        storedValue1 = table[bucket + 2];
+                        storedValue2 = table[bucket + 3];
+                    }
+
+                    if (storedKey1 != DeletedKey && storedKey1 != EmptyKey) {
+                        processor.accept(storedKey1, storedKey2, storedValue1, storedValue2);
+                    }
+                }
+            } finally {
+                if (acquiredReadLock) {
+                    unlockRead(stamp);
+                }
+            }
+        }
+
+        private void rehash(int newCapacity) {
+            long[] newTable = new long[4 * newCapacity];
+            Arrays.fill(newTable, EmptyKey);
+
+            // Re-hash table
+            for (int i = 0; i < table.length; i += 4) {
+                long storedKey1 = table[i];
+                long storedKey2 = table[i + 1];
+                long storedValue1 = table[i + 2];
+                long storedValue2 = table[i + 3];
+                if (storedKey1 != EmptyKey && storedKey1 != DeletedKey) {
+                    insertKeyValueNoLock(newTable, newCapacity, storedKey1, storedKey2, storedValue1, storedValue2);
+                }
+            }
+
+            table = newTable;
+            usedBuckets = size;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = newCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
+        private static void insertKeyValueNoLock(long[] table, int capacity, long key1, long key2, long value1,
+                long value2) {
+            int bucket = signSafeMod(hash(key1, key2), capacity);
+
+            while (true) {
+                long storedKey1 = table[bucket];
+
+                if (storedKey1 == EmptyKey) {
+                    // The bucket is empty, so we can use it
+                    table[bucket] = key1;
+                    table[bucket + 1] = key2;
+                    table[bucket + 2] = value1;
+                    table[bucket + 3] = value2;
+                    return;
+                }
+
+                bucket = (bucket + 4) & (table.length - 1);
+            }
+        }
+    }
+
+    private static final long HashMixer = 0xc6a4a7935bd1e995L;
+    private static final int R = 47;
+
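+    // MurmurHash2-style 64-bit mixing of both key components (0xc6a4a7935bd1e995
+    // and R = 47 are the classic MurmurHash2 constants). The high 32 bits later
+    // select the section and the low bits select the bucket.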
+    static final long hash(long key1, long key2) {
+        long hash = key1 * HashMixer;
+        hash ^= hash >>> R;
+        hash *= HashMixer;
+        hash += 31 + (key2 * HashMixer);
+        hash ^= hash >>> R;
+        hash *= HashMixer;
+        return hash;
+    }
+
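+    // Reduces the hash modulo the (power-of-two) capacity and multiplies by 4
+    // (<< 2), because every entry occupies four consecutive longs in the table:
+    // key1, key2, value1, value2.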
+    static final int signSafeMod(long n, int max) {
+        return (int) (n & (max - 1)) << 2;
+    }
+
+    private static int alignToPowerOfTwo(int n) {
+        return (int) Math.pow(2, 32 - Integer.numberOfLeadingZeros(n - 1));
+    }
+
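+    // Negative keys (and first value components) are rejected because -1
+    // (EmptyKey) and -2 (DeletedKey) are reserved as in-table sentinels.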
+    private static void checkBiggerEqualZero(long n) {
+        if (n < 0L) {
+            throw new IllegalArgumentException("Keys and values must be >= 0");
+        }
+    }
+
+    /**
+     * A pair of long values.
+     */
+    public static class LongPair implements Comparable<LongPair> {
+        public final long first;
+        public final long second;
+
+        public LongPair(long first, long second) {
+            this.first = first;
+            this.second = second;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj instanceof LongPair) {
+                LongPair other = (LongPair) obj;
+                return first == other.first && second == other.second;
+            }
+            return false;
+        }
+
+        @Override
+        public int hashCode() {
+            return (int) hash(first, second);
+        }
+
+        @Override
+        public int compareTo(LongPair o) {
+            if (first != o.first) {
+                return Long.compare(first, o.first);
+            } else {
+                return Long.compare(second, o.second);
+            }
+        }
+    }
+}
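
The new class behaves like a primitive-specialized ConcurrentMap over (long, long)
pairs. A short, self-contained sketch of the public API introduced above (all
method names come from the file itself; the scenario is illustrative):

    import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;
    import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair;

    public class LongLongPairMapDemo {
        public static void main(String[] args) {
            ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(16)
                    .concurrencyLevel(1)
                    .autoShrink(true)
                    .build();

            map.put(1L, 1L, 11L, 11L);
            map.putIfAbsent(1L, 1L, 99L, 99L);   // no-op: key already present
            LongPair v = map.get(1L, 1L);        // LongPair(11, 11)
            System.out.println(v.first + "," + v.second);

            map.forEach((k1, k2, v1, v2) ->
                    System.out.printf("(%d,%d) -> (%d,%d)%n", k1, k2, v1, v2));

            map.remove(1L, 1L, 11L, 11L);        // value-checked removal
            System.out.println(map.isEmpty());   // true
        }
    }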
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java
new file mode 100644
index 00000000000..98a96804d25
--- /dev/null
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongLongPairHashMapTest.java
@@ -0,0 +1,427 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.common.util.collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair;
+import org.junit.Test;
+
+/**
+ * Test the concurrent long-long pair hashmap class.
+ */
+public class ConcurrentLongLongPairHashMapTest {
+
+    @Test
+    public void testConstructor() {
+        try {
+            ConcurrentLongLongPairHashMap.newBuilder()
+                    .expectedItems(0)
+                    .build();
+            fail("should have thrown exception");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+
+        try {
+            ConcurrentLongLongPairHashMap.newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(0)
+                    .build();
+            fail("should have thrown exception");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+
+        try {
+            ConcurrentLongLongPairHashMap.newBuilder()
+                    .expectedItems(4)
+                    .concurrencyLevel(8)
+                    .build();
+            fail("should have thrown exception");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+    }
+
+    @Test
+    public void simpleInsertions() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(16)
+                .build();
+        assertTrue(map.isEmpty());
+        assertTrue(map.put(1, 1, 11, 11));
+        assertFalse(map.isEmpty());
+
+        assertTrue(map.put(2, 2, 22, 22));
+        assertTrue(map.put(3, 3, 33, 33));
+
+        assertEquals(map.size(), 3);
+
+        assertEquals(map.get(1, 1), new LongPair(11, 11));
+        assertEquals(map.size(), 3);
+
+        assertTrue(map.remove(1, 1));
+        assertEquals(map.size(), 2);
+        assertEquals(map.get(1, 1), null);
+        assertEquals(map.get(5, 5), null);
+        assertEquals(map.size(), 2);
+
+        assertTrue(map.put(1, 1, 11, 11));
+        assertEquals(map.size(), 3);
+        assertTrue(map.put(1, 1, 111, 111));
+        assertEquals(map.size(), 3);
+    }
+
+    @Test
+    public void testRemove() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap
+                .newBuilder()
+                .build();
+
+        assertTrue(map.isEmpty());
+        assertTrue(map.put(1, 1, 11, 11));
+        assertFalse(map.isEmpty());
+
+        assertFalse(map.remove(0, 0));
+        assertFalse(map.remove(1, 1, 111, 111));
+
+        assertFalse(map.isEmpty());
+        assertTrue(map.remove(1, 1, 11, 11));
+        assertTrue(map.isEmpty());
+    }
+
+    @Test
+    public void testClear() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.put(1, 1, 11, 11));
+        assertTrue(map.put(2, 2, 22, 22));
+        assertTrue(map.put(3, 3, 33, 33));
+
+        assertTrue(map.capacity() == 8);
+        map.clear();
+        assertTrue(map.capacity() == 4);
+    }
+
+    @Test
+    public void testExpandAndShrink() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
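+        // Capacity arithmetic behind the assertions below, derived from the
+        // constructor: one section with perSectionCapacity = (int) (2 / 0.66) = 3,
+        // aligned up to 4. resizeThresholdUp = (int) (4 * 0.66) = 2, so the third
+        // put expands the table to 8; resizeThresholdBelow = (int) (8 * 0.25) = 2,
+        // so the map shrinks back to 4 only once fewer than 2 entries remain.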
+        assertTrue(map.put(1, 1, 11, 11));
+        assertTrue(map.put(2, 2, 22, 22));
+        assertTrue(map.put(3, 3, 33, 33));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, 1, 11, 11));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, 2, 22, 22));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        // expand hashmap
+        assertTrue(map.put(4, 4, 44, 44));
+        assertTrue(map.put(5, 5, 55, 55));
+        assertTrue(map.capacity() == 8);
+
+        // Verify that the map does not keep shrinking at every remove() operation
+        assertTrue(map.put(6, 6, 66, 66));
+        assertTrue(map.remove(6, 6, 66, 66));
+        assertTrue(map.capacity() == 8);
+    }
+
+    @Test
+    public void testNegativeUsedBucketCount() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
+
+        map.put(0, 0, 0, 0);
+        assertEquals(1, map.getUsedBucketCount());
+        map.put(0, 0, 1, 1);
+        assertEquals(1, map.getUsedBucketCount());
+        map.remove(0, 0);
+        assertEquals(0, map.getUsedBucketCount());
+        map.remove(0, 0);
+        assertEquals(0, map.getUsedBucketCount());
+    }
+
+    @Test
+    public void testRehashing() {
+        int n = 16;
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
+        assertEquals(map.capacity(), n);
+        assertEquals(map.size(), 0);
+
+        for (int i = 0; i < n; i++) {
+            map.put(i, i, i, i);
+        }
+
+        assertEquals(map.capacity(), 2 * n);
+        assertEquals(map.size(), n);
+    }
+
+    @Test
+    public void testRehashingWithDeletes() {
+        int n = 16;
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
+        assertEquals(map.capacity(), n);
+        assertEquals(map.size(), 0);
+
+        for (int i = 0; i < n / 2; i++) {
+            map.put(i, i, i, i);
+        }
+
+        for (int i = 0; i < n / 2; i++) {
+            map.remove(i, i);
+        }
+
+        for (int i = n; i < (2 * n); i++) {
+            map.put(i, i, i, i);
+        }
+
+        assertEquals(map.capacity(), 2 * n);
+        assertEquals(map.size(), n);
+    }
+
+    @Test
+    public void concurrentInsertions() throws Throwable {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .build();
+        ExecutorService executor = Executors.newCachedThreadPool();
+
+        final int nThreads = 16;
+        final int n = 100_000;
+        long value = 55;
+
+        List<Future<?>> futures = new ArrayList<>();
+        for (int i = 0; i < nThreads; i++) {
+            final int threadIdx = i;
+
+            futures.add(executor.submit(() -> {
+                Random random = new Random();
+
+                for (int j = 0; j < n; j++) {
+                    long key1 = Math.abs(random.nextLong());
+                    // Ensure keys are unique
+                    key1 -= key1 % (threadIdx + 1);
+
+                    long key2 = Math.abs(random.nextLong());
+                    // Ensure keys are unique
+                    key2 -= key2 % (threadIdx + 1);
+
+                    map.put(key1, key2, value, value);
+                }
+            }));
+        }
+
+        for (Future<?> future : futures) {
+            future.get();
+        }
+
+        assertEquals(map.size(), n * nThreads);
+
+        executor.shutdown();
+    }
+
+    @Test
+    public void concurrentInsertionsAndReads() throws Throwable {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .build();
+        ExecutorService executor = Executors.newCachedThreadPool();
+
+        final int nThreads = 16;
+        final int n = 100_000;
+        final long value = 55;
+
+        List<Future<?>> futures = new ArrayList<>();
+        for (int i = 0; i < nThreads; i++) {
+            final int threadIdx = i;
+
+            futures.add(executor.submit(() -> {
+                Random random = new Random();
+
+                for (int j = 0; j < n; j++) {
+                    long key1 = Math.abs(random.nextLong());
+                    // Ensure keys are unique
+                    key1 -= key1 % (threadIdx + 1);
+
+                    long key2 = Math.abs(random.nextLong());
+                    // Ensure keys are unique
+                    key2 -= key2 % (threadIdx + 1);
+
+                    map.put(key1, key2, value, value);
+                }
+            }));
+        }
+
+        for (Future<?> future : futures) {
+            future.get();
+        }
+
+        assertEquals(map.size(), n * nThreads);
+
+        executor.shutdown();
+    }
+
+    @Test
+    public void testIteration() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .build();
+
+        assertEquals(map.keys(), Collections.emptyList());
+        assertEquals(map.values(), Collections.emptyList());
+
+        map.put(0, 0, 0, 0);
+
+        assertEquals(map.keys(), Lists.newArrayList(new LongPair(0, 0)));
+        assertEquals(map.values(), Lists.newArrayList(new LongPair(0, 0)));
+
+        map.remove(0, 0);
+
+        assertEquals(map.keys(), Collections.emptyList());
+        assertEquals(map.values(), Collections.emptyList());
+
+        map.put(0, 0, 0, 0);
+        map.put(1, 1, 11, 11);
+        map.put(2, 2, 22, 22);
+
+        List<LongPair> keys = map.keys();
+        Collections.sort(keys);
+        assertEquals(keys, Lists.newArrayList(new LongPair(0, 0), new LongPair(1, 1), new LongPair(2, 2)));
+
+        List<LongPair> values = map.values();
+        Collections.sort(values);
+        assertEquals(values, Lists.newArrayList(new LongPair(0, 0), new LongPair(11, 11), new LongPair(22, 22)));
+
+        map.put(1, 1, 111, 111);
+
+        keys = map.keys();
+        Collections.sort(keys);
+        assertEquals(keys, Lists.newArrayList(new LongPair(0, 0), new LongPair(1, 1), new LongPair(2, 2)));
+
+        values = map.values();
+        Collections.sort(values);
+        assertEquals(values, Lists.newArrayList(new LongPair(0, 0), new LongPair(22, 22), new LongPair(111, 111)));
+
+        map.clear();
+        assertTrue(map.isEmpty());
+    }
+
+    @Test
+    public void testPutIfAbsent() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .build();
+
+        assertTrue(map.putIfAbsent(1, 1, 11, 11));
+        assertEquals(map.get(1, 1), new LongPair(11, 11));
+
+        assertFalse(map.putIfAbsent(1, 1, 111, 111));
+        assertEquals(map.get(1, 1), new LongPair(11, 11));
+    }
+
+    @Test
+    public void testInvalidKeys() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
+
+        try {
+            map.put(-5, 3, 4, 4);
+            fail("should have failed");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+
+        try {
+            map.get(-1, 0);
+            fail("should have failed");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+
+        try {
+            map.containsKey(-1, 0);
+            fail("should have failed");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+
+        try {
+            map.putIfAbsent(-1, 1, 1, 1);
+            fail("should have failed");
+        } catch (IllegalArgumentException e) {
+            // ok
+        }
+    }
+
+    @Test
+    public void testAsMap() {
+        ConcurrentLongLongPairHashMap lmap = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
+        lmap.put(1, 1, 11, 11);
+        lmap.put(2, 2, 22, 22);
+        lmap.put(3, 3, 33, 33);
+
+        Map<LongPair, LongPair> map = new HashMap<>();
+        map.put(new LongPair(1, 1), new LongPair(11, 11));
+        map.put(new LongPair(2, 2), new LongPair(22, 22));
+        map.put(new LongPair(3, 3), new LongPair(33, 33));
+
+        assertEquals(map, lmap.asMap());
+    }
+}


[pulsar] 17/26: Fix typo in checkPermissionsAsync (#15273)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 9122f93ea94f23257f088109ce7e77cf2bf5f0db
Author: Xiaoyu Hou <An...@gmail.com>
AuthorDate: Sat Apr 23 11:25:03 2022 +0800

    Fix typo in checkPermissionsAsync (#15273)
    
    (cherry picked from commit 57aff0a5d93ed5a9b225ce5c0364db0dd0e9eb74)
---
 .../src/main/java/org/apache/pulsar/broker/service/Consumer.java        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
index 0f8a6712676..7eabeaafd1f 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/Consumer.java
@@ -795,7 +795,7 @@ public class Consumer {
                             cnx.getAuthenticationData(), subscription.getName())
                     .handle((ok, e) -> {
                         if (e != null) {
-                            log.warn("[{}] Get unexpected error while autorizing [{}]  {}", appId,
+                            log.warn("[{}] Get unexpected error while authorizing [{}]  {}", appId,
                                     subscription.getTopicName(), e.getMessage(), e);
                         }
 


[pulsar] 06/26: Upgrade Netty to 4.1.76.Final, Netty Tcnative, grpc and protobuf (#15212)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit d39c65511964b239fdf5e11bd57313bb366be0d6
Author: Lari Hotari <lh...@users.noreply.github.com>
AuthorDate: Tue Apr 19 19:14:44 2022 +0300

    Upgrade Netty to 4.1.76.Final, Netty Tcnative, grpc and protobuf (#15212)
    
    * Upgrade Netty to 4.1.76.Final and Netty Tcnative to 2.0.51.Final
    
    Fixes #14015
    - release notes https://netty.io/news/2022/04/12/4-1-76-Final.html
      - contains fix for https://github.com/netty/netty/issues/11695
    
    * Upgrade grpc to 1.45.1 and protobuf to 3.19.2
    
    - grpc < 1.45.1 is not compatible with Netty > 4.1.74.Final
      - https://github.com/grpc/grpc-java/pull/9004
    
    (cherry picked from commit 332a3c74c03184bb8d1298450ff21e5dc93be375)
---
 buildtools/pom.xml                               |  2 +-
 distribution/server/src/assemble/LICENSE.bin.txt | 94 +++++++++++++-----------
 pom.xml                                          | 35 +++++++--
 pulsar-sql/presto-distribution/LICENSE           | 63 ++++++++--------
 4 files changed, 115 insertions(+), 79 deletions(-)

diff --git a/buildtools/pom.xml b/buildtools/pom.xml
index 345f942ac20..0e42dc0a4fa 100644
--- a/buildtools/pom.xml
+++ b/buildtools/pom.xml
@@ -134,7 +134,7 @@
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty-common</artifactId>
-      <version>4.1.74.Final</version>
+      <version>4.1.76.Final</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
diff --git a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt
index 254216614c8..dcf4039b9f2 100644
--- a/distribution/server/src/assemble/LICENSE.bin.txt
+++ b/distribution/server/src/assemble/LICENSE.bin.txt
@@ -352,26 +352,31 @@ The Apache Software License, Version 2.0
     - org.apache.commons-commons-compress-1.21.jar
     - org.apache.commons-commons-lang3-3.11.jar
  * Netty
-    - io.netty-netty-buffer-4.1.74.Final.jar
-    - io.netty-netty-codec-4.1.74.Final.jar
-    - io.netty-netty-codec-dns-4.1.74.Final.jar
-    - io.netty-netty-codec-http-4.1.74.Final.jar
-    - io.netty-netty-codec-http2-4.1.74.Final.jar
-    - io.netty-netty-codec-socks-4.1.74.Final.jar
-    - io.netty-netty-codec-haproxy-4.1.74.Final.jar
-    - io.netty-netty-common-4.1.74.Final.jar
-    - io.netty-netty-handler-4.1.74.Final.jar
-    - io.netty-netty-handler-proxy-4.1.74.Final.jar
-    - io.netty-netty-resolver-4.1.74.Final.jar
-    - io.netty-netty-resolver-dns-4.1.74.Final.jar
-    - io.netty-netty-transport-4.1.74.Final.jar
-    - io.netty-netty-transport-classes-epoll-4.1.74.Final.jar
-    - io.netty-netty-transport-native-epoll-4.1.74.Final-linux-x86_64.jar
-    - io.netty-netty-transport-native-epoll-4.1.74.Final.jar
-    - io.netty-netty-transport-native-unix-common-4.1.74.Final.jar
-    - io.netty-netty-transport-native-unix-common-4.1.74.Final-linux-x86_64.jar
-    - io.netty-netty-tcnative-boringssl-static-2.0.48.Final.jar
-    - io.netty-netty-tcnative-classes-2.0.48.Final.jar
+    - io.netty-netty-buffer-4.1.76.Final.jar
+    - io.netty-netty-codec-4.1.76.Final.jar
+    - io.netty-netty-codec-dns-4.1.76.Final.jar
+    - io.netty-netty-codec-http-4.1.76.Final.jar
+    - io.netty-netty-codec-http2-4.1.76.Final.jar
+    - io.netty-netty-codec-socks-4.1.76.Final.jar
+    - io.netty-netty-codec-haproxy-4.1.76.Final.jar
+    - io.netty-netty-common-4.1.76.Final.jar
+    - io.netty-netty-handler-4.1.76.Final.jar
+    - io.netty-netty-handler-proxy-4.1.76.Final.jar
+    - io.netty-netty-resolver-4.1.76.Final.jar
+    - io.netty-netty-resolver-dns-4.1.76.Final.jar
+    - io.netty-netty-transport-4.1.76.Final.jar
+    - io.netty-netty-transport-classes-epoll-4.1.76.Final.jar
+    - io.netty-netty-transport-native-epoll-4.1.76.Final-linux-x86_64.jar
+    - io.netty-netty-transport-native-epoll-4.1.76.Final.jar
+    - io.netty-netty-transport-native-unix-common-4.1.76.Final.jar
+    - io.netty-netty-transport-native-unix-common-4.1.76.Final-linux-x86_64.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final-linux-aarch_64.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final-linux-x86_64.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final-osx-aarch_64.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final-osx-x86_64.jar
+    - io.netty-netty-tcnative-boringssl-static-2.0.51.Final-windows-x86_64.jar
+    - io.netty-netty-tcnative-classes-2.0.51.Final.jar
  * Prometheus client
     - io.prometheus-simpleclient-0.5.0.jar
     - io.prometheus-simpleclient_common-0.5.0.jar
@@ -461,27 +466,27 @@ The Apache Software License, Version 2.0
      - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.4.32.jar
      - org.jetbrains-annotations-13.0.jar
  * gRPC
-    - io.grpc-grpc-all-1.42.1.jar
-    - io.grpc-grpc-auth-1.42.1.jar
-    - io.grpc-grpc-context-1.42.1.jar
-    - io.grpc-grpc-core-1.42.1.jar
-    - io.grpc-grpc-netty-1.42.1.jar
-    - io.grpc-grpc-protobuf-1.42.1.jar
-    - io.grpc-grpc-protobuf-lite-1.42.1.jar
-    - io.grpc-grpc-stub-1.42.1.jar
-    - io.grpc-grpc-alts-1.42.1.jar
-    - io.grpc-grpc-api-1.42.1.jar
-    - io.grpc-grpc-grpclb-1.42.1.jar
-    - io.grpc-grpc-netty-shaded-1.42.1.jar
-    - io.grpc-grpc-services-1.42.1.jar
-    - io.grpc-grpc-xds-1.42.1.jar
-    - io.grpc-grpc-rls-1.42.1.jar
+    - io.grpc-grpc-all-1.45.1.jar
+    - io.grpc-grpc-auth-1.45.1.jar
+    - io.grpc-grpc-context-1.45.1.jar
+    - io.grpc-grpc-core-1.45.1.jar
+    - io.grpc-grpc-netty-1.45.1.jar
+    - io.grpc-grpc-protobuf-1.45.1.jar
+    - io.grpc-grpc-protobuf-lite-1.45.1.jar
+    - io.grpc-grpc-stub-1.45.1.jar
+    - io.grpc-grpc-alts-1.45.1.jar
+    - io.grpc-grpc-api-1.45.1.jar
+    - io.grpc-grpc-grpclb-1.45.1.jar
+    - io.grpc-grpc-netty-shaded-1.45.1.jar
+    - io.grpc-grpc-services-1.45.1.jar
+    - io.grpc-grpc-xds-1.45.1.jar
+    - io.grpc-grpc-rls-1.45.1.jar
     - com.google.auto.service-auto-service-annotations-1.0.jar
   * Perfmark
     - io.perfmark-perfmark-api-0.19.0.jar
   * OpenCensus
-    - io.opencensus-opencensus-api-0.18.0.jar
-    - io.opencensus-opencensus-contrib-http-util-0.24.0.jar
+    - io.opencensus-opencensus-api-0.28.0.jar
+    - io.opencensus-opencensus-contrib-http-util-0.28.0.jar
     - io.opencensus-opencensus-proto-0.2.0.jar
   * Jodah
     - net.jodah-typetools-0.5.0.jar
@@ -524,9 +529,10 @@ The Apache Software License, Version 2.0
   * Snappy Java
     - org.xerial.snappy-snappy-java-1.1.7.jar
   * Google HTTP Client
-    - com.google.http-client-google-http-client-jackson2-1.38.0.jar
-    - com.google.http-client-google-http-client-1.38.0.jar
-    - com.google.auto.value-auto-value-annotations-1.7.4.jar
+    - com.google.http-client-google-http-client-jackson2-1.41.0.jar
+    - com.google.http-client-google-http-client-gson-1.41.0.jar
+    - com.google.http-client-google-http-client-1.41.0.jar
+    - com.google.auto.value-auto-value-annotations-1.9.jar
     - com.google.re2j-re2j-1.5.jar
   * Jetcd
     - io.etcd-jetcd-common-0.5.11.jar
@@ -536,8 +542,8 @@ The Apache Software License, Version 2.0
 
 BSD 3-clause "New" or "Revised" License
  * Google auth library
-    - com.google.auth-google-auth-library-credentials-0.22.2.jar -- licenses/LICENSE-google-auth-library.txt
-    - com.google.auth-google-auth-library-oauth2-http-0.22.2.jar -- licenses/LICENSE-google-auth-library.txt
+    - com.google.auth-google-auth-library-credentials-1.4.0.jar -- licenses/LICENSE-google-auth-library.txt
+    - com.google.auth-google-auth-library-oauth2-http-1.4.0.jar -- licenses/LICENSE-google-auth-library.txt
  * LevelDB -- (included in org.rocksdb.*.jar) -- licenses/LICENSE-LevelDB.txt
  * JSR305 -- com.google.code.findbugs-jsr305-3.0.2.jar -- licenses/LICENSE-JSR305.txt
  * JLine -- jline-jline-2.14.6.jar -- licenses/LICENSE-JLine.txt
@@ -556,8 +562,8 @@ MIT License
 
 Protocol Buffers License
  * Protocol Buffers
-   - com.google.protobuf-protobuf-java-3.16.1.jar -- licenses/LICENSE-protobuf.txt
-   - com.google.protobuf-protobuf-java-util-3.16.1.jar -- licenses/LICENSE-protobuf.txt
+   - com.google.protobuf-protobuf-java-3.19.2.jar -- licenses/LICENSE-protobuf.txt
+   - com.google.protobuf-protobuf-java-util-3.19.2.jar -- licenses/LICENSE-protobuf.txt
 
 CDDL-1.1 -- licenses/LICENSE-CDDL-1.1.txt
  * Java Annotations API
diff --git a/pom.xml b/pom.xml
index 8af547c2efb..6d224cdecb1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -110,8 +110,8 @@ flexible messaging model and an intuitive client API.</description>
     <snappy.version>1.1.7</snappy.version> <!-- ZooKeeper server -->
     <dropwizardmetrics.version>3.2.5</dropwizardmetrics.version> <!-- ZooKeeper server -->
     <curator.version>5.1.0</curator.version>
-    <netty.version>4.1.74.Final</netty.version>
-    <netty-tc-native.version>2.0.48.Final</netty-tc-native.version>
+    <netty.version>4.1.76.Final</netty.version>
+    <netty-tc-native.version>2.0.51.Final</netty-tc-native.version>
     <jetty.version>9.4.44.v20210927</jetty.version>
     <conscrypt.version>2.5.2</conscrypt.version>
     <jersey.version>2.34</jersey.version>
@@ -131,9 +131,10 @@ flexible messaging model and an intuitive client API.</description>
     <puppycrawl.checkstyle.version>8.37</puppycrawl.checkstyle.version>
     <dockerfile-maven.version>1.4.13</dockerfile-maven.version>
     <typetools.version>0.5.0</typetools.version>
-    <protobuf3.version>3.16.1</protobuf3.version>
+    <protobuf3.version>3.19.2</protobuf3.version>
     <protoc3.version>${protobuf3.version}</protoc3.version>
-    <grpc.version>1.42.1</grpc.version>
+    <grpc.version>1.45.1</grpc.version>
+    <google-http-client.version>1.41.0</google-http-client.version>
     <perfmark.version>0.19.0</perfmark.version>
     <protoc-gen-grpc-java.version>${grpc.version}</protoc-gen-grpc-java.version>
     <gson.version>2.8.9</gson.version>
@@ -162,7 +163,7 @@ flexible messaging model and an intuitive client API.</description>
     <debezium.version>1.7.2.Final</debezium.version>
     <debezium.postgresql.version>42.2.25</debezium.postgresql.version>
     <jsonwebtoken.version>0.11.1</jsonwebtoken.version>
-    <opencensus.version>0.18.0</opencensus.version>
+    <opencensus.version>0.28.0</opencensus.version>
     <hbase.version>2.4.9</hbase.version>
     <guava.version>31.0.1-jre</guava.version>
     <jcip.version>1.0</jcip.version>
@@ -986,6 +987,24 @@ flexible messaging model and an intuitive client API.</description>
         <version>${grpc.version}</version>
       </dependency>
 
+      <dependency>
+        <groupId>com.google.http-client</groupId>
+        <artifactId>google-http-client</artifactId>
+        <version>${google-http-client.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>com.google.http-client</groupId>
+        <artifactId>google-http-client-jackson2</artifactId>
+        <version>${google-http-client.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>com.google.http-client</groupId>
+        <artifactId>google-http-client-gson</artifactId>
+        <version>${google-http-client.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>io.grpc</groupId>
         <artifactId>grpc-netty-shaded</artifactId>
@@ -1142,6 +1161,12 @@ flexible messaging model and an intuitive client API.</description>
         <version>${opencensus.version}</version>
       </dependency>
 
+      <dependency>
+        <groupId>io.opencensus</groupId>
+        <artifactId>opencensus-contrib-http-util</artifactId>
+        <version>${opencensus.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>io.opencensus</groupId>
         <artifactId>opencensus-contrib-grpc-metrics</artifactId>
diff --git a/pulsar-sql/presto-distribution/LICENSE b/pulsar-sql/presto-distribution/LICENSE
index 1b475cd8444..9ba14376164 100644
--- a/pulsar-sql/presto-distribution/LICENSE
+++ b/pulsar-sql/presto-distribution/LICENSE
@@ -233,35 +233,40 @@ The Apache Software License, Version 2.0
     - commons-lang3-3.11.jar
  * Netty
     - netty-3.10.6.Final.jar
-    - netty-buffer-4.1.74.Final.jar
-    - netty-codec-4.1.74.Final.jar
-    - netty-codec-dns-4.1.74.Final.jar
-    - netty-codec-http-4.1.74.Final.jar
-    - netty-codec-haproxy-4.1.74.Final.jar
-    - netty-codec-socks-4.1.74.Final.jar
-    - netty-handler-proxy-4.1.74.Final.jar
-    - netty-common-4.1.74.Final.jar
-    - netty-handler-4.1.74.Final.jar
+    - netty-buffer-4.1.76.Final.jar
+    - netty-codec-4.1.76.Final.jar
+    - netty-codec-dns-4.1.76.Final.jar
+    - netty-codec-http-4.1.76.Final.jar
+    - netty-codec-haproxy-4.1.76.Final.jar
+    - netty-codec-socks-4.1.76.Final.jar
+    - netty-handler-proxy-4.1.76.Final.jar
+    - netty-common-4.1.76.Final.jar
+    - netty-handler-4.1.76.Final.jar
     - netty-reactive-streams-2.0.4.jar
-    - netty-resolver-4.1.74.Final.jar
-    - netty-resolver-dns-4.1.74.Final.jar
-    - netty-tcnative-boringssl-static-2.0.48.Final.jar
-    - netty-tcnative-classes-2.0.48.Final.jar
-    - netty-transport-4.1.74.Final.jar
-    - netty-transport-classes-epoll-4.1.74.Final.jar
-    - netty-transport-native-epoll-4.1.74.Final-linux-x86_64.jar
-    - netty-transport-native-unix-common-4.1.74.Final.jar
-    - netty-transport-native-unix-common-4.1.74.Final-linux-x86_64.jar
-    - netty-codec-http2-4.1.74.Final.jar
+    - netty-resolver-4.1.76.Final.jar
+    - netty-resolver-dns-4.1.76.Final.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final-linux-aarch_64.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final-linux-x86_64.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final-osx-aarch_64.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final-osx-x86_64.jar
+    - netty-tcnative-boringssl-static-2.0.51.Final-windows-x86_64.jar
+    - netty-tcnative-classes-2.0.51.Final.jar
+    - netty-transport-4.1.76.Final.jar
+    - netty-transport-classes-epoll-4.1.76.Final.jar
+    - netty-transport-native-epoll-4.1.76.Final-linux-x86_64.jar
+    - netty-transport-native-unix-common-4.1.76.Final.jar
+    - netty-transport-native-unix-common-4.1.76.Final-linux-x86_64.jar
+    - netty-codec-http2-4.1.76.Final.jar
  * GRPC
-    - grpc-api-1.42.1.jar
-    - grpc-context-1.42.1.jar
-    - grpc-core-1.42.1.jar
-    - grpc-grpclb-1.42.1.jar
-    - grpc-netty-1.42.1.jar
-    - grpc-protobuf-1.42.1.jar
-    - grpc-protobuf-lite-1.42.1.jar
-    - grpc-stub-1.42.1.jar
+    - grpc-api-1.45.1.jar
+    - grpc-context-1.45.1.jar
+    - grpc-core-1.45.1.jar
+    - grpc-grpclb-1.45.1.jar
+    - grpc-netty-1.45.1.jar
+    - grpc-protobuf-1.45.1.jar
+    - grpc-protobuf-lite-1.45.1.jar
+    - grpc-stub-1.45.1.jar
   * JEtcd
     - jetcd-common-0.5.11.jar
     - jetcd-core-0.5.11.jar
@@ -479,8 +484,8 @@ The Apache Software License, Version 2.0
 
 Protocol Buffers License
  * Protocol Buffers
-   - protobuf-java-3.16.1.jar
-   - protobuf-java-util-3.16.1.jar
+   - protobuf-java-3.19.2.jar
+   - protobuf-java-util-3.19.2.jar
    - proto-google-common-protos-2.0.1.jar
 
 BSD 3-clause "New" or "Revised" License


[pulsar] 09/26: Improve skipping of DNS resolution when creating AuthenticationDataHttp instance (#15228)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 663ebe071a4a1decd93b5ee2ac66eb6587627334
Author: Lari Hotari <lh...@users.noreply.github.com>
AuthorDate: Wed Apr 20 11:54:51 2022 +0300

    Improve skipping of DNS resolution when creating AuthenticationDataHttp instance (#15228)
    
    - improves solution added in #15221
      - It's better to use the JDK provided InetSocketAddress.createUnresolved method
        to prevent unnecessary DNS resolution
    
    (cherry picked from commit e71b98ae157c4c108802661eaa72913e9c9e0bef)
---
 .../apache/pulsar/broker/authentication/AuthenticationDataHttp.java  | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
index 75a75225576..f48dbc263e0 100644
--- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
+++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
@@ -18,7 +18,6 @@
  */
 package org.apache.pulsar.broker.authentication;
 
-import io.netty.util.NetUtil;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import javax.servlet.http.HttpServletRequest;
@@ -35,9 +34,7 @@ public class AuthenticationDataHttp implements AuthenticationDataSource {
             throw new IllegalArgumentException();
         }
         this.request = request;
-        this.remoteAddress =
-                new InetSocketAddress(NetUtil.createInetAddressFromIpAddressString(request.getRemoteAddr()),
-                        request.getRemotePort());
+        this.remoteAddress = InetSocketAddress.createUnresolved(request.getRemoteAddr(), request.getRemotePort());
     }
 
     /*
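
A note on the JDK call used above: `new InetSocketAddress(String, int)` performs a
forward DNS lookup in its constructor, while `createUnresolved` stores the host name
and port as-is. A tiny stand-alone illustration (the host name is arbitrary):

    import java.net.InetSocketAddress;

    public class UnresolvedAddressDemo {
        public static void main(String[] args) {
            // Consults the resolver while constructing the object:
            InetSocketAddress resolved = new InetSocketAddress("example.com", 443);
            // Skips resolution entirely:
            InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 443);
            System.out.println(resolved.isUnresolved());   // false if DNS succeeded
            System.out.println(unresolved.isUnresolved()); // always true
        }
    }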


[pulsar] 22/26: [C++] Wait until event loop terminates when closing the Client (#15316)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 6d365c995a2a3ec3b48c57b1faaa40069e52d3c6
Author: Yunze Xu <xy...@163.com>
AuthorDate: Wed Apr 27 15:10:01 2022 +0800

    [C++] Wait until event loop terminates when closing the Client (#15316)
    
    * [C++] Wait until event loop terminates when closing the Client
    
    Fixes #13627
    
    ### Motivation
    
    Unlike the Java client, the C++ client's `Client` has a `shutdown` method
    that is responsible for executing the following steps:
    1. Call `shutdown` on all internal producers and consumers.
    2. Close all connections in the pool.
    3. Close all executors of the executor providers.
    
    When an executor is closed, it calls `io_service::stop()`, which makes
    the event loop (`io_service::run()`) in another thread return as soon as
    possible. However, there is no wait operation. If a client fails to
    create a producer or consumer, the `close` method calls `shutdown`,
    which closes all executors immediately, and the application exits. In
    that case, the detached event loop thread might not have exited yet, and
    valgrind will detect a memory leak.
    
    This memory leak can be avoided by sleeping for a while after
    `Client::close` returns, or if the application still has other work to
    do after that. However, we should still adopt the semantics that after
    `Client::shutdown` returns, all event loop threads have terminated.
    
    ### Modifications
    - Add a timeout parameter to the `close` method of `ExecutorService` and
      `ExecutorServiceProvider`, used as the maximum blocking time when it is
      positive (zero means do not block, negative means wait indefinitely).
    - Add a `TimeoutProcessor` helper class to update the remaining timeout
      after each call to a method that accepts the timeout parameter (see the
      sketch after this list).
    - Call `close` on all `ExecutorServiceProvider`s in
      `ClientImpl::shutdown` with a 500ms timeout, which should be long
      enough. In addition, in the `handleClose` method, call `shutdown` in
      another thread to avoid a deadlock.
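
    As referenced above, here is a rough stand-alone Java analogue of the
    `TimeoutProcessor` bookkeeping (hypothetical demo code; the actual
    implementation is the C++ class in the diff below):

    ```java
    import java.util.concurrent.TimeUnit;

    // Spreads a single timeout budget across several sequential blocking calls.
    // > 0: wait at most the remaining budget; == 0: do not wait; < 0: wait forever.
    class TimeoutBudget {
        private long leftMs;
        private long before;

        TimeoutBudget(long timeoutMs) { this.leftMs = timeoutMs; }

        long getLeftTimeout() { return leftMs; }

        void tik() { before = System.nanoTime(); }

        void tok() {
            if (leftMs > 0) {
                leftMs -= TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before);
                if (leftMs <= 0) {
                    leftMs = 0; // budget exhausted: later calls must not block
                }
            }
        }
    }
    ```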
    
    ### Verifying this change
    
    After applying this patch, the reproducer code in #13627 passes the
    valgrind check.
    
    ```
    ==3013== LEAK SUMMARY:
    ==3013==    definitely lost: 0 bytes in 0 blocks
    ==3013==    indirectly lost: 0 bytes in 0 blocks
    ==3013==      possibly lost: 0 bytes in 0 blocks
    ```
    
    (cherry picked from commit cd78f39a92521f3847b022580a6e66e651b5cb4b)
---
 pulsar-client-cpp/lib/ClientImpl.cc         | 37 +++++++++++++++++-----
 pulsar-client-cpp/lib/ExecutorService.cc    | 33 +++++++++++++++-----
 pulsar-client-cpp/lib/ExecutorService.h     | 11 +++++--
 pulsar-client-cpp/lib/TimeUtils.h           | 48 +++++++++++++++++++++++++++++
 pulsar-client-cpp/tests/CustomLoggerTest.cc | 26 ++++++++++------
 5 files changed, 130 insertions(+), 25 deletions(-)

diff --git a/pulsar-client-cpp/lib/ClientImpl.cc b/pulsar-client-cpp/lib/ClientImpl.cc
index 0b07b6e9f2d..60e3e248c12 100644
--- a/pulsar-client-cpp/lib/ClientImpl.cc
+++ b/pulsar-client-cpp/lib/ClientImpl.cc
@@ -26,6 +26,7 @@
 #include "PartitionedConsumerImpl.h"
 #include "MultiTopicsConsumerImpl.h"
 #include "PatternMultiTopicsConsumerImpl.h"
+#include "TimeUtils.h"
 #include <pulsar/ConsoleLoggerFactory.h>
 #include <boost/algorithm/string/predicate.hpp>
 #include <sstream>
@@ -35,6 +36,7 @@
 #include <algorithm>
 #include <random>
 #include <mutex>
+#include <thread>
 #ifdef USE_LOG4CXX
 #include "Log4CxxLogger.h"
 #endif
@@ -538,13 +540,20 @@ void ClientImpl::handleClose(Result result, SharedInt numberOfOpenHandlers, Resu
         lock.unlock();
 
         LOG_DEBUG("Shutting down producers and consumers for client");
-        shutdown();
-        if (callback) {
-            if (closingError != ResultOk) {
-                LOG_DEBUG("Problem in closing client, could not close one or more consumers or producers");
+        // handleClose() is called in the ExecutorService's event loop, while shutdown() waits until the
+        // event loop exits. So here we use another thread to call shutdown().
+        auto self = shared_from_this();
+        std::thread shutdownTask{[this, self, callback] {
+            shutdown();
+            if (callback) {
+                if (closingError != ResultOk) {
+                    LOG_DEBUG(
+                        "Problem in closing client, could not close one or more consumers or producers");
+                }
+                callback(closingError);
             }
-            callback(closingError);
-        }
+        }};
+        shutdownTask.detach();
     }
 }
 
@@ -580,11 +589,25 @@ void ClientImpl::shutdown() {
         return;
     }
     LOG_DEBUG("ConnectionPool is closed");
-    ioExecutorProvider_->close();
+
+    // 500ms as the timeout is long enough because ExecutorService::close calls io_service::stop() internally
+    // and waits until io_service::run() in another thread returns, which should be as soon as possible after
+    // stop() is called.
+    TimeoutProcessor<std::chrono::milliseconds> timeoutProcessor{500};
+
+    timeoutProcessor.tik();
+    ioExecutorProvider_->close(timeoutProcessor.getLeftTimeout());
+    timeoutProcessor.tok();
     LOG_DEBUG("ioExecutorProvider_ is closed");
+
+    timeoutProcessor.tik();
     listenerExecutorProvider_->close();
+    timeoutProcessor.tok();
     LOG_DEBUG("listenerExecutorProvider_ is closed");
+
+    timeoutProcessor.tik();
     partitionListenerExecutorProvider_->close();
+    timeoutProcessor.tok();
     LOG_DEBUG("partitionListenerExecutorProvider_ is closed");
 }
 
diff --git a/pulsar-client-cpp/lib/ExecutorService.cc b/pulsar-client-cpp/lib/ExecutorService.cc
index 9cfbd82881d..b9b5ed46478 100644
--- a/pulsar-client-cpp/lib/ExecutorService.cc
+++ b/pulsar-client-cpp/lib/ExecutorService.cc
@@ -21,6 +21,7 @@
 #include <boost/asio.hpp>
 #include <functional>
 #include <memory>
+#include "TimeUtils.h"
 
 #include "LogUtils.h"
 DECLARE_LOG_OBJECT()
@@ -29,7 +30,7 @@ namespace pulsar {
 
 ExecutorService::ExecutorService() {}
 
-ExecutorService::~ExecutorService() { close(); }
+ExecutorService::~ExecutorService() { close(0); }
 
 void ExecutorService::start() {
     auto self = shared_from_this();
@@ -37,11 +38,16 @@ void ExecutorService::start() {
         if (self->isClosed()) {
             return;
         }
+        LOG_INFO("Run io_service in a single thread");
         boost::system::error_code ec;
         self->getIOService().run(ec);
         if (ec) {
             LOG_ERROR("Failed to run io_service: " << ec.message());
+        } else {
+            LOG_INFO("Event loop of ExecutorService exits successfully");
         }
+        self->ioServiceDone_ = true;
+        self->cond_.notify_all();
     }};
     t.detach();
 }
@@ -79,13 +85,23 @@ DeadlineTimerPtr ExecutorService::createDeadlineTimer() {
     return DeadlineTimerPtr(new boost::asio::deadline_timer(io_service_));
 }
 
-void ExecutorService::close() {
+void ExecutorService::close(long timeoutMs) {
     bool expectedState = false;
     if (!closed_.compare_exchange_strong(expectedState, true)) {
         return;
     }
+    if (timeoutMs == 0) {  // non-blocking
+        io_service_.stop();
+        return;
+    }
 
+    std::unique_lock<std::mutex> lock{mutex_};
     io_service_.stop();
+    if (timeoutMs > 0) {
+        cond_.wait_for(lock, std::chrono::milliseconds(timeoutMs), [this] { return ioServiceDone_.load(); });
+    } else {  // < 0
+        cond_.wait(lock, [this] { return ioServiceDone_.load(); });
+    }
 }
 
 void ExecutorService::postWork(std::function<void(void)> task) { io_service_.post(task); }
@@ -106,14 +122,17 @@ ExecutorServicePtr ExecutorServiceProvider::get() {
     return executors_[idx];
 }
 
-void ExecutorServiceProvider::close() {
+void ExecutorServiceProvider::close(long timeoutMs) {
     Lock lock(mutex_);
 
-    for (ExecutorList::iterator it = executors_.begin(); it != executors_.end(); ++it) {
-        if (*it != NULL) {
-            (*it)->close();
+    TimeoutProcessor<std::chrono::milliseconds> timeoutProcessor{timeoutMs};
+    for (auto &&executor : executors_) {
+        timeoutProcessor.tik();
+        if (executor) {
+            executor->close(timeoutProcessor.getLeftTimeout());
         }
-        it->reset();
+        timeoutProcessor.tok();
+        executor.reset();
     }
 }
 }  // namespace pulsar
diff --git a/pulsar-client-cpp/lib/ExecutorService.h b/pulsar-client-cpp/lib/ExecutorService.h
index 6b0909194b7..e4cbb3ce62e 100644
--- a/pulsar-client-cpp/lib/ExecutorService.h
+++ b/pulsar-client-cpp/lib/ExecutorService.h
@@ -20,6 +20,8 @@
 #define _PULSAR_EXECUTOR_SERVICE_HEADER_
 
 #include <atomic>
+#include <condition_variable>
+#include <chrono>
 #include <memory>
 #include <boost/asio.hpp>
 #include <boost/asio/ssl.hpp>
@@ -50,7 +52,8 @@ class PULSAR_PUBLIC ExecutorService : public std::enable_shared_from_this<Execut
     DeadlineTimerPtr createDeadlineTimer();
     void postWork(std::function<void(void)> task);
 
-    void close();
+    // See TimeoutProcessor for the semantics of the parameter.
+    void close(long timeoutMs = 3000);
 
     IOService &getIOService() { return io_service_; }
     bool isClosed() const noexcept { return closed_; }
@@ -68,6 +71,9 @@ class PULSAR_PUBLIC ExecutorService : public std::enable_shared_from_this<Execut
     IOService::work work_{io_service_};
 
     std::atomic_bool closed_{false};
+    std::mutex mutex_;
+    std::condition_variable cond_;
+    std::atomic_bool ioServiceDone_{false};
 
     ExecutorService();
 
@@ -82,7 +88,8 @@ class PULSAR_PUBLIC ExecutorServiceProvider {
 
     ExecutorServicePtr get();
 
-    void close();
+    // See TimeoutProcessor for the semantics of the parameter.
+    void close(long timeoutMs = 3000);
 
    private:
     typedef std::vector<ExecutorServicePtr> ExecutorList;
diff --git a/pulsar-client-cpp/lib/TimeUtils.h b/pulsar-client-cpp/lib/TimeUtils.h
index 1da7d65923a..45157ae855b 100644
--- a/pulsar-client-cpp/lib/TimeUtils.h
+++ b/pulsar-client-cpp/lib/TimeUtils.h
@@ -19,6 +19,8 @@
 #pragma once
 
 #include <boost/date_time/local_time/local_time.hpp>
+#include <atomic>
+#include <chrono>
 
 #include <pulsar/defines.h>
 
@@ -33,4 +35,50 @@ class PULSAR_PUBLIC TimeUtils {
     static ptime now();
     static int64_t currentTimeMillis();
 };
+
+// This class processes a timeout with the following semantics:
+//  > 0: wait at most the timeout until a blocking operation completes
+//  == 0: do not wait the blocking operation
+//  < 0: wait infinitely until a blocking operation completes.
+//
+// Here is a simple example usage:
+//
+// ```c++
+// // Wait at most 300 milliseconds
+// TimeoutProcessor<std::chrono::milliseconds> timeoutProcessor{300};
+// while (!allOperationsAreDone()) {
+//     timeoutProcessor.tik();
+//     // This method may block for some time
+//     performBlockingOperation(timeoutProcessor.getLeftTimeout());
+//     timeoutProcessor.tok();
+// }
+// ```
+//
+// The template argument is the same as std::chrono::duration.
+template <typename Duration>
+class TimeoutProcessor {
+   public:
+    using Clock = std::chrono::high_resolution_clock;
+
+    TimeoutProcessor(long timeout) : leftTimeout_(timeout) {}
+
+    long getLeftTimeout() const noexcept { return leftTimeout_; }
+
+    void tik() { before_ = Clock::now(); }
+
+    void tok() {
+        if (leftTimeout_ > 0) {
+            leftTimeout_ -= std::chrono::duration_cast<Duration>(Clock::now() - before_).count();
+            if (leftTimeout_ <= 0) {
+                // The timeout exceeds, getLeftTimeout() will return 0 to indicate we should not wait more
+                leftTimeout_ = 0;
+            }
+        }
+    }
+
+   private:
+    std::atomic_long leftTimeout_;
+    std::chrono::time_point<Clock> before_;
+};
+
 }  // namespace pulsar
diff --git a/pulsar-client-cpp/tests/CustomLoggerTest.cc b/pulsar-client-cpp/tests/CustomLoggerTest.cc
index 0b4e76adcc4..bd80c312e3b 100644
--- a/pulsar-client-cpp/tests/CustomLoggerTest.cc
+++ b/pulsar-client-cpp/tests/CustomLoggerTest.cc
@@ -20,6 +20,7 @@
 #include <pulsar/ConsoleLoggerFactory.h>
 #include <LogUtils.h>
 #include <gtest/gtest.h>
+#include <atomic>
 #include <thread>
 
 using namespace pulsar;
@@ -28,35 +29,42 @@ static std::vector<std::string> logLines;
 
 class MyTestLogger : public Logger {
    public:
-    MyTestLogger() = default;
+    MyTestLogger(const std::string &fileName) : fileName_(fileName) {}
 
     bool isEnabled(Level level) override { return true; }
 
     void log(Level level, int line, const std::string &message) override {
         std::stringstream ss;
-        ss << " " << level << ":" << line << " " << message << std::endl;
+        ss << std::this_thread::get_id() << " " << level << " " << fileName_ << ":" << line << " " << message
+           << std::endl;
         logLines.emplace_back(ss.str());
     }
+
+   private:
+    const std::string fileName_;
 };
 
 class MyTestLoggerFactory : public LoggerFactory {
    public:
-    Logger *getLogger(const std::string &fileName) override { return logger; }
-
-   private:
-    MyTestLogger *logger = new MyTestLogger;
+    Logger *getLogger(const std::string &fileName) override { return new MyTestLogger(fileName); }
 };
 
 TEST(CustomLoggerTest, testCustomLogger) {
     // simulate new client created on a different thread (because logging factory is called once per thread)
-    auto testThread = std::thread([] {
+    std::atomic_int numLogLines{0};
+    auto testThread = std::thread([&numLogLines] {
         ClientConfiguration clientConfig;
         auto customLogFactory = new MyTestLoggerFactory();
         clientConfig.setLogger(customLogFactory);
         // reset to previous log factory
         Client client("pulsar://localhost:6650", clientConfig);
         client.close();
-        ASSERT_EQ(logLines.size(), 7);
+        ASSERT_TRUE(logLines.size() > 0);
+        for (auto &&line : logLines) {
+            std::cout << line;
+            std::cout.flush();
+        }
+        numLogLines = logLines.size();
         LogUtils::resetLoggerFactory();
     });
     testThread.join();
@@ -65,7 +73,7 @@ TEST(CustomLoggerTest, testCustomLogger) {
     Client client("pulsar://localhost:6650", clientConfig);
     client.close();
     // custom logger didn't get any new lines
-    ASSERT_EQ(logLines.size(), 7);
+    ASSERT_EQ(logLines.size(), numLogLines);
 }
 
 TEST(CustomLoggerTest, testConsoleLoggerFactory) {


[pulsar] 10/26: [Build] Use grpc-bom to align grpc library versions (#15234)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit dfe0d0d4bf93c2040001870269bc8478eeafee7a
Author: Lari Hotari <lh...@users.noreply.github.com>
AuthorDate: Wed Apr 20 14:55:09 2022 +0300

    [Build] Use grpc-bom to align grpc library versions (#15234)
    
    (cherry picked from commit 081f0d1d20165007de1ed8b0ebd130605e094910)
---
 pom.xml | 62 ++++++++------------------------------------------------------
 1 file changed, 8 insertions(+), 54 deletions(-)

diff --git a/pom.xml b/pom.xml
index 6d224cdecb1..fe7526f5781 100644
--- a/pom.xml
+++ b/pom.xml
@@ -927,6 +927,14 @@ flexible messaging model and an intuitive client API.</description>
         <version>${typetools.version}</version>
       </dependency>
 
+      <dependency>
+        <groupId>io.grpc</groupId>
+        <artifactId>grpc-bom</artifactId>
+        <version>${grpc.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+
       <dependency>
         <groupId>io.grpc</groupId>
         <artifactId>grpc-all</artifactId>
@@ -951,42 +959,6 @@ flexible messaging model and an intuitive client API.</description>
         </exclusions>
       </dependency>
 
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-api</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-core</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-netty</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-protobuf</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-grpclb</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-alts</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
       <dependency>
         <groupId>com.google.http-client</groupId>
         <artifactId>google-http-client</artifactId>
@@ -1005,12 +977,6 @@ flexible messaging model and an intuitive client API.</description>
         <version>${google-http-client.version}</version>
       </dependency>
 
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-netty-shaded</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
       <dependency>
         <groupId>io.perfmark</groupId>
         <artifactId>perfmark-api</artifactId>
@@ -1024,18 +990,6 @@ flexible messaging model and an intuitive client API.</description>
         </exclusions>
       </dependency>
 
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-stub</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>io.grpc</groupId>
-        <artifactId>grpc-protobuf-lite</artifactId>
-        <version>${grpc.version}</version>
-      </dependency>
-
       <dependency>
         <groupId>com.google.protobuf</groupId>
         <artifactId>protobuf-bom</artifactId>


[pulsar] 05/26: [fix][txn] Fix potentially unfinishable future. (#15208)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 84a08942dd2754a758b94aa3db9b292d894b2f2e
Author: Qiang Zhao <74...@users.noreply.github.com>
AuthorDate: Tue Apr 19 14:31:20 2022 +0800

    [fix][txn] Fix potentially unfinishable future. (#15208)
    
    (cherry picked from commit 6aaabdb8acfc9ecf07b1f2799b9d8e2a980343a5)
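
    The underlying pattern, as a minimal stand-alone Java sketch (demo code,
    not taken from the patch): when a dependent stage can fail, the outer
    future must also be completed exceptionally, otherwise it may never
    finish.

    ```java
    import java.util.concurrent.CompletableFuture;

    public class PropagateFailureDemo {
        static CompletableFuture<Void> doWork(CompletableFuture<Void> inner) {
            CompletableFuture<Void> result = new CompletableFuture<>();
            inner.thenAccept(result::complete)
                    .exceptionally(ex -> {
                        // Without this handler, a failure of `inner` would leave
                        // `result` incomplete forever (an "unfinishable" future).
                        result.completeExceptionally(ex);
                        return null;
                    });
            return result;
        }

        public static void main(String[] args) {
            doWork(CompletableFuture.failedFuture(new RuntimeException("boom")))
                    .whenComplete((v, e) -> System.out.println("completed with: " + e));
        }
    }
    ```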
---
 .../org/apache/pulsar/broker/TransactionMetadataStoreService.java   | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
index cd188397989..902546958c5 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
@@ -243,7 +243,11 @@ public class TransactionMetadataStoreService {
                             LOG.debug("Handle tc client connect added into pending queue! tcId : {}", tcId.toString());
                         }
                     }
-                }));
+                })).exceptionally(ex -> {
+                    Throwable realCause = FutureUtil.unwrapCompletionException(ex);
+                    completableFuture.completeExceptionally(realCause);
+                    return null;
+                });
             }
         });
         return completableFuture;


[pulsar] 19/26: Fix typo and doc in TopicPolies client api (#15293)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 4ad4bd85544a050d90cafa868092d26dd1a8198c
Author: JiangHaiting <ji...@apache.org>
AuthorDate: Mon Apr 25 09:54:10 2022 +0800

    Fix typo and doc in TopicPolies client api (#15293)
    
    (cherry picked from commit 14ba6c4570e10d800cc2ce7c7694a97de704616f)
---
 .../apache/pulsar/client/admin/TopicPolicies.java    | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/TopicPolicies.java b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/TopicPolicies.java
index 5e358095919..1dfb79d7ba0 100644
--- a/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/TopicPolicies.java
+++ b/pulsar-client-admin-api/src/main/java/org/apache/pulsar/client/admin/TopicPolicies.java
@@ -825,7 +825,7 @@ public interface TopicPolicies {
     /**
      * Set subscription-message-dispatch-rate for the topic.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second
+     * Subscriptions of this topic can dispatch this many messages per second
      *
      * @param topic
      * @param dispatchRate
@@ -838,7 +838,7 @@ public interface TopicPolicies {
     /**
      * Set subscription-message-dispatch-rate for the topic asynchronously.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second.
+     * Subscriptions of this topic can dispatch this many messages per second.
      *
      * @param topic
      * @param dispatchRate
@@ -849,31 +849,31 @@ public interface TopicPolicies {
     /**
      * Get applied subscription-message-dispatch-rate.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second.
+     * Subscriptions of this topic can dispatch this many messages per second.
      *
-     * @param namespace
+     * @param topic
      * @returns DispatchRate
      *            number of messages per second
      * @throws PulsarAdminException
      *             Unexpected error
      */
-    DispatchRate getSubscriptionDispatchRate(String namespace, boolean applied) throws PulsarAdminException;
+    DispatchRate getSubscriptionDispatchRate(String topic, boolean applied) throws PulsarAdminException;
 
     /**
      * Get applied subscription-message-dispatch-rate asynchronously.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second.
+     * Subscriptions in this topic can dispatch this many messages per second.
      *
-     * @param namespace
+     * @param topic
      * @returns DispatchRate
      *            number of messages per second
      */
-    CompletableFuture<DispatchRate> getSubscriptionDispatchRateAsync(String namespace, boolean applied);
+    CompletableFuture<DispatchRate> getSubscriptionDispatchRateAsync(String topic, boolean applied);
 
     /**
      * Get subscription-message-dispatch-rate for the topic.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second.
+     * Subscriptions of this topic can dispatch this many messages per second.
      *
      * @param topic
      * @returns DispatchRate
@@ -886,7 +886,7 @@ public interface TopicPolicies {
     /**
      * Get subscription-message-dispatch-rate asynchronously.
      * <p/>
-     * Subscriptions under this namespace can dispatch this many messages per second.
+     * Subscriptions of this topic can dispatch this many messages per second.
      *
      * @param topic
      * @returns DispatchRate


[pulsar] 12/26: [Functions] Check executor null when closing the FileSource (#15247)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 4639b15c1de5c7878904eddb34caed821a3d6f5c
Author: Neng Lu <nl...@streamnative.io>
AuthorDate: Wed Apr 20 23:23:44 2022 -0700

    [Functions] Check executor null when closing the FileSource (#15247)
    
    (cherry picked from commit 06ba587fb92eff81785f8d463c85aaa1095292e9)
---
 .../src/main/java/org/apache/pulsar/io/file/FileSource.java  | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java
index 9ba85f3ed71..a67e3195ad7 100644
--- a/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java
+++ b/pulsar-io/file/src/main/java/org/apache/pulsar/io/file/FileSource.java
@@ -57,13 +57,15 @@ public class FileSource extends PushSource<byte[]> {
 
     @Override
     public void close() throws Exception {
-        executor.shutdown();
-        try {
-            if (!executor.awaitTermination(800, TimeUnit.MILLISECONDS)) {
+        if (executor != null) {
+            executor.shutdown();
+            try {
+                if (!executor.awaitTermination(800, TimeUnit.MILLISECONDS)) {
+                    executor.shutdownNow();
+                }
+            } catch (InterruptedException e) {
                 executor.shutdownNow();
             }
-        } catch (InterruptedException e) {
-            executor.shutdownNow();
         }
     }
 }


[pulsar] 01/26: Support shrink in ConcurrentLongHashMap (#14497)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 702d21d8eba3b54748da5fc295ebc3596e9fdf1f
Author: lin chen <15...@qq.com>
AuthorDate: Tue Mar 1 21:16:52 2022 +0800

    Support shrink in ConcurrentLongHashMap (#14497)
    
    (cherry picked from commit 297941964ed739e35ca68aa46d74410cf112b7bc)
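
    For reference, here is a minimal usage sketch of the builder API introduced
    by this change (derived from the diff below; the shrink-related settings
    are optional and auto-shrink is disabled by default):

    ```java
    import org.apache.pulsar.common.util.collections.ConcurrentLongHashMap;

    public class BuilderDemo {
        public static void main(String[] args) {
            ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
                    .expectedItems(16)    // initial sizing hint
                    .concurrencyLevel(1)  // number of sections
                    .autoShrink(true)     // allow capacity to shrink after removals
                    .mapIdleFactor(0.25f) // shrink once size < capacity * mapIdleFactor
                    .build();
            map.put(1L, "one");
            System.out.println(map.get(1L)); // one
        }
    }
    ```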
---
 .../bookkeeper/mledger/impl/ManagedLedgerImpl.java |   7 +-
 .../broker/TransactionMetadataStoreService.java    |   5 +-
 .../apache/pulsar/broker/service/ServerCnx.java    |  10 +-
 .../org/apache/pulsar/client/impl/ClientCnx.java   |  22 +++-
 .../client/impl/TransactionMetaStoreHandler.java   |   5 +-
 .../TransactionCoordinatorClientImpl.java          |   6 +-
 .../util/collections/ConcurrentLongHashMap.java    | 139 ++++++++++++++++++---
 .../collections/ConcurrentLongHashMapTest.java     | 122 +++++++++++++++---
 8 files changed, 274 insertions(+), 42 deletions(-)

diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
index b7ff480c674..453e77be0c8 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
@@ -155,8 +155,11 @@ public class ManagedLedgerImpl implements ManagedLedger, CreateCallback {
     protected Map<String, String> propertiesMap;
     protected final MetaStore store;
 
-    final ConcurrentLongHashMap<CompletableFuture<ReadHandle>> ledgerCache = new ConcurrentLongHashMap<>(
-            16 /* initial capacity */, 1 /* number of sections */);
+    final ConcurrentLongHashMap<CompletableFuture<ReadHandle>> ledgerCache =
+            ConcurrentLongHashMap.<CompletableFuture<ReadHandle>>newBuilder()
+                    .expectedItems(16) // initial capacity
+                    .concurrencyLevel(1) // number of sections
+                    .build();
     protected final NavigableMap<Long, LedgerInfo> ledgers = new ConcurrentSkipListMap<>();
     private volatile Stat ledgersStat;
 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
index 7297c334c4c..cd188397989 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/TransactionMetadataStoreService.java
@@ -107,8 +107,9 @@ public class TransactionMetadataStoreService {
         this.tbClient = tbClient;
         this.timeoutTrackerFactory = new TransactionTimeoutTrackerFactoryImpl(this, timer);
         this.transactionOpRetryTimer = timer;
-        this.tcLoadSemaphores = new ConcurrentLongHashMap<>();
-        this.pendingConnectRequests = new ConcurrentLongHashMap<>();
+        this.tcLoadSemaphores = ConcurrentLongHashMap.<Semaphore>newBuilder().build();
+        this.pendingConnectRequests =
+                ConcurrentLongHashMap.<ConcurrentLinkedDeque<CompletableFuture<Void>>>newBuilder().build();
         this.internalPinnedExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
     }
 
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java
index 46691273e31..7fa6c9dde8a 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/ServerCnx.java
@@ -243,8 +243,14 @@ public class ServerCnx extends PulsarHandler implements TransportCnx {
         ServiceConfiguration conf = pulsar.getConfiguration();
 
         // This maps are not heavily contended since most accesses are within the cnx thread
-        this.producers = new ConcurrentLongHashMap<>(8, 1);
-        this.consumers = new ConcurrentLongHashMap<>(8, 1);
+        this.producers = ConcurrentLongHashMap.<CompletableFuture<Producer>>newBuilder()
+                .expectedItems(8)
+                .concurrencyLevel(1)
+                .build();
+        this.consumers = ConcurrentLongHashMap.<CompletableFuture<Consumer>>newBuilder()
+                .expectedItems(8)
+                .concurrencyLevel(1)
+                .build();
         this.replicatorPrefix = conf.getReplicatorPrefix();
         this.maxNonPersistentPendingMessages = conf.getMaxConcurrentNonPersistentMessagePerConnection();
         this.proxyRoles = conf.getProxyRoles();
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java
index e7df2944c8c..4cbf98c4fe6 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ClientCnx.java
@@ -106,14 +106,28 @@ public class ClientCnx extends PulsarHandler {
     private State state;
 
     private final ConcurrentLongHashMap<TimedCompletableFuture<? extends Object>> pendingRequests =
-        new ConcurrentLongHashMap<>(16, 1);
+            ConcurrentLongHashMap.<TimedCompletableFuture<? extends Object>>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
     // LookupRequests that waiting in client side.
     private final Queue<Pair<Long, Pair<ByteBuf, TimedCompletableFuture<LookupDataResult>>>> waitingLookupRequests;
 
-    private final ConcurrentLongHashMap<ProducerImpl<?>> producers = new ConcurrentLongHashMap<>(16, 1);
-    private final ConcurrentLongHashMap<ConsumerImpl<?>> consumers = new ConcurrentLongHashMap<>(16, 1);
+    private final ConcurrentLongHashMap<ProducerImpl<?>> producers =
+            ConcurrentLongHashMap.<ProducerImpl<?>>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
+    private final ConcurrentLongHashMap<ConsumerImpl<?>> consumers =
+            ConcurrentLongHashMap.<ConsumerImpl<?>>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
     private final ConcurrentLongHashMap<TransactionMetaStoreHandler> transactionMetaStoreHandlers =
-            new ConcurrentLongHashMap<>(16, 1);
+            ConcurrentLongHashMap.<TransactionMetaStoreHandler>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
 
     private final CompletableFuture<Void> connectionFuture = new CompletableFuture<Void>();
     private final ConcurrentLinkedQueue<RequestTime> requestTimeoutQueue = new ConcurrentLinkedQueue<>();
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java
index baec2f94bdf..d14faa227cb 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TransactionMetaStoreHandler.java
@@ -61,7 +61,10 @@ public class TransactionMetaStoreHandler extends HandlerState
     private final long transactionCoordinatorId;
     private final ConnectionHandler connectionHandler;
     private final ConcurrentLongHashMap<OpBase<?>> pendingRequests =
-        new ConcurrentLongHashMap<>(16, 1);
+            ConcurrentLongHashMap.<OpBase<?>>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
     private final ConcurrentLinkedQueue<RequestTime> timeoutQueue;
 
     protected final Timer timer;
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java
index 81390ec4988..432fc671071 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/transaction/TransactionCoordinatorClientImpl.java
@@ -51,7 +51,11 @@ public class TransactionCoordinatorClientImpl implements TransactionCoordinatorC
 
     private final PulsarClientImpl pulsarClient;
     private TransactionMetaStoreHandler[] handlers;
-    private ConcurrentLongHashMap<TransactionMetaStoreHandler> handlerMap = new ConcurrentLongHashMap<>(16, 1);
+    private ConcurrentLongHashMap<TransactionMetaStoreHandler> handlerMap =
+            ConcurrentLongHashMap.<TransactionMetaStoreHandler>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
     private final AtomicLong epoch = new AtomicLong(0);
 
     private static final AtomicReferenceFieldUpdater<TransactionCoordinatorClientImpl, State> STATE_UPDATER =
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
index 01627c0529d..d8b0c32cd3c 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
@@ -44,33 +44,112 @@ public class ConcurrentLongHashMap<V> {
     private static final Object EmptyValue = null;
     private static final Object DeletedValue = new Object();
 
-    private static final float MapFillFactor = 0.66f;
-
     private static final int DefaultExpectedItems = 256;
     private static final int DefaultConcurrencyLevel = 16;
 
+    private static final float DefaultMapFillFactor = 0.66f;
+    private static final float DefaultMapIdleFactor = 0.15f;
+
+    private static final float DefaultExpandFactor = 2;
+    private static final float DefaultShrinkFactor = 2;
+
+    private static final boolean DefaultAutoShrink = false;
+
+    public static <V> Builder<V> newBuilder() {
+        return new Builder<>();
+    }
+
+    /**
+     * Builder of ConcurrentLongHashMap.
+     */
+    public static class Builder<T> {
+        int expectedItems = DefaultExpectedItems;
+        int concurrencyLevel = DefaultConcurrencyLevel;
+        float mapFillFactor = DefaultMapFillFactor;
+        float mapIdleFactor = DefaultMapIdleFactor;
+        float expandFactor = DefaultExpandFactor;
+        float shrinkFactor = DefaultShrinkFactor;
+        boolean autoShrink = DefaultAutoShrink;
+
+        public Builder<T> expectedItems(int expectedItems) {
+            this.expectedItems = expectedItems;
+            return this;
+        }
+
+        public Builder<T> concurrencyLevel(int concurrencyLevel) {
+            this.concurrencyLevel = concurrencyLevel;
+            return this;
+        }
+
+        public Builder<T> mapFillFactor(float mapFillFactor) {
+            this.mapFillFactor = mapFillFactor;
+            return this;
+        }
+
+        public Builder<T> mapIdleFactor(float mapIdleFactor) {
+            this.mapIdleFactor = mapIdleFactor;
+            return this;
+        }
+
+        public Builder<T> expandFactor(float expandFactor) {
+            this.expandFactor = expandFactor;
+            return this;
+        }
+
+        public Builder<T> shrinkFactor(float shrinkFactor) {
+            this.shrinkFactor = shrinkFactor;
+            return this;
+        }
+
+        public Builder<T> autoShrink(boolean autoShrink) {
+            this.autoShrink = autoShrink;
+            return this;
+        }
+
+        public ConcurrentLongHashMap<T> build() {
+            return new ConcurrentLongHashMap<>(expectedItems, concurrencyLevel,
+                    mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
     private final Section<V>[] sections;
 
+    @Deprecated
     public ConcurrentLongHashMap() {
         this(DefaultExpectedItems);
     }
 
+    @Deprecated
     public ConcurrentLongHashMap(int expectedItems) {
         this(expectedItems, DefaultConcurrencyLevel);
     }
 
+    @Deprecated
     public ConcurrentLongHashMap(int expectedItems, int concurrencyLevel) {
+        this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor,
+                DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor);
+    }
+
+    public ConcurrentLongHashMap(int expectedItems, int concurrencyLevel,
+                                 float mapFillFactor, float mapIdleFactor,
+                                 boolean autoShrink, float expandFactor, float shrinkFactor) {
         checkArgument(expectedItems > 0);
         checkArgument(concurrencyLevel > 0);
         checkArgument(expectedItems >= concurrencyLevel);
+        checkArgument(mapFillFactor > 0 && mapFillFactor < 1);
+        checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1);
+        checkArgument(mapFillFactor > mapIdleFactor);
+        checkArgument(expandFactor > 1);
+        checkArgument(shrinkFactor > 1);
 
         int numSections = concurrencyLevel;
         int perSectionExpectedItems = expectedItems / numSections;
-        int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor);
+        int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor);
         this.sections = (Section<V>[]) new Section[numSections];
 
         for (int i = 0; i < numSections; i++) {
-            sections[i] = new Section<>(perSectionCapacity);
+            sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor,
+                    autoShrink, expandFactor, shrinkFactor);
         }
     }
 
@@ -195,20 +274,35 @@ public class ConcurrentLongHashMap<V> {
         private volatile V[] values;
 
         private volatile int capacity;
+        private final int initCapacity;
         private static final AtomicIntegerFieldUpdater<Section> SIZE_UPDATER =
                 AtomicIntegerFieldUpdater.newUpdater(Section.class, "size");
 
         private volatile int size;
         private int usedBuckets;
-        private int resizeThreshold;
-
-        Section(int capacity) {
+        private int resizeThresholdUp;
+        private int resizeThresholdBelow;
+        private final float mapFillFactor;
+        private final float mapIdleFactor;
+        private final float expandFactor;
+        private final float shrinkFactor;
+        private final boolean autoShrink;
+
+        Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink,
+                float expandFactor, float shrinkFactor) {
             this.capacity = alignToPowerOfTwo(capacity);
+            this.initCapacity = this.capacity;
             this.keys = new long[this.capacity];
             this.values = (V[]) new Object[this.capacity];
             this.size = 0;
             this.usedBuckets = 0;
-            this.resizeThreshold = (int) (this.capacity * MapFillFactor);
+            this.autoShrink = autoShrink;
+            this.mapFillFactor = mapFillFactor;
+            this.mapIdleFactor = mapIdleFactor;
+            this.expandFactor = expandFactor;
+            this.shrinkFactor = shrinkFactor;
+            this.resizeThresholdUp = (int) (this.capacity * mapFillFactor);
+            this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor);
         }
 
         V get(long key, int keyHash) {
@@ -322,9 +416,10 @@ public class ConcurrentLongHashMap<V> {
                     ++bucket;
                 }
             } finally {
-                if (usedBuckets >= resizeThreshold) {
+                if (usedBuckets > resizeThresholdUp) {
                     try {
-                        rehash();
+                        int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor));
+                        rehash(newCapacity);
                     } finally {
                         unlockWrite(stamp);
                     }
@@ -373,7 +468,20 @@ public class ConcurrentLongHashMap<V> {
                 }
 
             } finally {
-                unlockWrite(stamp);
+                if (autoShrink && size < resizeThresholdBelow) {
+                    try {
+                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                        if (newCapacity < capacity && newResizeThresholdUp > size) {
+                            // shrink the hashmap
+                            rehash(newCapacity);
+                        }
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
             }
         }
 
@@ -385,6 +493,9 @@ public class ConcurrentLongHashMap<V> {
                 Arrays.fill(values, EmptyValue);
                 this.size = 0;
                 this.usedBuckets = 0;
+                if (autoShrink) {
+                    rehash(initCapacity);
+                }
             } finally {
                 unlockWrite(stamp);
             }
@@ -439,9 +550,8 @@ public class ConcurrentLongHashMap<V> {
             }
         }
 
-        private void rehash() {
+        private void rehash(int newCapacity) {
             // Expand the hashmap
-            int newCapacity = capacity * 2;
             long[] newKeys = new long[newCapacity];
             V[] newValues = (V[]) new Object[newCapacity];
 
@@ -458,7 +568,8 @@ public class ConcurrentLongHashMap<V> {
             values = newValues;
             capacity = newCapacity;
             usedBuckets = size;
-            resizeThreshold = (int) (capacity * MapFillFactor);
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
         private static <V> void insertKeyValueNoLock(long[] keys, V[] values, long key, V value) {
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
index 14d8395ae8c..6cf126cf2ff 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
@@ -48,21 +48,29 @@ public class ConcurrentLongHashMapTest {
     @Test
     public void testConstructor() {
         try {
-            new ConcurrentLongHashMap<String>(0);
+            ConcurrentLongHashMap.<String>newBuilder()
+                    .expectedItems(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentLongHashMap<String>(16, 0);
+            ConcurrentLongHashMap.<String>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentLongHashMap<String>(4, 8);
+            ConcurrentLongHashMap.<String>newBuilder()
+                    .expectedItems(4)
+                    .concurrencyLevel(8)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
@@ -71,7 +79,9 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void simpleInsertions() {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>(16);
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(16)
+                .build();
 
         assertTrue(map.isEmpty());
         assertNull(map.put(1, "one"));
@@ -97,9 +107,64 @@ public class ConcurrentLongHashMapTest {
         assertEquals(map.size(), 3);
     }
 
+    @Test
+    public void testClear() {
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertNull(map.put(1, "v1"));
+        assertNull(map.put(2, "v2"));
+        assertNull(map.put(3, "v3"));
+
+        assertTrue(map.capacity() == 8);
+        map.clear();
+        assertTrue(map.capacity() == 4);
+    }
+
+    @Test
+    public void testExpandAndShrink() {
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertNull(map.put(1, "v1"));
+        assertNull(map.put(2, "v2"));
+        assertNull(map.put(3, "v3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, "v1"));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, "v2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        // expand hashmap
+        assertNull(map.put(4, "v4"));
+        assertNull(map.put(5, "v5"));
+        assertTrue(map.capacity() == 8);
+
+        //verify that the map does not keep shrinking at every remove() operation
+        assertNull(map.put(6, "v6"));
+        assertTrue(map.remove(6, "v6"));
+        assertTrue(map.capacity() == 8);
+    }
+
     @Test
     public void testRemove() {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .build();
 
         assertTrue(map.isEmpty());
         assertNull(map.put(1, "one"));
@@ -115,7 +180,10 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void testNegativeUsedBucketCount() {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>(16, 1);
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
 
         map.put(0, "zero");
         assertEquals(1, map.getUsedBucketCount());
@@ -130,7 +198,10 @@ public class ConcurrentLongHashMapTest {
     @Test
     public void testRehashing() {
         int n = 16;
-        ConcurrentLongHashMap<Integer> map = new ConcurrentLongHashMap<>(n / 2, 1);
+        ConcurrentLongHashMap<Integer> map = ConcurrentLongHashMap.<Integer>newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
         assertEquals(map.capacity(), n);
         assertEquals(map.size(), 0);
 
@@ -145,7 +216,10 @@ public class ConcurrentLongHashMapTest {
     @Test
     public void testRehashingWithDeletes() {
         int n = 16;
-        ConcurrentLongHashMap<Integer> map = new ConcurrentLongHashMap<>(n / 2, 1);
+        ConcurrentLongHashMap<Integer> map = ConcurrentLongHashMap.<Integer>newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
         assertEquals(map.capacity(), n);
         assertEquals(map.size(), 0);
 
@@ -167,7 +241,8 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void concurrentInsertions() throws Throwable {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -201,7 +276,8 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void concurrentInsertionsAndReads() throws Throwable {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -235,7 +311,10 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void stressConcurrentInsertionsAndReads() throws Throwable {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>(4, 1);
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(4)
+                .concurrencyLevel(1)
+                .build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
         final int writeThreads = 16;
@@ -286,7 +365,8 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void testIteration() {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .build();
 
         assertEquals(map.keys(), Collections.emptyList());
         assertEquals(map.values(), Collections.emptyList());
@@ -330,7 +410,10 @@ public class ConcurrentLongHashMapTest {
     @Test
     public void testHashConflictWithDeletion() {
         final int Buckets = 16;
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>(Buckets, 1);
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(Buckets)
+                .concurrencyLevel(1)
+                .build();
 
         // Pick 2 keys that fall into the same bucket
         long key1 = 1;
@@ -363,7 +446,8 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void testPutIfAbsent() {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .build();
         assertNull(map.putIfAbsent(1, "one"));
         assertEquals(map.get(1), "one");
 
@@ -373,7 +457,10 @@ public class ConcurrentLongHashMapTest {
 
     @Test
     public void testComputeIfAbsent() {
-        ConcurrentLongHashMap<Integer> map = new ConcurrentLongHashMap<>(16, 1);
+        ConcurrentLongHashMap<Integer> map = ConcurrentLongHashMap.<Integer>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
         AtomicInteger counter = new AtomicInteger();
         LongFunction<Integer> provider = key -> counter.getAndIncrement();
 
@@ -395,7 +482,10 @@ public class ConcurrentLongHashMapTest {
     static final int N = 100_000;
 
     public void benchConcurrentLongHashMap() throws Exception {
-        ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>(N, 1);
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(N)
+                .concurrencyLevel(1)
+                .build();
 
         for (long i = 0; i < Iterations; i++) {
             for (int j = 0; j < N; j++) {


[pulsar] 25/26: [fix][broker] Fix MessageDeduplication#inactiveProducers may not be persistence correctly (#15206)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 0f15d122fbda0e7e4f8c9dd0ef44f79bb0ba3b3f
Author: Baodi Shi <wu...@icloud.com>
AuthorDate: Thu Apr 28 15:06:11 2022 +0800

    [fix][broker] Fix MessageDeduplication#inactiveProducers may not be persistence correctly (#15206)
    
    ### Motivation
    
    #15204
    
    In the current implementation, the first time `purgeInactiveProducers` executes, producers are removed from the collection directly (line 464) even though they have not expired yet, which results in these producers never being purged.
    
    https://github.com/apache/pulsar/blob/9861dfb1208c4b6b8a1f17ef026e9af71c3e784c/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java#L454-L472
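
    A minimal stand-alone Java sketch of the flawed pattern (simplified demo,
    not the actual broker class):

    ```java
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class PurgeDemo {
        public static void main(String[] args) {
            Map<String, Long> inactiveProducers = new HashMap<>();
            inactiveProducers.put("p1", System.currentTimeMillis()); // just became inactive
            long minimumActiveTimestamp = System.currentTimeMillis() - 60_000;

            Iterator<Map.Entry<String, Long>> it = inactiveProducers.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Long> entry = it.next();
                // Buggy version: it.remove() was called here, unconditionally, so a
                // producer that had not expired yet was dropped from the map and
                // could never be purged later.
                if (entry.getValue() < minimumActiveTimestamp) {
                    it.remove(); // fixed version: remove only once actually expired
                }
            }
            System.out.println(inactiveProducers); // p1 is kept: not expired yet
        }
    }
    ```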
    
    ### Modifications
    
    1. Remove a producer from the collection only when it is actually inactive.
    2. Take a snapshot after each removal of an inactive producer. When `managedLedger.getLastConfirmedEntry` equals `managedCursor.getMarkDeletedPosition()`, the `deduplication-snapshot-monitor` thread does not trigger a snapshot, so these producers would otherwise only be persisted the next time a message is produced, which can be confusing for users.
    
    ```
            PositionImpl position = (PositionImpl) managedLedger.getLastConfirmedEntry();
            if (position == null) {
                return;
            }
            PositionImpl markDeletedPosition = (PositionImpl) managedCursor.getMarkDeletedPosition();
            if (markDeletedPosition != null && position.compareTo(markDeletedPosition) <= 0) {
                return;
            }
    ```
    
    (cherry picked from commit 8e1ca487c1026510fee264d65a34067ac427ee9d)
---
 .../service/persistent/MessageDeduplication.java   | 13 +++++++---
 .../service/persistent/MessageDuplicationTest.java | 29 ++++++++++++++--------
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
index 90ee3b67e3c..761a8a65d2a 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
@@ -35,6 +35,7 @@ import org.apache.bookkeeper.mledger.Entry;
 import org.apache.bookkeeper.mledger.ManagedCursor;
 import org.apache.bookkeeper.mledger.ManagedLedger;
 import org.apache.bookkeeper.mledger.ManagedLedgerException;
+import org.apache.bookkeeper.mledger.Position;
 import org.apache.bookkeeper.mledger.impl.PositionImpl;
 import org.apache.pulsar.broker.PulsarService;
 import org.apache.pulsar.broker.service.Topic.PublishContext;
@@ -401,7 +402,7 @@ public class MessageDeduplication {
         }
     }
 
-    private void takeSnapshot(PositionImpl position) {
+    private void takeSnapshot(Position position) {
         if (log.isDebugEnabled()) {
             log.debug("[{}] Taking snapshot of sequence ids map", topic.getName());
         }
@@ -412,7 +413,7 @@ public class MessageDeduplication {
             }
         });
 
-        managedCursor.asyncMarkDelete(position, snapshot, new MarkDeleteCallback() {
+        getManagedCursor().asyncMarkDelete(position, snapshot, new MarkDeleteCallback() {
             @Override
             public void markDeleteComplete(Object ctx) {
                 if (log.isDebugEnabled()) {
@@ -456,19 +457,23 @@ public class MessageDeduplication {
                 .toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes());
 
         Iterator<java.util.Map.Entry<String, Long>> mapIterator = inactiveProducers.entrySet().iterator();
+        boolean hasInactive = false;
         while (mapIterator.hasNext()) {
             java.util.Map.Entry<String, Long> entry = mapIterator.next();
             String producerName = entry.getKey();
             long lastActiveTimestamp = entry.getValue();
 
-            mapIterator.remove();
-
             if (lastActiveTimestamp < minimumActiveTimestamp) {
                 log.info("[{}] Purging dedup information for producer {}", topic.getName(), producerName);
+                mapIterator.remove();
                 highestSequencedPushed.remove(producerName);
                 highestSequencedPersisted.remove(producerName);
+                hasInactive = true;
             }
         }
+        if (hasInactive) {
+            takeSnapshot(getManagedCursor().getMarkDeletedPosition());
+        }
     }
 
     public long getLastPublishedSequenceId(String producerName) {
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java
index f62a65ad36a..765d7463f98 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageDuplicationTest.java
@@ -32,12 +32,12 @@ import static org.mockito.Mockito.when;
 import static org.testng.Assert.assertEquals;
 import static org.testng.Assert.assertFalse;
 import static org.testng.Assert.assertNotNull;
-import static org.testng.Assert.assertTrue;
 import io.netty.buffer.ByteBuf;
 import io.netty.channel.EventLoopGroup;
 import java.lang.reflect.Field;
 import java.util.Map;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.ManagedCursor;
 import org.apache.bookkeeper.mledger.ManagedLedger;
 import org.apache.bookkeeper.mledger.ManagedLedgerException;
 import org.apache.bookkeeper.mledger.impl.PositionImpl;
@@ -167,11 +167,14 @@ public class MessageDuplicationTest {
         MessageDeduplication messageDeduplication = spyWithClassAndConstructorArgs(MessageDeduplication.class, pulsarService, topic, managedLedger);
         doReturn(true).when(messageDeduplication).isEnabled();
 
+        ManagedCursor managedCursor = mock(ManagedCursor.class);
+        doReturn(managedCursor).when(messageDeduplication).getManagedCursor();
+
         Topic.PublishContext publishContext = mock(Topic.PublishContext.class);
 
         Field field = MessageDeduplication.class.getDeclaredField("inactiveProducers");
         field.setAccessible(true);
-        Map<String, Long> map = (Map<String, Long>) field.get(messageDeduplication);
+        Map<String, Long> inactiveProducers = (Map<String, Long>) field.get(messageDeduplication);
 
         String producerName1 = "test1";
         when(publishContext.getHighestSequenceId()).thenReturn(2L);
@@ -187,18 +190,23 @@ public class MessageDuplicationTest {
         when(publishContext.getProducerName()).thenReturn(producerName3);
         messageDeduplication.isDuplicate(publishContext, null);
 
+        // All 3 are added to the inactiveProducers list
         messageDeduplication.producerRemoved(producerName1);
-        assertTrue(map.containsKey(producerName1));
-        messageDeduplication.producerAdded(producerName1);
-        assertFalse(map.containsKey(producerName1));
+        messageDeduplication.producerRemoved(producerName2);
+        messageDeduplication.producerRemoved(producerName3);
+
+        // Try first purgeInactive, all producer not inactive.
         messageDeduplication.purgeInactiveProducers();
+        assertEquals(inactiveProducers.size(), 3);
+
+        // Modify the inactive time of produce2 and produce3
         // messageDeduplication.purgeInactiveProducers() will remove producer2 and producer3
-        map.put(producerName2, System.currentTimeMillis() - 70000);
-        map.put(producerName3, System.currentTimeMillis() - 70000);
+        inactiveProducers.put(producerName2, System.currentTimeMillis() - 70000);
+        inactiveProducers.put(producerName3, System.currentTimeMillis() - 70000);
+        // Try second purgeInactive, produce2 and produce3 is inactive.
         messageDeduplication.purgeInactiveProducers();
-        assertFalse(map.containsKey(producerName2));
-        assertFalse(map.containsKey(producerName3));
-
+        assertFalse(inactiveProducers.containsKey(producerName2));
+        assertFalse(inactiveProducers.containsKey(producerName3));
         field = MessageDeduplication.class.getDeclaredField("highestSequencedPushed");
         field.setAccessible(true);
         ConcurrentOpenHashMap<String, Long> highestSequencedPushed = (ConcurrentOpenHashMap<String, Long>) field.get(messageDeduplication);
@@ -206,7 +214,6 @@ public class MessageDuplicationTest {
         assertEquals((long) highestSequencedPushed.get(producerName1), 2L);
         assertFalse(highestSequencedPushed.containsKey(producerName2));
         assertFalse(highestSequencedPushed.containsKey(producerName3));
-
     }
 
     @Test


[pulsar] 21/26: [fix][client] Fix negative ack not redelivery. (#15312)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 536c89198249f3f55136b0385b26dc19af4107c6
Author: Jiwei Guo <te...@apache.org>
AuthorDate: Tue Apr 26 11:50:15 2022 +0800

    [fix][client] Fix negative ack not redelivery. (#15312)
    
    (cherry picked from commit 9f6532a43eff5021896ed2fd8e3a771ce4d8cc7b)
---
 .../pulsar/client/impl/NegativeAcksTest.java       |  3 +++
 .../pulsar/client/impl/NegativeAcksTracker.java    | 28 ++++++----------------
 2 files changed, 10 insertions(+), 21 deletions(-)

diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java
index 8ff339fed07..ba35529d024 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/client/impl/NegativeAcksTest.java
@@ -106,6 +106,9 @@ public class NegativeAcksTest extends ProducerConsumerBase {
         log.info("Test negative acks batching={} partitions={} subType={} negAckDelayMs={}", batching, usePartitions,
                 subscriptionType, negAcksDelayMillis);
         String topic = BrokerTestUtil.newUniqueName("testNegativeAcks");
+        if (usePartitions) {
+            admin.topics().createPartitionedTopic(topic, 2);
+        }
 
         @Cleanup
         Consumer<String> consumer = pulsarClient.newConsumer(Schema.STRING)
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java
index 17238ece38e..6273f4d582e 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/NegativeAcksTracker.java
@@ -85,29 +85,10 @@ class NegativeAcksTracker implements Closeable {
     }
 
     public synchronized void add(MessageId messageId) {
-        if (messageId instanceof BatchMessageIdImpl) {
-            BatchMessageIdImpl batchMessageId = (BatchMessageIdImpl) messageId;
-            messageId = new MessageIdImpl(batchMessageId.getLedgerId(), batchMessageId.getEntryId(),
-                    batchMessageId.getPartitionIndex());
-        }
-
-        if (nackedMessages == null) {
-            nackedMessages = new HashMap<>();
-        }
-        nackedMessages.put(messageId, System.nanoTime() + nackDelayNanos);
-
-        if (this.timeout == null) {
-            // Schedule a task and group all the redeliveries for same period. Leave a small buffer to allow for
-            // nack immediately following the current one will be batched into the same redeliver request.
-            this.timeout = timer.newTimeout(this::triggerRedelivery, timerIntervalNanos, TimeUnit.NANOSECONDS);
-        }
+        add(messageId, 0);
     }
 
     public synchronized void add(Message<?> message) {
-        if (negativeAckRedeliveryBackoff == null) {
-            add(message.getMessageId());
-            return;
-        }
         add(message.getMessageId(), message.getRedeliveryCount());
     }
 
@@ -127,7 +108,12 @@ class NegativeAcksTracker implements Closeable {
             nackedMessages = new HashMap<>();
         }
 
-        long backoffNs = TimeUnit.MILLISECONDS.toNanos(negativeAckRedeliveryBackoff.next(redeliveryCount));
+        long backoffNs;
+        if (negativeAckRedeliveryBackoff != null) {
+            backoffNs = TimeUnit.MILLISECONDS.toNanos(negativeAckRedeliveryBackoff.next(redeliveryCount));
+        } else {
+            backoffNs = nackDelayNanos;
+        }
         nackedMessages.put(messageId, System.nanoTime() + backoffNs);
 
         if (this.timeout == null) {
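
For context, here is a minimal consumer sketch showing the two negative-ack paths the tracker now funnels through a single `add(messageId, redeliveryCount)` method: a fixed delay when no backoff is configured, and a count-based backoff otherwise. Topic and subscription names are illustrative; `MultiplierRedeliveryBackoff` is the backoff implementation shipped with the 2.10 client.

```java
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.MultiplierRedeliveryBackoff;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

public class NegativeAckExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        Consumer<String> consumer = client.newConsumer(Schema.STRING)
                .topic("my-topic")
                .subscriptionName("my-sub")
                // Fixed delay, used when no backoff is set (nackDelayNanos above).
                .negativeAckRedeliveryDelay(100, TimeUnit.MILLISECONDS)
                // Optional backoff: delay grows with the message's redelivery count.
                .negativeAckRedeliveryBackoff(MultiplierRedeliveryBackoff.builder()
                        .minDelayMs(100)
                        .maxDelayMs(10_000)
                        .build())
                .subscribe();

        consumer.negativeAcknowledge(consumer.receive());
        client.close();
    }
}
```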


[pulsar] 07/26: Skip unnecessary DNS resolution when creating AuthenticationDataHttp instance (#15221)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 98849cd52434f9141ca58e730bb6b285635b80bc
Author: Lari Hotari <lh...@users.noreply.github.com>
AuthorDate: Wed Apr 20 06:08:36 2022 +0300

    Skip unnecessary DNS resolution when creating AuthenticationDataHttp instance (#15221)
    
    (cherry picked from commit 14991c93533927c35dd3cba74fe52ba3d57f244b)
---
 .../apache/pulsar/broker/authentication/AuthenticationDataHttp.java  | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
index 9d8ab42b467..75a75225576 100644
--- a/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
+++ b/pulsar-broker-common/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationDataHttp.java
@@ -18,6 +18,7 @@
  */
 package org.apache.pulsar.broker.authentication;
 
+import io.netty.util.NetUtil;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import javax.servlet.http.HttpServletRequest;
@@ -34,7 +35,9 @@ public class AuthenticationDataHttp implements AuthenticationDataSource {
             throw new IllegalArgumentException();
         }
         this.request = request;
-        this.remoteAddress = new InetSocketAddress(request.getRemoteAddr(), request.getRemotePort());
+        this.remoteAddress =
+                new InetSocketAddress(NetUtil.createInetAddressFromIpAddressString(request.getRemoteAddr()),
+                        request.getRemotePort());
     }
 
     /*
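
The one-line change matters because `new InetSocketAddress(String, int)` routes the host string through `InetAddress.getByName`, which may trigger blocking DNS resolution, whereas constructing the socket address from an already-parsed `InetAddress` skips the resolver entirely. A small sketch of the difference, assuming Netty is on the classpath:

```java
import io.netty.util.NetUtil;
import java.net.InetAddress;
import java.net.InetSocketAddress;

public class DnsSkipSketch {
    public static void main(String[] args) {
        // Servlet containers hand the broker a literal IP string.
        String remoteAddr = "203.0.113.7";

        // Old path: the String-based constructor goes through name resolution.
        InetSocketAddress viaResolver = new InetSocketAddress(remoteAddr, 8080);

        // New path: parse the IP literal directly; no resolver is involved.
        InetAddress ip = NetUtil.createInetAddressFromIpAddressString(remoteAddr);
        InetSocketAddress lookupFree = new InetSocketAddress(ip, 8080);

        System.out.println(viaResolver + " vs " + lookupFree);
    }
}
```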


[pulsar] 08/26: TableView should cache created readers (#15178)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit bb52721b88cbf3f8772481076c6f9047f65ca9b8
Author: Neng Lu <nl...@streamnative.io>
AuthorDate: Tue Apr 19 22:18:58 2022 -0700

    TableView should cache created readers (#15178)
    
    (cherry picked from commit b1225fecd8a04106667fe09e98960829e39376af)
---
 .../java/org/apache/pulsar/client/impl/TableViewImpl.java  | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java
index dc32bd008a5..483b2c1ee63 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/TableViewImpl.java
@@ -224,9 +224,23 @@ public class TableViewImpl<T> implements TableView<T> {
                 .readCompacted(true)
                 .poolMessages(true)
                 .createAsync()
+                .thenCompose(this::cacheNewReader)
                 .thenCompose(this::readAllExistingMessages);
     }
 
+    private CompletableFuture<Reader<T>> cacheNewReader(Reader<T> reader) {
+        CompletableFuture<Reader<T>> future = new CompletableFuture<>();
+        if (this.readers.containsKey(reader.getTopic())) {
+            future.completeExceptionally(
+                    new IllegalArgumentException("reader on partition " + reader.getTopic() + " already existed"));
+        } else {
+            this.readers.put(reader.getTopic(), reader);
+            future.complete(reader);
+        }
+
+        return future;
+    }
+
     private CompletableFuture<Reader<T>> readAllExistingMessages(Reader<T> reader) {
         long startTime = System.nanoTime();
         AtomicLong messagesRead = new AtomicLong();
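
For context, a minimal `TableView` usage sketch, assuming the 2.10 `newTableViewBuilder` entry point (topic name illustrative). One reader is created per partition; with this change each reader is kept in the `readers` map instead of being dropped after creation, so it can be released when the view is closed:

```java
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.TableView;

public class TableViewExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        TableView<String> tv = client.newTableViewBuilder(Schema.STRING)
                .topic("my-compacted-topic")
                .create();

        // Latest value per key, materialized from the compacted topic.
        tv.forEach((key, value) -> System.out.println(key + " -> " + value));

        tv.close();   // the cached readers can now be cleaned up here
        client.close();
    }
}
```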


[pulsar] 24/26: [improve][broker] Use shrink map for message redelivery. (#15342)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 9352feb7a1cd466d3c570fc8d2d2f5365abe5ebc
Author: Jiwei Guo <te...@apache.org>
AuthorDate: Thu Apr 28 11:06:15 2022 +0800

    [improve][broker] Use shrink map for message redelivery. (#15342)
    
    (cherry picked from commit 615f05af3e7c72d83b3fe24f64566ed58244ea5d)
---
 .../broker/service/persistent/MessageRedeliveryController.java   | 9 ++++++---
 .../service/persistent/MessageRedeliveryControllerTest.java      | 2 +-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
index be143565c48..c7f96fffcef 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
@@ -26,8 +26,8 @@ import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import org.apache.bookkeeper.mledger.impl.PositionImpl;
-import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap;
-import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap.LongPair;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap.LongPair;
 import org.apache.pulsar.common.util.collections.ConcurrentSortedLongPairSet;
 import org.apache.pulsar.common.util.collections.LongPairSet;
 
@@ -37,7 +37,10 @@ public class MessageRedeliveryController {
 
     public MessageRedeliveryController(boolean allowOutOfOrderDelivery) {
         this.messagesToRedeliver = new ConcurrentSortedLongPairSet(128, 2);
-        this.hashesToBeBlocked = allowOutOfOrderDelivery ? null : new ConcurrentLongLongPairHashMap(128, 2);
+        this.hashesToBeBlocked = allowOutOfOrderDelivery
+                ? null
+                : ConcurrentLongLongPairHashMap
+                    .newBuilder().concurrencyLevel(2).expectedItems(128).autoShrink(true).build();
     }
 
     public boolean add(long ledgerId, long entryId) {
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java
index 9a785f6f95f..478677a25e4 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryControllerTest.java
@@ -30,7 +30,7 @@ import java.lang.reflect.Field;
 import java.util.Set;
 import java.util.TreeSet;
 import org.apache.bookkeeper.mledger.impl.PositionImpl;
-import org.apache.bookkeeper.util.collections.ConcurrentLongLongPairHashMap;
+import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;
 import org.apache.pulsar.common.util.collections.LongPairSet;
 import org.testng.annotations.DataProvider;
 import org.testng.annotations.Test;
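
The builder call in the diff replaces the old two-argument constructor and opts in to automatic shrinking. A standalone sketch of the same pattern, using the pulsar-common collection directly:

```java
import org.apache.pulsar.common.util.collections.ConcurrentLongLongPairHashMap;

public class ShrinkMapSketch {
    public static void main(String[] args) {
        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
                .expectedItems(128)   // initial capacity hint
                .concurrencyLevel(2)  // number of internal sections
                .autoShrink(true)     // give memory back after bulk removals
                .build();

        map.put(1L, 1L, 10L, 10L);      // (key1, key2) -> (value1, value2)
        map.remove(1L, 1L);
        System.out.println(map.size()); // 0; the backing arrays may now shrink
    }
}
```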


[pulsar] 20/26: [fix] [broker] Fix problem at RateLimiter#tryAcquire (#15306)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit efa28d74a451ea1d2215f838aa88ae19de06a774
Author: Yan Zhao <ho...@apache.org>
AuthorDate: Mon Apr 25 22:17:52 2022 +0800

    [fix] [broker] Fix problem at RateLimiter#tryAcquire (#15306)
    
    (cherry picked from commit 84b65598481fd9bbb6e06e2deb335222a04b9c6b)
---
 .../org/apache/pulsar/common/util/RateLimiter.java   |  3 +--
 .../apache/pulsar/common/util/RateLimiterTest.java   | 20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java
index 20ca181c400..8f02bcc0e5c 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/RateLimiter.java
@@ -189,8 +189,7 @@ public class RateLimiter implements AutoCloseable{
             canAcquire = acquirePermit < 0 || acquiredPermits < this.permits;
         } else {
             // acquired-permits can't be larger than the rate
-            if (acquirePermit > this.permits) {
-                acquiredPermits = this.permits;
+            if (acquirePermit + acquiredPermits > this.permits) {
                 return false;
             }
 
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java
index 788ab749390..57090fcc7b7 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/RateLimiterTest.java
@@ -133,6 +133,24 @@ public class RateLimiterTest {
         rate.close();
     }
 
+    @Test
+    public void testTryAcquireMoreThanPermits() {
+        final long rateTimeMSec = 1000;
+        RateLimiter rate = RateLimiter.builder().permits(3).rateTime(rateTimeMSec).timeUnit(TimeUnit.MILLISECONDS)
+                .build();
+        assertTrue(rate.tryAcquire(2));
+        assertEquals(rate.getAvailablePermits(), 1);
+
+        //try to acquire failed, not decrease availablePermits.
+        assertFalse(rate.tryAcquire(2));
+        assertEquals(rate.getAvailablePermits(), 1);
+
+        assertTrue(rate.tryAcquire(1));
+        assertEquals(rate.getAvailablePermits(), 0);
+
+        rate.close();
+    }
+
     @Test
     public void testMultipleTryAcquire() {
         final long rateTimeMSec = 1000;
@@ -189,7 +207,7 @@ public class RateLimiterTest {
 
         Thread.sleep(rateTimeMSec);
         // check after three rate-time: acquiredPermits is 0
-        assertEquals(rate.getAvailablePermits() > 0, true);
+        assertTrue(rate.getAvailablePermits() > 0);
 
         rate.close();
     }
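
The new test above doubles as a usage example: with the corrected guard, a `tryAcquire` that would exceed the configured rate is rejected without touching `acquiredPermits`. A minimal runnable sketch mirroring it, assuming the pulsar-common `RateLimiter` API shown in the diff:

```java
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.common.util.RateLimiter;

public class TryAcquireDemo {
    public static void main(String[] args) {
        RateLimiter rate = RateLimiter.builder()
                .permits(3)
                .rateTime(1000)
                .timeUnit(TimeUnit.MILLISECONDS)
                .build();

        System.out.println(rate.tryAcquire(2)); // true: 1 permit left
        System.out.println(rate.tryAcquire(2)); // false: 2 + 2 > 3, state unchanged
        System.out.println(rate.tryAcquire(1)); // true: 0 permits left
        rate.close();
    }
}
```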


[pulsar] 03/26: support shrink for map or set (#14663)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 14d9b8492d6dc4781bc83f817ca7898966b4214d
Author: LinChen <15...@qq.com>
AuthorDate: Mon Mar 14 23:23:47 2022 +0800

    support shrink for map or set (#14663)
    
    * support shrink for map or set
    
    * check style
    
    * check style
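
    As the diff below shows, call sites move from the bare constructors to builders that can enable shrinking. A minimal sketch of the new pattern (the `autoShrink` flag is the option this change introduces; builder defaults apply when a setter is omitted):
    
    ```java
    import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap;
    
    public class OpenHashMapBuilderSketch {
        public static void main(String[] args) {
            ConcurrentOpenHashMap<String, Long> map =
                    ConcurrentOpenHashMap.<String, Long>newBuilder()
                            .expectedItems(16)
                            .concurrencyLevel(1)
                            .autoShrink(true) // shrink capacity as items are removed
                            .build();
    
            map.put("producer-1", 1L);
            map.remove("producer-1");
            System.out.println(map.size());
        }
    }
    ```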
    
    (cherry picked from commit 1d10dff757ac7b9a203c14d2085a480495fb141b)
---
 .../mledger/impl/ManagedLedgerOfflineBacklog.java  |   3 +-
 .../broker/loadbalance/impl/LoadManagerShared.java |  20 ++-
 .../loadbalance/impl/ModularLoadManagerImpl.java   |  18 ++-
 .../loadbalance/impl/SimpleLoadManagerImpl.java    |  18 ++-
 .../pulsar/broker/namespace/NamespaceService.java  |  13 +-
 .../org/apache/pulsar/broker/rest/TopicsBase.java  |   3 +-
 .../pulsar/broker/service/BrokerService.java       |  42 ++++--
 .../service/nonpersistent/NonPersistentTopic.java  |  12 +-
 .../service/persistent/MessageDeduplication.java   |  12 +-
 .../broker/service/persistent/PersistentTopic.java |  20 ++-
 .../broker/stats/ClusterReplicationMetrics.java    |   3 +-
 .../AntiAffinityNamespaceGroupTest.java            |  15 ++-
 .../loadbalance/impl/LoadManagerSharedTest.java    |  13 +-
 .../pulsar/broker/service/PersistentTopicTest.java |  24 +++-
 .../apache/pulsar/client/impl/ConsumerBase.java    |   3 +-
 .../apache/pulsar/client/impl/ConsumerImpl.java    |   3 +-
 .../client/impl/PartitionedProducerImpl.java       |   3 +-
 .../apache/pulsar/client/impl/ProducerBase.java    |   3 +-
 .../impl/AcknowledgementsGroupingTrackerTest.java  |   3 +-
 .../util/collections/ConcurrentLongPairSet.java    | 148 +++++++++++++++++++--
 .../util/collections/ConcurrentOpenHashMap.java    | 140 +++++++++++++++++--
 .../util/collections/ConcurrentOpenHashSet.java    | 140 +++++++++++++++++--
 .../collections/ConcurrentSortedLongPairSet.java   |   5 +-
 .../collections/ConcurrentLongPairSetTest.java     | 111 +++++++++++++---
 .../collections/ConcurrentOpenHashMapTest.java     | 125 ++++++++++++++---
 .../collections/ConcurrentOpenHashSetTest.java     |  73 +++++++++-
 .../pulsar/sql/presto/PulsarRecordCursor.java      |   3 +-
 .../apache/pulsar/websocket/WebSocketService.java  |  23 +++-
 .../apache/pulsar/websocket/stats/ProxyStats.java  |   4 +-
 29 files changed, 860 insertions(+), 143 deletions(-)

diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java
index 99cc6c8842e..e0362205c6d 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerOfflineBacklog.java
@@ -220,7 +220,8 @@ public class ManagedLedgerOfflineBacklog {
         BookKeeper bk = factory.getBookKeeper();
         final CountDownLatch allCursorsCounter = new CountDownLatch(1);
         final long errorInReadingCursor = -1;
-        ConcurrentOpenHashMap<String, Long> ledgerRetryMap = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, Long> ledgerRetryMap =
+                ConcurrentOpenHashMap.<String, Long>newBuilder().build();
 
         final MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo = ledgers.lastEntry().getValue();
         final PositionImpl lastLedgerPosition = new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java
index 0c8f8a00d1c..c0ee0d2f986 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerShared.java
@@ -190,7 +190,9 @@ public class LoadManagerShared {
         bundles.forEach(bundleName -> {
             final String namespaceName = getNamespaceNameFromBundleName(bundleName);
             final String bundleRange = getBundleRangeFromBundleName(bundleName);
-            target.computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).add(bundleRange);
+            target.computeIfAbsent(namespaceName,
+                    k -> ConcurrentOpenHashSet.<String>newBuilder().build())
+                    .add(bundleRange);
         });
     }
 
@@ -263,8 +265,12 @@ public class LoadManagerShared {
 
         for (final String broker : candidates) {
             int bundles = (int) brokerToNamespaceToBundleRange
-                    .computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>())
-                    .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).size();
+                    .computeIfAbsent(broker,
+                            k -> ConcurrentOpenHashMap.<String,
+                                    ConcurrentOpenHashSet<String>>newBuilder().build())
+                    .computeIfAbsent(namespaceName,
+                            k -> ConcurrentOpenHashSet.<String>newBuilder().build())
+                    .size();
             leastBundles = Math.min(leastBundles, bundles);
             if (leastBundles == 0) {
                 break;
@@ -276,8 +282,12 @@ public class LoadManagerShared {
 
         final int finalLeastBundles = leastBundles;
         candidates.removeIf(
-                broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>())
-                        .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).size() > finalLeastBundles);
+                broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker,
+                        k -> ConcurrentOpenHashMap.<String,
+                                ConcurrentOpenHashSet<String>>newBuilder().build())
+                        .computeIfAbsent(namespaceName,
+                                k -> ConcurrentOpenHashSet.<String>newBuilder().build())
+                        .size() > finalLeastBundles);
     }
 
     /**
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java
index 326f6af4375..08620340497 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java
@@ -204,7 +204,10 @@ public class ModularLoadManagerImpl implements ModularLoadManager {
      */
     public ModularLoadManagerImpl() {
         brokerCandidateCache = new HashSet<>();
-        brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>();
+        brokerToNamespaceToBundleRange =
+                ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder()
+                        .build();
         defaultStats = new NamespaceBundleStats();
         filterPipeline = new ArrayList<>();
         loadData = new LoadData();
@@ -567,7 +570,10 @@ public class ModularLoadManagerImpl implements ModularLoadManager {
             brokerData.getTimeAverageData().reset(statsMap.keySet(), bundleData, defaultStats);
             final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> namespaceToBundleRange =
                     brokerToNamespaceToBundleRange
-                            .computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>());
+                            .computeIfAbsent(broker, k ->
+                                    ConcurrentOpenHashMap.<String,
+                                            ConcurrentOpenHashSet<String>>newBuilder()
+                                            .build());
             synchronized (namespaceToBundleRange) {
                 namespaceToBundleRange.clear();
                 LoadManagerShared.fillNamespaceToBundlesMap(statsMap.keySet(), namespaceToBundleRange);
@@ -850,9 +856,13 @@ public class ModularLoadManagerImpl implements ModularLoadManager {
                 final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle);
                 final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> namespaceToBundleRange =
                         brokerToNamespaceToBundleRange
-                                .computeIfAbsent(broker.get(), k -> new ConcurrentOpenHashMap<>());
+                                .computeIfAbsent(broker.get(),
+                                        k -> ConcurrentOpenHashMap.<String,
+                                                ConcurrentOpenHashSet<String>>newBuilder()
+                                                .build());
                 synchronized (namespaceToBundleRange) {
-                    namespaceToBundleRange.computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>())
+                    namespaceToBundleRange.computeIfAbsent(namespaceName,
+                            k -> ConcurrentOpenHashSet.<String>newBuilder().build())
                             .add(bundleRange);
                 }
                 return broker;
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java
index 092fe2c852d..4f7e37ad344 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java
@@ -202,7 +202,10 @@ public class SimpleLoadManagerImpl implements LoadManager, Consumer<Notification
         bundleLossesCache = new HashSet<>();
         brokerCandidateCache = new HashSet<>();
         availableBrokersCache = new HashSet<>();
-        brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>();
+        brokerToNamespaceToBundleRange =
+                ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder()
+                        .build();
         this.brokerTopicLoadingPredicate = new BrokerTopicLoadingPredicate() {
             @Override
             public boolean isEnablePersistentTopics(String brokerUrl) {
@@ -851,8 +854,12 @@ public class SimpleLoadManagerImpl implements LoadManager, Consumer<Notification
                 // same broker.
                 brokerToNamespaceToBundleRange
                         .computeIfAbsent(selectedRU.getResourceId().replace("http://", ""),
-                                k -> new ConcurrentOpenHashMap<>())
-                        .computeIfAbsent(namespaceName, k -> new ConcurrentOpenHashSet<>()).add(bundleRange);
+                                k -> ConcurrentOpenHashMap.<String,
+                                        ConcurrentOpenHashSet<String>>newBuilder()
+                                        .build())
+                        .computeIfAbsent(namespaceName, k ->
+                                ConcurrentOpenHashSet.<String>newBuilder().build())
+                        .add(bundleRange);
                 ranking.addPreAllocatedServiceUnit(serviceUnitId, quota);
                 resourceUnitRankings.put(selectedRU, ranking);
             }
@@ -1271,7 +1278,10 @@ public class SimpleLoadManagerImpl implements LoadManager, Consumer<Notification
             final Set<String> preallocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles();
             final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> namespaceToBundleRange =
                     brokerToNamespaceToBundleRange
-                            .computeIfAbsent(broker.replace("http://", ""), k -> new ConcurrentOpenHashMap<>());
+                            .computeIfAbsent(broker.replace("http://", ""),
+                                    k -> ConcurrentOpenHashMap.<String,
+                                            ConcurrentOpenHashSet<String>>newBuilder()
+                                            .build());
             namespaceToBundleRange.clear();
             LoadManagerShared.fillNamespaceToBundlesMap(loadedBundles, namespaceToBundleRange);
             LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundles, namespaceToBundleRange);
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java
index 5ae88adbaa1..98e65dc3e56 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/namespace/NamespaceService.java
@@ -165,7 +165,8 @@ public class NamespaceService implements AutoCloseable {
         this.loadManager = pulsar.getLoadManager();
         this.bundleFactory = new NamespaceBundleFactory(pulsar, Hashing.crc32());
         this.ownershipCache = new OwnershipCache(pulsar, bundleFactory, this);
-        this.namespaceClients = new ConcurrentOpenHashMap<>();
+        this.namespaceClients =
+                ConcurrentOpenHashMap.<ClusterDataImpl, PulsarClientImpl>newBuilder().build();
         this.bundleOwnershipListeners = new CopyOnWriteArrayList<>();
         this.localBrokerDataCache = pulsar.getLocalMetadataStore().getMetadataCache(LocalBrokerData.class);
         this.localPoliciesCache = pulsar.getLocalMetadataStore().getMetadataCache(LocalPolicies.class);
@@ -355,9 +356,15 @@ public class NamespaceService implements AutoCloseable {
     }
 
     private final ConcurrentOpenHashMap<NamespaceBundle, CompletableFuture<Optional<LookupResult>>>
-            findingBundlesAuthoritative = new ConcurrentOpenHashMap<>();
+            findingBundlesAuthoritative =
+            ConcurrentOpenHashMap.<NamespaceBundle,
+                    CompletableFuture<Optional<LookupResult>>>newBuilder()
+                    .build();
     private final ConcurrentOpenHashMap<NamespaceBundle, CompletableFuture<Optional<LookupResult>>>
-            findingBundlesNotAuthoritative = new ConcurrentOpenHashMap<>();
+            findingBundlesNotAuthoritative =
+            ConcurrentOpenHashMap.<NamespaceBundle,
+                    CompletableFuture<Optional<LookupResult>>>newBuilder()
+                    .build();
 
     /**
      * Main internal method to lookup and setup ownership of service unit to a broker.
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java
index f89abf9bea3..770d77794d5 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/rest/TopicsBase.java
@@ -431,7 +431,8 @@ public class TopicsBase extends PersistentTopicsBase {
                             partitionedTopicName, result.getLookupData());
                 }
                 pulsar().getBrokerService().getOwningTopics().computeIfAbsent(partitionedTopicName
-                                .getPartitionedTopicName(), (key) -> new ConcurrentOpenHashSet<Integer>())
+                                .getPartitionedTopicName(),
+                        (key) -> ConcurrentOpenHashSet.<Integer>newBuilder().build())
                         .add(partitionedTopicName.getPartitionIndex());
                 completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future);
             } else {
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java
index a388f13f2dc..b7931be9b4b 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/BrokerService.java
@@ -284,17 +284,28 @@ public class BrokerService implements Closeable {
         this.preciseTopicPublishRateLimitingEnable =
                 pulsar.getConfiguration().isPreciseTopicPublishRateLimiterEnable();
         this.managedLedgerFactory = pulsar.getManagedLedgerFactory();
-        this.topics = new ConcurrentOpenHashMap<>();
-        this.replicationClients = new ConcurrentOpenHashMap<>();
-        this.clusterAdmins = new ConcurrentOpenHashMap<>();
+        this.topics =
+                ConcurrentOpenHashMap.<String, CompletableFuture<Optional<Topic>>>newBuilder()
+                .build();
+        this.replicationClients =
+                ConcurrentOpenHashMap.<String, PulsarClient>newBuilder().build();
+        this.clusterAdmins =
+                ConcurrentOpenHashMap.<String, PulsarAdmin>newBuilder().build();
         this.keepAliveIntervalSeconds = pulsar.getConfiguration().getKeepAliveIntervalSeconds();
-        this.configRegisteredListeners = new ConcurrentOpenHashMap<>();
+        this.configRegisteredListeners =
+                ConcurrentOpenHashMap.<String, Consumer<?>>newBuilder().build();
         this.pendingTopicLoadingQueue = Queues.newConcurrentLinkedQueue();
 
-        this.multiLayerTopicsMap = new ConcurrentOpenHashMap<>();
-        this.owningTopics = new ConcurrentOpenHashMap<>();
+        this.multiLayerTopicsMap = ConcurrentOpenHashMap.<String,
+                ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, Topic>>>newBuilder()
+                .build();
+        this.owningTopics = ConcurrentOpenHashMap.<String,
+                ConcurrentOpenHashSet<Integer>>newBuilder()
+                .build();
         this.pulsarStats = new PulsarStats(pulsar);
-        this.offlineTopicStatCache = new ConcurrentOpenHashMap<>();
+        this.offlineTopicStatCache =
+                ConcurrentOpenHashMap.<TopicName,
+                        PersistentOfflineTopicStats>newBuilder().build();
 
         this.topicOrderedExecutor = OrderedScheduler.newSchedulerBuilder()
                 .numThreads(pulsar.getConfiguration().getNumWorkerThreadsForNonPersistentTopic())
@@ -329,7 +340,8 @@ public class BrokerService implements Closeable {
         this.backlogQuotaChecker = Executors
                 .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-backlog-quota-checker"));
         this.authenticationService = new AuthenticationService(pulsar.getConfiguration());
-        this.blockedDispatchers = new ConcurrentOpenHashSet<>();
+        this.blockedDispatchers =
+                ConcurrentOpenHashSet.<PersistentDispatcherMultipleConsumers>newBuilder().build();
         // update dynamic configuration and register-listener
         updateConfigurationAndRegisterListeners();
         this.lookupRequestSemaphore = new AtomicReference<Semaphore>(
@@ -1595,8 +1607,12 @@ public class BrokerService implements Closeable {
                         synchronized (multiLayerTopicsMap) {
                             String serviceUnit = namespaceBundle.toString();
                             multiLayerTopicsMap //
-                                    .computeIfAbsent(topicName.getNamespace(), k -> new ConcurrentOpenHashMap<>()) //
-                                    .computeIfAbsent(serviceUnit, k -> new ConcurrentOpenHashMap<>()) //
+                                    .computeIfAbsent(topicName.getNamespace(),
+                                            k -> ConcurrentOpenHashMap.<String,
+                                                    ConcurrentOpenHashMap<String, Topic>>newBuilder()
+                                                    .build()) //
+                                    .computeIfAbsent(serviceUnit,
+                                            k -> ConcurrentOpenHashMap.<String, Topic>newBuilder().build()) //
                                     .put(topicName.toString(), topic);
                         }
                     }
@@ -2413,7 +2429,8 @@ public class BrokerService implements Closeable {
     }
 
     private static ConcurrentOpenHashMap<String, ConfigField> prepareDynamicConfigurationMap() {
-        ConcurrentOpenHashMap<String, ConfigField> dynamicConfigurationMap = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConfigField> dynamicConfigurationMap =
+                ConcurrentOpenHashMap.<String, ConfigField>newBuilder().build();
         for (Field field : ServiceConfiguration.class.getDeclaredFields()) {
             if (field != null && field.isAnnotationPresent(FieldContext.class)) {
                 field.setAccessible(true);
@@ -2426,7 +2443,8 @@ public class BrokerService implements Closeable {
     }
 
     private ConcurrentOpenHashMap<String, Object> getRuntimeConfigurationMap() {
-        ConcurrentOpenHashMap<String, Object> runtimeConfigurationMap = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, Object> runtimeConfigurationMap =
+                ConcurrentOpenHashMap.<String, Object>newBuilder().build();
         for (Field field : ServiceConfiguration.class.getDeclaredFields()) {
             if (field != null && field.isAnnotationPresent(FieldContext.class)) {
                 field.setAccessible(true);
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java
index f58a4fd644f..3818b8abc2e 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/nonpersistent/NonPersistentTopic.java
@@ -143,8 +143,16 @@ public class NonPersistentTopic extends AbstractTopic implements Topic, TopicPol
     public NonPersistentTopic(String topic, BrokerService brokerService) {
         super(topic, brokerService);
 
-        this.subscriptions = new ConcurrentOpenHashMap<>(16, 1);
-        this.replicators = new ConcurrentOpenHashMap<>(16, 1);
+        this.subscriptions =
+                ConcurrentOpenHashMap.<String, NonPersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
+        this.replicators =
+                ConcurrentOpenHashMap.<String, NonPersistentReplicator>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         this.isFenced = false;
         registerTopicPolicyListener();
     }
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
index 201e6129a48..90ee3b67e3c 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageDeduplication.java
@@ -97,12 +97,20 @@ public class MessageDeduplication {
     // Map that contains the highest sequenceId that have been sent by each producers. The map will be updated before
     // the messages are persisted
     @VisibleForTesting
-    final ConcurrentOpenHashMap<String, Long> highestSequencedPushed = new ConcurrentOpenHashMap<>(16, 1);
+    final ConcurrentOpenHashMap<String, Long> highestSequencedPushed =
+            ConcurrentOpenHashMap.<String, Long>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(1)
+                    .build();
 
     // Map that contains the highest sequenceId that have been persistent by each producers. The map will be updated
     // after the messages are persisted
     @VisibleForTesting
-    final ConcurrentOpenHashMap<String, Long> highestSequencedPersisted = new ConcurrentOpenHashMap<>(16, 1);
+    final ConcurrentOpenHashMap<String, Long> highestSequencedPersisted =
+            ConcurrentOpenHashMap.<String, Long>newBuilder()
+            .expectedItems(16)
+            .concurrencyLevel(1)
+            .build();
 
     // Number of persisted entries after which to store a snapshot of the sequence ids map
     private final int snapshotInterval;
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java
index 3914278ae7f..5a970b48f58 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/PersistentTopic.java
@@ -254,8 +254,14 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal
     public PersistentTopic(String topic, ManagedLedger ledger, BrokerService brokerService) {
         super(topic, brokerService);
         this.ledger = ledger;
-        this.subscriptions = new ConcurrentOpenHashMap<>(16, 1);
-        this.replicators = new ConcurrentOpenHashMap<>(16, 1);
+        this.subscriptions = ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
+        this.replicators = ConcurrentOpenHashMap.<String, Replicator>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
         this.backloggedCursorThresholdEntries =
                 brokerService.pulsar().getConfiguration().getManagedLedgerCursorBackloggedThreshold();
         initializeRateLimiterIfNeeded(Optional.empty());
@@ -344,8 +350,14 @@ public class PersistentTopic extends AbstractTopic implements Topic, AddEntryCal
         super(topic, brokerService);
         this.ledger = ledger;
         this.messageDeduplication = messageDeduplication;
-        this.subscriptions = new ConcurrentOpenHashMap<>(16, 1);
-        this.replicators = new ConcurrentOpenHashMap<>(16, 1);
+        this.subscriptions = ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
+        this.replicators = ConcurrentOpenHashMap.<String, Replicator>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
         this.compactedTopic = new CompactedTopicImpl(brokerService.pulsar().getBookKeeperClient());
         this.backloggedCursorThresholdEntries =
                 brokerService.pulsar().getConfiguration().getManagedLedgerCursorBackloggedThreshold();
diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java
index 1086563085b..6718f074c67 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/ClusterReplicationMetrics.java
@@ -35,7 +35,8 @@ public class ClusterReplicationMetrics {
     public ClusterReplicationMetrics(String localCluster, boolean metricsEnabled) {
         metricsList = new ArrayList<>();
         this.localCluster = localCluster;
-        metricsMap = new ConcurrentOpenHashMap<>();
+        metricsMap = ConcurrentOpenHashMap.<String, ReplicationMetrics>newBuilder()
+                .build();
         this.metricsEnabled = metricsEnabled;
     }
 
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java
index 1429c7376f4..9e81a3e1db9 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/AntiAffinityNamespaceGroupTest.java
@@ -234,7 +234,8 @@ public class AntiAffinityNamespaceGroupTest {
         brokerToDomainMap.put("brokerName-3", "domain-1");
 
         Set<String> candidate = Sets.newHashSet();
-        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange =
+                ConcurrentOpenHashMap.<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder().build();
 
         assertEquals(brokers.size(), totalBrokers);
 
@@ -320,7 +321,8 @@ public class AntiAffinityNamespaceGroupTest {
 
         Set<String> brokers = Sets.newHashSet();
         Set<String> candidate = Sets.newHashSet();
-        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange =
+                ConcurrentOpenHashMap.<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder().build();
         brokers.add("broker-0");
         brokers.add("broker-1");
         brokers.add("broker-2");
@@ -367,9 +369,11 @@ public class AntiAffinityNamespaceGroupTest {
     private void selectBrokerForNamespace(
             ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange,
             String broker, String namespace, String assignedBundleName) {
-        ConcurrentOpenHashSet<String> bundleSet = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<String> bundleSet =
+                ConcurrentOpenHashSet.<String>newBuilder().build();
         bundleSet.add(assignedBundleName);
-        ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> nsToBundleMap = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> nsToBundleMap =
+                ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build();
         nsToBundleMap.put(namespace, bundleSet);
         brokerToNamespaceToBundleRange.put(broker, nsToBundleMap);
     }
@@ -469,7 +473,8 @@ public class AntiAffinityNamespaceGroupTest {
 
         Set<String> brokers = Sets.newHashSet();
         Set<String> candidate = Sets.newHashSet();
-        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange =
+                ConcurrentOpenHashMap.<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder().build();
         brokers.add("broker-0");
         brokers.add("broker-1");
         brokers.add("broker-2");
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java
index 716b9716425..d23772185f1 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/loadbalance/impl/LoadManagerSharedTest.java
@@ -36,7 +36,10 @@ public class LoadManagerSharedTest {
         String assignedBundle = namespace + "/0x00000000_0x40000000";
 
         Set<String> candidates = Sets.newHashSet();
-        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> map =
+                ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder()
+                        .build();
         LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
         Assert.assertEquals(candidates.size(), 0);
 
@@ -80,8 +83,12 @@ public class LoadManagerSharedTest {
     private static void fillBrokerToNamespaceToBundleMap(
             ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> map,
             String broker, String namespace, String bundle) {
-        map.computeIfAbsent(broker, k -> new ConcurrentOpenHashMap<>())
-                .computeIfAbsent(namespace, k -> new ConcurrentOpenHashSet<>()).add(bundle);
+        map.computeIfAbsent(broker,
+                k -> ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashSet<String>>newBuilder().build())
+                .computeIfAbsent(namespace,
+                        k -> ConcurrentOpenHashSet.<String>newBuilder().build())
+                .add(bundle);
     }
 
 }
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java
index 4e53819e44c..6c8b4d5f334 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java
+++ b/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/PersistentTopicTest.java
@@ -850,7 +850,11 @@ public class PersistentTopicTest extends MockedBookKeeperTestCase {
         addConsumerToSubscription.setAccessible(true);
 
         // for count consumers on topic
-        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions =
+                ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         subscriptions.put("sub-1", sub);
         subscriptions.put("sub-2", sub2);
         Field field = topic.getClass().getDeclaredField("subscriptions");
@@ -954,7 +958,11 @@ public class PersistentTopicTest extends MockedBookKeeperTestCase {
         addConsumerToSubscription.setAccessible(true);
 
         // for count consumers on topic
-        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions =
+                ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         subscriptions.put("sub-1", sub);
         subscriptions.put("sub-2", sub2);
         Field field = topic.getClass().getDeclaredField("subscriptions");
@@ -1081,7 +1089,11 @@ public class PersistentTopicTest extends MockedBookKeeperTestCase {
         addConsumerToSubscription.setAccessible(true);
 
         // for count consumers on topic
-        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions =
+                ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         subscriptions.put("sub1", sub1);
         subscriptions.put("sub2", sub2);
         Field field = topic.getClass().getDeclaredField("subscriptions");
@@ -2071,7 +2083,11 @@ public class PersistentTopicTest extends MockedBookKeeperTestCase {
     public void testCheckInactiveSubscriptions() throws Exception {
         PersistentTopic topic = new PersistentTopic(successTopicName, ledgerMock, brokerService);
 
-        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<String, PersistentSubscription> subscriptions =
+                ConcurrentOpenHashMap.<String, PersistentSubscription>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         // This subscription is connected by consumer.
         PersistentSubscription nonDeletableSubscription1 = spyWithClassAndConstructorArgs(PersistentSubscription.class, topic, "nonDeletableSubscription1", cursorMock, false);
         subscriptions.put(nonDeletableSubscription1.getName(), nonDeletableSubscription1);
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java
index 5b14a841b8c..689c4eb7405 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerBase.java
@@ -110,7 +110,8 @@ public abstract class ConsumerBase<T> extends HandlerState implements Consumer<T
         this.consumerEventListener = conf.getConsumerEventListener();
         // Always use growable queue since items can exceed the advertised size
         this.incomingMessages = new GrowableArrayBlockingQueue<>();
-        this.unAckedChunkedMessageIdSequenceMap = new ConcurrentOpenHashMap<>();
+        this.unAckedChunkedMessageIdSequenceMap =
+                ConcurrentOpenHashMap.<MessageIdImpl, MessageIdImpl[]>newBuilder().build();
         this.executorProvider = executorProvider;
         this.externalPinnedExecutor = (ScheduledExecutorService) executorProvider.getExecutor();
         this.internalPinnedExecutor = (ScheduledExecutorService) client.getInternalExecutorService();
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java
index 0304a608f05..b4ee5a2e784 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ConsumerImpl.java
@@ -183,7 +183,8 @@ public class ConsumerImpl<T> extends ConsumerBase<T> implements ConnectionHandle
 
     protected volatile boolean paused;
 
-    protected ConcurrentOpenHashMap<String, ChunkedMessageCtx> chunkedMessagesMap = new ConcurrentOpenHashMap<>();
+    protected ConcurrentOpenHashMap<String, ChunkedMessageCtx> chunkedMessagesMap =
+            ConcurrentOpenHashMap.<String, ChunkedMessageCtx>newBuilder().build();
     private int pendingChunkedMessageCount = 0;
     protected long expireTimeOfIncompleteChunkedMessageMillis = 0;
     private boolean expireChunkMessageTaskScheduled = false;
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java
index e61e7c82166..4a84ba03ebe 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/PartitionedProducerImpl.java
@@ -76,7 +76,8 @@ public class PartitionedProducerImpl<T> extends ProducerBase<T> {
                                    int numPartitions, CompletableFuture<Producer<T>> producerCreatedFuture,
                                    Schema<T> schema, ProducerInterceptors interceptors) {
         super(client, topic, conf, producerCreatedFuture, schema, interceptors);
-        this.producers = new ConcurrentOpenHashMap<>();
+        this.producers =
+                ConcurrentOpenHashMap.<Integer, ProducerImpl<T>>newBuilder().build();
         this.topicMetadata = new TopicMetadataImpl(numPartitions);
         this.routerPolicy = getMessageRouter();
         stats = client.getConfiguration().getStatsIntervalSeconds() > 0 ? new ProducerStatsRecorderImpl() : null;
diff --git a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java
index 0117e651e6f..c7b9d24151f 100644
--- a/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java
+++ b/pulsar-client/src/main/java/org/apache/pulsar/client/impl/ProducerBase.java
@@ -50,7 +50,8 @@ public abstract class ProducerBase<T> extends HandlerState implements Producer<T
         this.conf = conf;
         this.schema = schema;
         this.interceptors = interceptors;
-        this.schemaCache = new ConcurrentOpenHashMap<>();
+        this.schemaCache =
+                ConcurrentOpenHashMap.<SchemaHash, byte[]>newBuilder().build();
         if (!conf.isMultiSchema()) {
             multiSchemaMode = MultiSchemaMode.Disabled;
         }
diff --git a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java
index c0b952a281a..d577f48357c 100644
--- a/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java
+++ b/pulsar-client/src/test/java/org/apache/pulsar/client/impl/AcknowledgementsGroupingTrackerTest.java
@@ -60,7 +60,8 @@ public class AcknowledgementsGroupingTrackerTest {
     public void setup() throws NoSuchFieldException, IllegalAccessException {
         eventLoopGroup = new NioEventLoopGroup(1);
         consumer = mock(ConsumerImpl.class);
-        consumer.unAckedChunkedMessageIdSequenceMap = new ConcurrentOpenHashMap<>();
+        consumer.unAckedChunkedMessageIdSequenceMap =
+                ConcurrentOpenHashMap.<MessageIdImpl, MessageIdImpl[]>newBuilder().build();
         cnx = spy(new ClientCnxTest(new ClientConfigurationData(), new NioEventLoopGroup()));
         PulsarClientImpl client = mock(PulsarClientImpl.class);
         doReturn(client).when(consumer).getClient();
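
Note the explicit type witness in ConcurrentOpenHashMap.<MessageIdImpl, MessageIdImpl[]>newBuilder(). Java does not carry the assignment target's type arguments through a chained generic call, so without the witness newBuilder() is inferred as Builder<Object, Object> and the subsequent build() fails to type-check. A short illustration:

    // Does not compile: build() would yield ConcurrentOpenHashMap<Object, Object>
    // ConcurrentOpenHashMap<MessageIdImpl, MessageIdImpl[]> m =
    //         ConcurrentOpenHashMap.newBuilder().build();

    // Compiles: the witness fixes K and V before the chain continues
    ConcurrentOpenHashMap<MessageIdImpl, MessageIdImpl[]> m =
            ConcurrentOpenHashMap.<MessageIdImpl, MessageIdImpl[]>newBuilder().build();
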
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
index f1806c511e2..abbe11576a9 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
@@ -45,8 +45,74 @@ public class ConcurrentLongPairSet implements LongPairSet {
     private static final int DefaultExpectedItems = 256;
     private static final int DefaultConcurrencyLevel = 16;
 
+    private static final float DefaultMapFillFactor = 0.66f;
+    private static final float DefaultMapIdleFactor = 0.15f;
+
+    private static final float DefaultExpandFactor = 2;
+    private static final float DefaultShrinkFactor = 2;
+
+    private static final boolean DefaultAutoShrink = false;
+
     private final Section[] sections;
 
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    /**
+     * Builder of ConcurrentLongPairSet.
+     */
+    public static class Builder {
+        int expectedItems = DefaultExpectedItems;
+        int concurrencyLevel = DefaultConcurrencyLevel;
+        float mapFillFactor = DefaultMapFillFactor;
+        float mapIdleFactor = DefaultMapIdleFactor;
+        float expandFactor = DefaultExpandFactor;
+        float shrinkFactor = DefaultShrinkFactor;
+        boolean autoShrink = DefaultAutoShrink;
+
+        public Builder expectedItems(int expectedItems) {
+            this.expectedItems = expectedItems;
+            return this;
+        }
+
+        public Builder concurrencyLevel(int concurrencyLevel) {
+            this.concurrencyLevel = concurrencyLevel;
+            return this;
+        }
+
+        public Builder mapFillFactor(float mapFillFactor) {
+            this.mapFillFactor = mapFillFactor;
+            return this;
+        }
+
+        public Builder mapIdleFactor(float mapIdleFactor) {
+            this.mapIdleFactor = mapIdleFactor;
+            return this;
+        }
+
+        public Builder expandFactor(float expandFactor) {
+            this.expandFactor = expandFactor;
+            return this;
+        }
+
+        public Builder shrinkFactor(float shrinkFactor) {
+            this.shrinkFactor = shrinkFactor;
+            return this;
+        }
+
+        public Builder autoShrink(boolean autoShrink) {
+            this.autoShrink = autoShrink;
+            return this;
+        }
+
+        public ConcurrentLongPairSet build() {
+            return new ConcurrentLongPairSet(expectedItems, concurrencyLevel,
+                    mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
+
     /**
      * Represents a function that accepts an object of the {@code LongPair} type.
      */
@@ -61,18 +127,33 @@ public class ConcurrentLongPairSet implements LongPairSet {
         void accept(long v1, long v2);
     }
 
+    @Deprecated
     public ConcurrentLongPairSet() {
         this(DefaultExpectedItems);
     }
 
+    @Deprecated
     public ConcurrentLongPairSet(int expectedItems) {
         this(expectedItems, DefaultConcurrencyLevel);
     }
 
+    @Deprecated
     public ConcurrentLongPairSet(int expectedItems, int concurrencyLevel) {
+        this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor,
+                DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor);
+    }
+
+    public ConcurrentLongPairSet(int expectedItems, int concurrencyLevel,
+                                 float mapFillFactor, float mapIdleFactor,
+                                 boolean autoShrink, float expandFactor, float shrinkFactor) {
         checkArgument(expectedItems > 0);
         checkArgument(concurrencyLevel > 0);
         checkArgument(expectedItems >= concurrencyLevel);
+        checkArgument(mapFillFactor > 0 && mapFillFactor < 1);
+        checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1);
+        checkArgument(mapFillFactor > mapIdleFactor);
+        checkArgument(expandFactor > 1);
+        checkArgument(shrinkFactor > 1);
 
         int numSections = concurrencyLevel;
         int perSectionExpectedItems = expectedItems / numSections;
@@ -80,10 +161,12 @@ public class ConcurrentLongPairSet implements LongPairSet {
         this.sections = new Section[numSections];
 
         for (int i = 0; i < numSections; i++) {
-            sections[i] = new Section(perSectionCapacity);
+            sections[i] = new Section(perSectionCapacity, mapFillFactor, mapIdleFactor,
+                    autoShrink, expandFactor, shrinkFactor);
         }
     }
 
+    @Override
     public long size() {
         long size = 0;
         for (int i = 0; i < sections.length; i++) {
@@ -214,18 +297,33 @@ public class ConcurrentLongPairSet implements LongPairSet {
         private volatile long[] table;
 
         private volatile int capacity;
+        private final int initCapacity;
         private static final AtomicIntegerFieldUpdater<Section> SIZE_UPDATER = AtomicIntegerFieldUpdater
                 .newUpdater(Section.class, "size");
         private volatile int size;
         private int usedBuckets;
-        private int resizeThreshold;
-
-        Section(int capacity) {
+        private int resizeThresholdUp;
+        private int resizeThresholdBelow;
+        private final float mapFillFactor;
+        private final float mapIdleFactor;
+        private final float expandFactor;
+        private final float shrinkFactor;
+        private final boolean autoShrink;
+
+        Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink,
+                float expandFactor, float shrinkFactor) {
             this.capacity = alignToPowerOfTwo(capacity);
+            this.initCapacity = this.capacity;
             this.table = new long[2 * this.capacity];
             this.size = 0;
             this.usedBuckets = 0;
-            this.resizeThreshold = (int) (this.capacity * SetFillFactor);
+            this.autoShrink = autoShrink;
+            this.mapFillFactor = mapFillFactor;
+            this.mapIdleFactor = mapIdleFactor;
+            this.expandFactor = expandFactor;
+            this.shrinkFactor = shrinkFactor;
+            this.resizeThresholdUp = (int) (this.capacity * mapFillFactor);
+            this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor);
             Arrays.fill(table, EmptyItem);
         }
 
@@ -314,9 +412,11 @@ public class ConcurrentLongPairSet implements LongPairSet {
                     bucket = (bucket + 2) & (table.length - 1);
                 }
             } finally {
-                if (usedBuckets > resizeThreshold) {
+                if (usedBuckets > resizeThresholdUp) {
                     try {
-                        rehash();
+                        // Expand the hashmap
+                        int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor));
+                        rehash(newCapacity);
                     } finally {
                         unlockWrite(stamp);
                     }
@@ -347,7 +447,20 @@ public class ConcurrentLongPairSet implements LongPairSet {
                     bucket = (bucket + 2) & (table.length - 1);
                 }
             } finally {
-                unlockWrite(stamp);
+                if (autoShrink && size < resizeThresholdBelow) {
+                    try {
+                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                        if (newCapacity < capacity && newResizeThresholdUp > size) {
+                            // Shrink the hashmap
+                            rehash(newCapacity);
+                        }
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
             }
         }
 
@@ -379,6 +492,16 @@ public class ConcurrentLongPairSet implements LongPairSet {
                 table[bucket] = EmptyItem;
                 table[bucket + 1] = EmptyItem;
                 --usedBuckets;
+
+                // Clean up all the buckets that were in `DeletedItem` state,
+                // so that we can reduce unnecessary expansions
+                bucket = (bucket - 1) & (table.length - 1);
+                while (table[bucket] == DeletedItem) {
+                    table[bucket] = EmptyItem;
+                    --usedBuckets;
+
+                    bucket = (bucket - 1) & (table.length - 1);
+                }
             } else {
                 table[bucket] = DeletedItem;
                 table[bucket + 1] = DeletedItem;
@@ -392,6 +515,9 @@ public class ConcurrentLongPairSet implements LongPairSet {
                 Arrays.fill(table, EmptyItem);
                 this.size = 0;
                 this.usedBuckets = 0;
+                if (autoShrink) {
+                    rehash(initCapacity);
+                }
             } finally {
                 unlockWrite(stamp);
             }
@@ -431,9 +557,8 @@ public class ConcurrentLongPairSet implements LongPairSet {
             }
         }
 
-        private void rehash() {
+        private void rehash(int newCapacity) {
             // Expand the hashmap
-            int newCapacity = capacity * 2;
             long[] newTable = new long[2 * newCapacity];
             Arrays.fill(newTable, EmptyItem);
 
@@ -451,7 +576,8 @@ public class ConcurrentLongPairSet implements LongPairSet {
             // Capacity needs to be updated after the values, so that we won't see
             // a capacity value bigger than the actual array size
             capacity = newCapacity;
-            resizeThreshold = (int) (capacity * SetFillFactor);
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
         private static void insertKeyValueNoLock(long[] table, int capacity, long item1, long item2) {
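
Two details of the ConcurrentLongPairSet change are worth calling out. First, expansion is driven by usedBuckets (live entries plus DeletedItem tombstones), while shrinking is driven by the live size only. Second, the removal path now reclaims tombstones: when the freed bucket borders an empty bucket, no probe sequence can pass through it, so it and any preceding run of tombstones can be returned to the empty state, which keeps usedBuckets from creeping up and triggering needless expansions. A simplified sketch of that reclamation, assuming a stride-1 linear-probing table over a power-of-two array with EMPTY and TOMBSTONE sentinel values chosen for the sketch (the real code probes long pairs two slots at a time):

    /** Returns the number of buckets returned to EMPTY (0 if a tombstone was left). */
    static int clearBucket(long[] table, int bucket) {
        final long EMPTY = 0, TOMBSTONE = -1;
        int freed = 0;
        int next = (bucket + 1) & (table.length - 1);
        if (table[next] == EMPTY) {
            // No lookup can probe past this slot, so it can be truly emptied...
            table[bucket] = EMPTY;
            freed++;
            // ...and the run of tombstones immediately before it can be emptied
            // too: any probe passing through them now terminates at an empty
            // slot either way, so the answer is unchanged.
            bucket = (bucket - 1) & (table.length - 1);
            while (table[bucket] == TOMBSTONE) {
                table[bucket] = EMPTY;
                freed++;
                bucket = (bucket - 1) & (table.length - 1);
            }
        } else {
            // A probe chain may continue past this slot; leave a tombstone so
            // lookups for displaced entries do not stop early.
            table[bucket] = TOMBSTONE;
        }
        return freed;
    }
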
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
index 5966a95e5f3..1ccbeb3b6b5 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
@@ -64,33 +64,112 @@ public class ConcurrentOpenHashMap<K, V> {
         }
     };
 
-    private static final float MapFillFactor = 0.66f;
-
     private static final int DefaultExpectedItems = 256;
     private static final int DefaultConcurrencyLevel = 16;
 
+    private static final float DefaultMapFillFactor = 0.66f;
+    private static final float DefaultMapIdleFactor = 0.15f;
+
+    private static final float DefaultExpandFactor = 2;
+    private static final float DefaultShrinkFactor = 2;
+
+    private static final boolean DefaultAutoShrink = false;
+
     private final Section<K, V>[] sections;
 
+    public static <K, V> Builder<K, V> newBuilder() {
+        return new Builder<>();
+    }
+
+    /**
+     * Builder of ConcurrentOpenHashMap.
+     */
+    public static class Builder<K, V> {
+        int expectedItems = DefaultExpectedItems;
+        int concurrencyLevel = DefaultConcurrencyLevel;
+        float mapFillFactor = DefaultMapFillFactor;
+        float mapIdleFactor = DefaultMapIdleFactor;
+        float expandFactor = DefaultExpandFactor;
+        float shrinkFactor = DefaultShrinkFactor;
+        boolean autoShrink = DefaultAutoShrink;
+
+        public Builder<K, V> expectedItems(int expectedItems) {
+            this.expectedItems = expectedItems;
+            return this;
+        }
+
+        public Builder<K, V> concurrencyLevel(int concurrencyLevel) {
+            this.concurrencyLevel = concurrencyLevel;
+            return this;
+        }
+
+        public Builder<K, V> mapFillFactor(float mapFillFactor) {
+            this.mapFillFactor = mapFillFactor;
+            return this;
+        }
+
+        public Builder<K, V> mapIdleFactor(float mapIdleFactor) {
+            this.mapIdleFactor = mapIdleFactor;
+            return this;
+        }
+
+        public Builder<K, V> expandFactor(float expandFactor) {
+            this.expandFactor = expandFactor;
+            return this;
+        }
+
+        public Builder<K, V> shrinkFactor(float shrinkFactor) {
+            this.shrinkFactor = shrinkFactor;
+            return this;
+        }
+
+        public Builder<K, V> autoShrink(boolean autoShrink) {
+            this.autoShrink = autoShrink;
+            return this;
+        }
+
+        public ConcurrentOpenHashMap<K, V> build() {
+            return new ConcurrentOpenHashMap<>(expectedItems, concurrencyLevel,
+                    mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
+    @Deprecated
     public ConcurrentOpenHashMap() {
         this(DefaultExpectedItems);
     }
 
+    @Deprecated
     public ConcurrentOpenHashMap(int expectedItems) {
         this(expectedItems, DefaultConcurrencyLevel);
     }
 
+    @Deprecated
     public ConcurrentOpenHashMap(int expectedItems, int concurrencyLevel) {
+        this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor,
+                DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor);
+    }
+
+    public ConcurrentOpenHashMap(int expectedItems, int concurrencyLevel,
+                                 float mapFillFactor, float mapIdleFactor,
+                                 boolean autoShrink, float expandFactor, float shrinkFactor) {
         checkArgument(expectedItems > 0);
         checkArgument(concurrencyLevel > 0);
         checkArgument(expectedItems >= concurrencyLevel);
+        checkArgument(mapFillFactor > 0 && mapFillFactor < 1);
+        checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1);
+        checkArgument(mapFillFactor > mapIdleFactor);
+        checkArgument(expandFactor > 1);
+        checkArgument(shrinkFactor > 1);
 
         int numSections = concurrencyLevel;
         int perSectionExpectedItems = expectedItems / numSections;
-        int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor);
+        int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor);
         this.sections = (Section<K, V>[]) new Section[numSections];
 
         for (int i = 0; i < numSections; i++) {
-            sections[i] = new Section<>(perSectionCapacity);
+            sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor,
+                    autoShrink, expandFactor, shrinkFactor);
         }
     }
 
@@ -208,18 +287,33 @@ public class ConcurrentOpenHashMap<K, V> {
         private volatile Object[] table;
 
         private volatile int capacity;
+        private final int initCapacity;
         private static final AtomicIntegerFieldUpdater<Section> SIZE_UPDATER =
                 AtomicIntegerFieldUpdater.newUpdater(Section.class, "size");
         private volatile int size;
         private int usedBuckets;
-        private int resizeThreshold;
-
-        Section(int capacity) {
+        private int resizeThresholdUp;
+        private int resizeThresholdBelow;
+        private final float mapFillFactor;
+        private final float mapIdleFactor;
+        private final float expandFactor;
+        private final float shrinkFactor;
+        private final boolean autoShrink;
+
+        Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink,
+                float expandFactor, float shrinkFactor) {
             this.capacity = alignToPowerOfTwo(capacity);
+            this.initCapacity = this.capacity;
             this.table = new Object[2 * this.capacity];
             this.size = 0;
             this.usedBuckets = 0;
-            this.resizeThreshold = (int) (this.capacity * MapFillFactor);
+            this.autoShrink = autoShrink;
+            this.mapFillFactor = mapFillFactor;
+            this.mapIdleFactor = mapIdleFactor;
+            this.expandFactor = expandFactor;
+            this.shrinkFactor = shrinkFactor;
+            this.resizeThresholdUp = (int) (this.capacity * mapFillFactor);
+            this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor);
         }
 
         V get(K key, int keyHash) {
@@ -316,9 +410,11 @@ public class ConcurrentOpenHashMap<K, V> {
                     bucket = (bucket + 2) & (table.length - 1);
                 }
             } finally {
-                if (usedBuckets > resizeThreshold) {
+                if (usedBuckets > resizeThresholdUp) {
                     try {
-                        rehash();
+                        // Expand the hashmap
+                        int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor));
+                        rehash(newCapacity);
                     } finally {
                         unlockWrite(stamp);
                     }
@@ -363,7 +459,20 @@ public class ConcurrentOpenHashMap<K, V> {
                 }
 
             } finally {
-                unlockWrite(stamp);
+                if (autoShrink && size < resizeThresholdBelow) {
+                    try {
+                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                        if (newCapacity < capacity && newResizeThresholdUp > size) {
+                            // Shrink the hashmap
+                            rehash(newCapacity);
+                        }
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
             }
         }
 
@@ -374,6 +483,9 @@ public class ConcurrentOpenHashMap<K, V> {
                 Arrays.fill(table, EmptyKey);
                 this.size = 0;
                 this.usedBuckets = 0;
+                if (autoShrink) {
+                    rehash(initCapacity);
+                }
             } finally {
                 unlockWrite(stamp);
             }
@@ -415,9 +527,8 @@ public class ConcurrentOpenHashMap<K, V> {
             }
         }
 
-        private void rehash() {
+        private void rehash(int newCapacity) {
             // Expand the hashmap
-            int newCapacity = capacity * 2;
             Object[] newTable = new Object[2 * newCapacity];
 
             // Re-hash table
@@ -432,7 +543,8 @@ public class ConcurrentOpenHashMap<K, V> {
             table = newTable;
             capacity = newCapacity;
             usedBuckets = size;
-            resizeThreshold = (int) (capacity * MapFillFactor);
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
         private static <K, V> void insertKeyValueNoLock(Object[] table, int capacity, K key, V value) {
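
Concretely, with the defaults introduced above (fill factor 0.66, idle factor 0.15, expand and shrink factors both 2), a section of capacity 16 expands once more than 10 buckets are in use and, if autoShrink is enabled, shrinks once the live size drops below 2, but only when the halved table would still sit under its own fill threshold. A sketch of that bookkeeping with concrete numbers (the real code additionally aligns the new capacity to a power of two):

    float mapFillFactor = 0.66f, mapIdleFactor = 0.15f;
    float expandFactor = 2f, shrinkFactor = 2f;
    int capacity = 16;

    int resizeThresholdUp = (int) (capacity * mapFillFactor);       // 10
    int resizeThresholdBelow = (int) (capacity * mapIdleFactor);    // 2

    // Expansion is keyed off usedBuckets (live entries plus tombstones):
    int usedBuckets = 11;
    boolean expand = usedBuckets > resizeThresholdUp;               // true -> rehash(32)

    // Shrinking is keyed off the live size, with a guard against a
    // shrink that would immediately force a re-expansion:
    int size = 1;
    int newCapacity = (int) (capacity / shrinkFactor);              // 8
    int newResizeThresholdUp = (int) (newCapacity * mapFillFactor); // 5
    boolean shrink = size < resizeThresholdBelow
            && newCapacity < capacity
            && newResizeThresholdUp > size;                         // true -> rehash(8)
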
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
index 8e0e69d32df..28f0df0ff20 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
@@ -43,33 +43,112 @@ public class ConcurrentOpenHashSet<V> {
     private static final Object EmptyValue = null;
     private static final Object DeletedValue = new Object();
 
-    private static final float MapFillFactor = 0.66f;
-
     private static final int DefaultExpectedItems = 256;
     private static final int DefaultConcurrencyLevel = 16;
 
+    private static final float DefaultMapFillFactor = 0.66f;
+    private static final float DefaultMapIdleFactor = 0.15f;
+
+    private static final float DefaultExpandFactor = 2;
+    private static final float DefaultShrinkFactor = 2;
+
+    private static final boolean DefaultAutoShrink = false;
+
     private final Section<V>[] sections;
 
+    public static <V> Builder<V> newBuilder() {
+        return new Builder<>();
+    }
+
+    /**
+     * Builder of ConcurrentOpenHashSet.
+     */
+    public static class Builder<V> {
+        int expectedItems = DefaultExpectedItems;
+        int concurrencyLevel = DefaultConcurrencyLevel;
+        float mapFillFactor = DefaultMapFillFactor;
+        float mapIdleFactor = DefaultMapIdleFactor;
+        float expandFactor = DefaultExpandFactor;
+        float shrinkFactor = DefaultShrinkFactor;
+        boolean autoShrink = DefaultAutoShrink;
+
+        public Builder<V> expectedItems(int expectedItems) {
+            this.expectedItems = expectedItems;
+            return this;
+        }
+
+        public Builder<V> concurrencyLevel(int concurrencyLevel) {
+            this.concurrencyLevel = concurrencyLevel;
+            return this;
+        }
+
+        public Builder<V> mapFillFactor(float mapFillFactor) {
+            this.mapFillFactor = mapFillFactor;
+            return this;
+        }
+
+        public Builder<V> mapIdleFactor(float mapIdleFactor) {
+            this.mapIdleFactor = mapIdleFactor;
+            return this;
+        }
+
+        public Builder<V> expandFactor(float expandFactor) {
+            this.expandFactor = expandFactor;
+            return this;
+        }
+
+        public Builder<V> shrinkFactor(float shrinkFactor) {
+            this.shrinkFactor = shrinkFactor;
+            return this;
+        }
+
+        public Builder<V> autoShrink(boolean autoShrink) {
+            this.autoShrink = autoShrink;
+            return this;
+        }
+
+        public ConcurrentOpenHashSet<V> build() {
+            return new ConcurrentOpenHashSet<>(expectedItems, concurrencyLevel,
+                    mapFillFactor, mapIdleFactor, autoShrink, expandFactor, shrinkFactor);
+        }
+    }
+
+    @Deprecated
     public ConcurrentOpenHashSet() {
         this(DefaultExpectedItems);
     }
 
+    @Deprecated
     public ConcurrentOpenHashSet(int expectedItems) {
         this(expectedItems, DefaultConcurrencyLevel);
     }
 
+    @Deprecated
     public ConcurrentOpenHashSet(int expectedItems, int concurrencyLevel) {
+        this(expectedItems, concurrencyLevel, DefaultMapFillFactor, DefaultMapIdleFactor,
+                DefaultAutoShrink, DefaultExpandFactor, DefaultShrinkFactor);
+    }
+
+    public ConcurrentOpenHashSet(int expectedItems, int concurrencyLevel,
+                                 float mapFillFactor, float mapIdleFactor,
+                                 boolean autoShrink, float expandFactor, float shrinkFactor) {
         checkArgument(expectedItems > 0);
         checkArgument(concurrencyLevel > 0);
         checkArgument(expectedItems >= concurrencyLevel);
+        checkArgument(mapFillFactor > 0 && mapFillFactor < 1);
+        checkArgument(mapIdleFactor > 0 && mapIdleFactor < 1);
+        checkArgument(mapFillFactor > mapIdleFactor);
+        checkArgument(expandFactor > 1);
+        checkArgument(shrinkFactor > 1);
 
         int numSections = concurrencyLevel;
         int perSectionExpectedItems = expectedItems / numSections;
-        int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor);
+        int perSectionCapacity = (int) (perSectionExpectedItems / mapFillFactor);
         this.sections = (Section<V>[]) new Section[numSections];
 
         for (int i = 0; i < numSections; i++) {
-            sections[i] = new Section<>(perSectionCapacity);
+            sections[i] = new Section<>(perSectionCapacity, mapFillFactor, mapIdleFactor,
+                    autoShrink, expandFactor, shrinkFactor);
         }
     }
 
@@ -177,18 +256,33 @@ public class ConcurrentOpenHashSet<V> {
         private volatile V[] values;
 
         private volatile int capacity;
+        private final int initCapacity;
         private static final AtomicIntegerFieldUpdater<Section> SIZE_UPDATER =
                 AtomicIntegerFieldUpdater.newUpdater(Section.class, "size");
         private volatile int size;
         private int usedBuckets;
-        private int resizeThreshold;
-
-        Section(int capacity) {
+        private int resizeThresholdUp;
+        private int resizeThresholdBelow;
+        private final float mapFillFactor;
+        private final float mapIdleFactor;
+        private final float expandFactor;
+        private final float shrinkFactor;
+        private final boolean autoShrink;
+
+        Section(int capacity, float mapFillFactor, float mapIdleFactor, boolean autoShrink,
+                float expandFactor, float shrinkFactor) {
             this.capacity = alignToPowerOfTwo(capacity);
+            this.initCapacity = this.capacity;
             this.values = (V[]) new Object[this.capacity];
             this.size = 0;
             this.usedBuckets = 0;
-            this.resizeThreshold = (int) (this.capacity * MapFillFactor);
+            this.autoShrink = autoShrink;
+            this.mapFillFactor = mapFillFactor;
+            this.mapIdleFactor = mapIdleFactor;
+            this.expandFactor = expandFactor;
+            this.shrinkFactor = shrinkFactor;
+            this.resizeThresholdUp = (int) (this.capacity * mapFillFactor);
+            this.resizeThresholdBelow = (int) (this.capacity * mapIdleFactor);
         }
 
         boolean contains(V value, int keyHash) {
@@ -284,9 +378,11 @@ public class ConcurrentOpenHashSet<V> {
                     ++bucket;
                 }
             } finally {
-                if (usedBuckets > resizeThreshold) {
+                if (usedBuckets > resizeThresholdUp) {
                     try {
-                        rehash();
+                        // Expand the hashmap
+                        int newCapacity = alignToPowerOfTwo((int) (capacity * expandFactor));
+                        rehash(newCapacity);
                     } finally {
                         unlockWrite(stamp);
                     }
@@ -319,7 +415,20 @@ public class ConcurrentOpenHashSet<V> {
                 }
 
             } finally {
-                unlockWrite(stamp);
+                if (autoShrink && size < resizeThresholdBelow) {
+                    try {
+                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                        if (newCapacity < capacity && newResizeThresholdUp > size) {
+                            // Shrink the hashmap
+                            rehash(newCapacity);
+                        }
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
             }
         }
 
@@ -330,6 +439,9 @@ public class ConcurrentOpenHashSet<V> {
                 Arrays.fill(values, EmptyValue);
                 this.size = 0;
                 this.usedBuckets = 0;
+                if (autoShrink) {
+                    rehash(initCapacity);
+                }
             } finally {
                 unlockWrite(stamp);
             }
@@ -402,9 +514,8 @@ public class ConcurrentOpenHashSet<V> {
             }
         }
 
-        private void rehash() {
+        private void rehash(int newCapacity) {
             // Expand the hashmap
-            int newCapacity = capacity * 2;
             V[] newValues = (V[]) new Object[newCapacity];
 
             // Re-hash table
@@ -418,7 +529,8 @@ public class ConcurrentOpenHashSet<V> {
             values = newValues;
             capacity = newCapacity;
             usedBuckets = size;
-            resizeThreshold = (int) (capacity * MapFillFactor);
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
         private static <V> void insertValueNoLock(V[] values, V value) {
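
One behavioral consequence shown above: when autoShrink is enabled, clear() now rehashes the section back to its initial capacity instead of leaving the previously grown table allocated. A usage sketch:

    ConcurrentOpenHashSet<String> set = ConcurrentOpenHashSet.<String>newBuilder()
            .expectedItems(2)
            .concurrencyLevel(1)
            .autoShrink(true)
            .build();
    for (int i = 0; i < 100; i++) {
        set.add("item-" + i);   // grows the backing table well past its initial size
    }
    set.clear();                // resets size and usedBuckets; with autoShrink it
                                // also rehashes back to the initial capacity
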
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
index 95e2302dcb7..e4cb668fc92 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
@@ -79,7 +79,10 @@ public class ConcurrentSortedLongPairSet implements LongPairSet {
     @Override
     public boolean add(long item1, long item2) {
         ConcurrentLongPairSet messagesToReplay = longPairSets.computeIfAbsent(item1,
-                (key) -> new ConcurrentLongPairSet(expectedItems, concurrencyLevel));
+                (key) -> ConcurrentLongPairSet.newBuilder()
+                        .expectedItems(expectedItems)
+                        .concurrencyLevel(concurrencyLevel)
+                        .build());
         return messagesToReplay.add(item1, item2);
     }
 
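ConcurrentSortedLongPairSet keeps one inner ConcurrentLongPairSet per distinct first value, created lazily by the computeIfAbsent above, so the expectedItems and concurrencyLevel fields size each inner set rather than the whole structure. A usage sketch with hypothetical values (the broker uses this shape for (ledgerId, entryId) positions):

    // Each distinct first value gets its own inner set sized by (16, 1)
    ConcurrentSortedLongPairSet positions = new ConcurrentSortedLongPairSet(16, 1);
    positions.add(14L, 1L);   // e.g. ledgerId 14, entryId 1
    positions.add(14L, 2L);
    positions.add(20L, 0L);
    positions.forEach((ledgerId, entryId) ->
            System.out.println(ledgerId + ":" + entryId)); // sorted by first value
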
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
index 82cac712975..a8d3e1d0603 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
@@ -45,21 +45,29 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testConstructor() {
         try {
-            new ConcurrentLongPairSet(0);
+            ConcurrentLongPairSet.newBuilder()
+                    .expectedItems(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentLongPairSet(16, 0);
+            ConcurrentLongPairSet.newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentLongPairSet(4, 8);
+            ConcurrentLongPairSet.newBuilder()
+                    .expectedItems(4)
+                    .concurrencyLevel(8)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
@@ -68,7 +76,9 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void simpleInsertions() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet(16);
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(16)
+                .build();
 
         assertTrue(set.isEmpty());
         assertTrue(set.add(1, 1));
@@ -94,9 +104,64 @@ public class ConcurrentLongPairSetTest {
         assertEquals(set.size(), 3);
     }
 
+    @Test
+    public void testClear() {
+        ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add(1, 1));
+        assertTrue(map.add(2, 2));
+        assertTrue(map.add(3, 3));
+
+        assertTrue(map.capacity() == 8);
+        map.clear();
+        assertTrue(map.capacity() == 4);
+    }
+
+    @Test
+    public void testExpandAndShrink() {
+        ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add(1, 1));
+        assertTrue(map.add(2, 2));
+        assertTrue(map.add(3, 3));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, 1));
+        // should not shrink yet
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, 2));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        // expand hashmap
+        assertTrue(map.add(4, 4));
+        assertTrue(map.add(5, 5));
+        assertTrue(map.capacity() == 8);
+
+        // verify that the map does not keep shrinking at every remove() operation
+        assertTrue(map.add(6, 6));
+        assertTrue(map.remove(6, 6));
+        assertTrue(map.capacity() == 8);
+    }
+
+
     @Test
     public void testRemove() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         assertTrue(set.isEmpty());
         assertTrue(set.add(1, 1));
@@ -111,7 +176,10 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testRehashing() {
         int n = 16;
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1);
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
         assertEquals(set.capacity(), n);
         assertEquals(set.size(), 0);
 
@@ -126,7 +194,10 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testRehashingRemoval() {
         int n = 16;
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1);
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
         assertEquals(set.capacity(), n);
         assertEquals(set.size(), 0);
 
@@ -152,7 +223,10 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testRehashingWithDeletes() {
         int n = 16;
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet(n / 2, 1);
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(n / 2)
+                .concurrencyLevel(1)
+                .build();
         assertEquals(set.capacity(), n);
         assertEquals(set.size(), 0);
 
@@ -177,7 +251,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void concurrentInsertions() throws Throwable {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -210,7 +284,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void concurrentInsertionsAndReads() throws Throwable {
-        ConcurrentLongPairSet map = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet map = ConcurrentLongPairSet.newBuilder().build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -243,7 +317,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void testIteration() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         assertEquals(set.items(), Collections.emptyList());
 
@@ -269,7 +343,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void testRemoval() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         set.add(0, 0);
         set.add(1, 1);
@@ -295,7 +369,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void testIfRemoval() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         set.add(0, 0);
         set.add(1, 1);
@@ -319,7 +393,7 @@ public class ConcurrentLongPairSetTest {
 
     @Test
     public void testItems() {
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         int n = 100;
         int limit = 10;
@@ -340,7 +414,10 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testHashConflictWithDeletion() {
         final int Buckets = 16;
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet(Buckets, 1);
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(Buckets)
+                .concurrencyLevel(1)
+                .build();
 
         // Pick 2 keys that fall into the same bucket
         long key1 = 1;
@@ -375,7 +452,7 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testEqualsObjects() {
 
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         long t1 = 1;
         long t2 = 2;
@@ -397,7 +474,7 @@ public class ConcurrentLongPairSetTest {
     @Test
     public void testToString() {
 
-        ConcurrentLongPairSet set = new ConcurrentLongPairSet();
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build();
 
         set.add(0, 0);
         set.add(1, 1);
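
The capacities asserted in testClear and testExpandAndShrink fall out of the sizing math directly: with expectedItems(2) and concurrencyLevel(1), the single section starts at alignToPowerOfTwo((int) (2 / 0.66f)) = 4; the third add pushes usedBuckets past (int) (4 * 0.66f) = 2, expanding to 8; and with mapIdleFactor(0.25f), the first remove leaves size 2, which is not below (int) (8 * 0.25f) = 2, so the shrink back to 4 happens only on the second remove. Traced against the assertions above:

    // capacity 4:  thresholdUp = (int) (4 * 0.66f) = 2
    // add(1,1), add(2,2): usedBuckets = 2, not > 2       -> stays at 4
    // add(3,3):           usedBuckets = 3 > 2            -> rehash(8)
    // capacity 8:  thresholdBelow = (int) (8 * 0.25f) = 2
    // remove(1,1): size = 2, 2 < 2 is false              -> stays at 8
    // remove(2,2): size = 1 < 2, halved table holds 1    -> rehash(4)
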
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
index 254be51f292..7919485d9b6 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
@@ -49,21 +49,29 @@ public class ConcurrentOpenHashMapTest {
     @Test
     public void testConstructor() {
         try {
-            new ConcurrentOpenHashMap<String, String>(0);
+            ConcurrentOpenHashMap.<String, String>newBuilder()
+                    .expectedItems(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentOpenHashMap<String, String>(16, 0);
+            ConcurrentOpenHashMap.<String, String>newBuilder()
+                    .expectedItems(16)
+                    .concurrencyLevel(0)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
         }
 
         try {
-            new ConcurrentOpenHashMap<String, String>(4, 8);
+            ConcurrentOpenHashMap.<String, String>newBuilder()
+                    .expectedItems(4)
+                    .concurrencyLevel(8)
+                    .build();
             fail("should have thrown exception");
         } catch (IllegalArgumentException e) {
             // ok
@@ -72,7 +80,10 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void simpleInsertions() {
-        ConcurrentOpenHashMap<String, String> map = new ConcurrentOpenHashMap<>(16);
+        ConcurrentOpenHashMap<String, String> map =
+                ConcurrentOpenHashMap.<String, String>newBuilder()
+                .expectedItems(16)
+                .build();
 
         assertTrue(map.isEmpty());
         assertNull(map.put("1", "one"));
@@ -98,9 +109,64 @@ public class ConcurrentOpenHashMapTest {
         assertEquals(map.size(), 3);
     }
 
+    @Test
+    public void testClear() {
+        ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertNull(map.put("k1", "v1"));
+        assertNull(map.put("k2", "v2"));
+        assertNull(map.put("k3", "v3"));
+
+        assertTrue(map.capacity() == 8);
+        map.clear();
+        assertTrue(map.capacity() == 4);
+    }
+
+    @Test
+    public void testExpandAndShrink() {
+        ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertNull(map.put("k1", "v1"));
+        assertNull(map.put("k2", "v2"));
+        assertNull(map.put("k3", "v3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove("k1", "v1"));
+        // should not shrink yet
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove("k2", "v2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        // expand hashmap
+        assertNull(map.put("k4", "v4"));
+        assertNull(map.put("k5", "v5"));
+        assertTrue(map.capacity() == 8);
+
+        // verify that the map does not keep shrinking at every remove() operation
+        assertNull(map.put("k6", "v6"));
+        assertTrue(map.remove("k6", "v6"));
+        assertTrue(map.capacity() == 8);
+    }
+
     @Test
     public void testRemove() {
-        ConcurrentOpenHashMap<String, String> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<String, String> map =
+                ConcurrentOpenHashMap.<String, String>newBuilder().build();
 
         assertTrue(map.isEmpty());
         assertNull(map.put("1", "one"));
@@ -117,7 +183,10 @@ public class ConcurrentOpenHashMapTest {
     @Test
     public void testRehashing() {
         int n = 16;
-        ConcurrentOpenHashMap<String, Integer> map = new ConcurrentOpenHashMap<>(n / 2, 1);
+        ConcurrentOpenHashMap<String, Integer> map = ConcurrentOpenHashMap.<String, Integer>newBuilder()
+                        .expectedItems(n / 2)
+                        .concurrencyLevel(1)
+                        .build();
         assertEquals(map.capacity(), n);
         assertEquals(map.size(), 0);
 
@@ -132,7 +201,11 @@ public class ConcurrentOpenHashMapTest {
     @Test
     public void testRehashingWithDeletes() {
         int n = 16;
-        ConcurrentOpenHashMap<Integer, Integer> map = new ConcurrentOpenHashMap<>(n / 2, 1);
+        ConcurrentOpenHashMap<Integer, Integer> map =
+                ConcurrentOpenHashMap.<Integer, Integer>newBuilder()
+                        .expectedItems(n / 2)
+                        .concurrencyLevel(1)
+                        .build();
         assertEquals(map.capacity(), n);
         assertEquals(map.size(), 0);
 
@@ -154,7 +227,10 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void concurrentInsertions() throws Throwable {
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<Long, String> map = ConcurrentOpenHashMap.<Long, String>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -188,7 +264,8 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void concurrentInsertionsAndReads() throws Throwable {
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<Long, String> map =
+                ConcurrentOpenHashMap.<Long, String>newBuilder().build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -222,7 +299,8 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void testIteration() {
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<Long, String> map =
+                ConcurrentOpenHashMap.<Long, String>newBuilder().build();
 
         assertEquals(map.keys(), Collections.emptyList());
         assertEquals(map.values(), Collections.emptyList());
@@ -266,7 +344,10 @@ public class ConcurrentOpenHashMapTest {
     @Test
     public void testHashConflictWithDeletion() {
         final int Buckets = 16;
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>(Buckets, 1);
+        ConcurrentOpenHashMap<Long, String> map = ConcurrentOpenHashMap.<Long, String>newBuilder()
+                .expectedItems(Buckets)
+                .concurrencyLevel(1)
+                .build();
 
         // Pick 2 keys that fall into the same bucket
         long key1 = 1;
@@ -299,7 +380,8 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void testPutIfAbsent() {
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<Long, String> map =
+                ConcurrentOpenHashMap.<Long, String>newBuilder().build();
         assertNull(map.putIfAbsent(1l, "one"));
         assertEquals(map.get(1l), "one");
 
@@ -309,7 +391,10 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void testComputeIfAbsent() {
-        ConcurrentOpenHashMap<Integer, Integer> map = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<Integer, Integer> map = ConcurrentOpenHashMap.<Integer, Integer>newBuilder()
+                .expectedItems(16)
+                .concurrencyLevel(1)
+                .build();
         AtomicInteger counter = new AtomicInteger();
         Function<Integer, Integer> provider = key -> counter.getAndIncrement();
 
@@ -350,7 +435,8 @@ public class ConcurrentOpenHashMapTest {
             }
         }
 
-        ConcurrentOpenHashMap<T, String> map = new ConcurrentOpenHashMap<>();
+        ConcurrentOpenHashMap<T, String> map =
+                ConcurrentOpenHashMap.<T, String>newBuilder().build();
 
         T t1 = new T(1);
         T t1_b = new T(1);
@@ -372,7 +458,11 @@ public class ConcurrentOpenHashMapTest {
 
     @Test
     public void testNullValue() {
-        ConcurrentOpenHashMap<String, String> map = new ConcurrentOpenHashMap<>(16, 1);
+        ConcurrentOpenHashMap<String, String> map =
+                ConcurrentOpenHashMap.<String, String>newBuilder()
+                        .expectedItems(16)
+                        .concurrencyLevel(1)
+                        .build();
         String key = "a";
         assertThrows(NullPointerException.class, () -> map.put(key, null));
 
@@ -406,7 +496,10 @@ public class ConcurrentOpenHashMapTest {
     static final int N = 1_000_000;
 
     public void benchConcurrentOpenHashMap() throws Exception {
-        ConcurrentOpenHashMap<Long, String> map = new ConcurrentOpenHashMap<>(N, 1);
+        ConcurrentOpenHashMap<Long, String> map = ConcurrentOpenHashMap.<Long, String>newBuilder()
+                .expectedItems(N)
+                .concurrencyLevel(1)
+                .build();
 
         for (long i = 0; i < Iterations; i++) {
             for (int j = 0; j < N; j++) {
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
index 3c1d99668d7..af62948b64a 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
@@ -91,9 +91,66 @@ public class ConcurrentOpenHashSetTest {
         assertEquals(set.size(), 3);
     }
 
+    @Test
+    public void testClear() {
+        ConcurrentOpenHashSet<String> map =
+                ConcurrentOpenHashSet.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add("k1"));
+        assertTrue(map.add("k2"));
+        assertTrue(map.add("k3"));
+
+        assertTrue(map.capacity() == 8);
+        map.clear();
+        assertTrue(map.capacity() == 4);
+    }
+
+    @Test
+    public void testExpandAndShrink() {
+        ConcurrentOpenHashSet<String> map =
+                ConcurrentOpenHashSet.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add("k1"));
+        assertTrue(map.add("k2"));
+        assertTrue(map.add("k3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove("k1"));
+        // should not shrink yet
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove("k2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        // expand hashmap
+        assertTrue(map.add("k4"));
+        assertTrue(map.add("k5"));
+        assertTrue(map.capacity() == 8);
+
+        // verify that the map does not keep shrinking at every remove() operation
+        assertTrue(map.add("k6"));
+        assertTrue(map.remove("k6"));
+        assertTrue(map.capacity() == 8);
+    }
+
     @Test
     public void testRemove() {
-        ConcurrentOpenHashSet<String> set = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<String> set =
+                ConcurrentOpenHashSet.<String>newBuilder().build();
 
         assertTrue(set.isEmpty());
         assertTrue(set.add("1"));
@@ -145,7 +202,8 @@ public class ConcurrentOpenHashSetTest {
 
     @Test
     public void concurrentInsertions() throws Throwable {
-        ConcurrentOpenHashSet<Long> set = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<Long> set =
+                ConcurrentOpenHashSet.<Long>newBuilder().build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -178,7 +236,8 @@ public class ConcurrentOpenHashSetTest {
 
     @Test
     public void concurrentInsertionsAndReads() throws Throwable {
-        ConcurrentOpenHashSet<Long> map = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<Long> map =
+                ConcurrentOpenHashSet.<Long>newBuilder().build();
         @Cleanup("shutdownNow")
         ExecutorService executor = Executors.newCachedThreadPool();
 
@@ -211,7 +270,7 @@ public class ConcurrentOpenHashSetTest {
 
     @Test
     public void testIteration() {
-        ConcurrentOpenHashSet<Long> set = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<Long> set = ConcurrentOpenHashSet.<Long>newBuilder().build();
 
         assertEquals(set.values(), Collections.emptyList());
 
@@ -237,7 +296,8 @@ public class ConcurrentOpenHashSetTest {
 
     @Test
     public void testRemoval() {
-        ConcurrentOpenHashSet<Integer> set = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<Integer> set =
+                ConcurrentOpenHashSet.<Integer>newBuilder().build();
 
         set.add(0);
         set.add(1);
@@ -315,7 +375,8 @@ public class ConcurrentOpenHashSetTest {
             }
         }
 
-        ConcurrentOpenHashSet<T> set = new ConcurrentOpenHashSet<>();
+        ConcurrentOpenHashSet<T> set =
+                ConcurrentOpenHashSet.<T>newBuilder().build();
 
         T t1 = new T(1);
         T t1_b = new T(1);
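
For reference, the capacity figures asserted in the tests above follow
directly from the builder settings. A condensed usage sketch (the inline
capacity notes mirror the test assertions, assuming the default fill factor):

    ConcurrentOpenHashSet<String> set = ConcurrentOpenHashSet.<String>newBuilder()
            .expectedItems(2)      // table starts at capacity 4
            .concurrencyLevel(1)   // one section, so capacity() is easy to follow
            .autoShrink(true)      // rehash to a smaller table when usage drops
            .mapIdleFactor(0.25f)  // shrink once size < capacity * 0.25
            .build();
    set.add("k1"); set.add("k2"); set.add("k3");  // after three adds: 4 -> 8
    set.remove("k1");  // size 2 is not below 8 * 0.25 = 2, so no shrink yet
    set.remove("k2");  // size 1 < 2, so the table shrinks back to 4
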
diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
index 8e85618f3cc..1ea232203d3 100644
--- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
+++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/PulsarRecordCursor.java
@@ -120,7 +120,8 @@ public class PulsarRecordCursor implements RecordCursor {
 
     PulsarDispatchingRowDecoderFactory decoderFactory;
 
-    protected ConcurrentOpenHashMap<String, ChunkedMessageCtx> chunkedMessagesMap = new ConcurrentOpenHashMap<>();
+    protected ConcurrentOpenHashMap<String, ChunkedMessageCtx> chunkedMessagesMap =
+            ConcurrentOpenHashMap.<String, ChunkedMessageCtx>newBuilder().build();
 
     private static final Logger log = Logger.get(PulsarRecordCursor.class);
 
diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java
index ee607687b15..5a81d9f21a2 100644
--- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java
+++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/WebSocketService.java
@@ -81,9 +81,17 @@ public class WebSocketService implements Closeable {
     public WebSocketService(ClusterData localCluster, ServiceConfiguration config) {
         this.config = config;
         this.localCluster = localCluster;
-        this.topicProducerMap = new ConcurrentOpenHashMap<>();
-        this.topicConsumerMap = new ConcurrentOpenHashMap<>();
-        this.topicReaderMap = new ConcurrentOpenHashMap<>();
+        this.topicProducerMap =
+                ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashSet<ProducerHandler>>newBuilder()
+                        .build();
+        this.topicConsumerMap =
+                ConcurrentOpenHashMap.<String,
+                        ConcurrentOpenHashSet<ConsumerHandler>>newBuilder()
+                        .build();
+        this.topicReaderMap =
+                ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<ReaderHandler>>newBuilder()
+                        .build();
         this.proxyStats = new ProxyStats(this);
     }
 
@@ -249,7 +257,8 @@ public class WebSocketService implements Closeable {
 
     public boolean addProducer(ProducerHandler producer) {
         return topicProducerMap
-                .computeIfAbsent(producer.getProducer().getTopic(), topic -> new ConcurrentOpenHashSet<>())
+                .computeIfAbsent(producer.getProducer().getTopic(),
+                        topic -> ConcurrentOpenHashSet.<ProducerHandler>newBuilder().build())
                 .add(producer);
     }
 
@@ -267,7 +276,8 @@ public class WebSocketService implements Closeable {
 
     public boolean addConsumer(ConsumerHandler consumer) {
         return topicConsumerMap
-                .computeIfAbsent(consumer.getConsumer().getTopic(), topic -> new ConcurrentOpenHashSet<>())
+                .computeIfAbsent(consumer.getConsumer().getTopic(), topic ->
+                        ConcurrentOpenHashSet.<ConsumerHandler>newBuilder().build())
                 .add(consumer);
     }
 
@@ -284,7 +294,8 @@ public class WebSocketService implements Closeable {
     }
 
     public boolean addReader(ReaderHandler reader) {
-        return topicReaderMap.computeIfAbsent(reader.getConsumer().getTopic(), topic -> new ConcurrentOpenHashSet<>())
+        return topicReaderMap.computeIfAbsent(reader.getConsumer().getTopic(), topic ->
+                ConcurrentOpenHashSet.<ReaderHandler>newBuilder().build())
                 .add(reader);
     }
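
A note on the pattern above: Java cannot infer the type parameters of
`newBuilder()` when the call is chained into `build()` or `computeIfAbsent`,
hence the explicit witnesses such as
`ConcurrentOpenHashSet.<ProducerHandler>newBuilder()`. A minimal sketch of the
lazy per-topic registry this enables (`someProducerHandler` and the topic
string are illustrative only):

    ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<ProducerHandler>> producers =
            ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<ProducerHandler>>newBuilder()
                    .build();

    // The per-topic set is created on first use and shared afterwards.
    boolean added = producers
            .computeIfAbsent("persistent://public/default/my-topic",
                    topic -> ConcurrentOpenHashSet.<ProducerHandler>newBuilder().build())
            .add(someProducerHandler);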
 
diff --git a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java
index cc327f57191..7fd75c9b3d9 100644
--- a/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java
+++ b/pulsar-websocket/src/main/java/org/apache/pulsar/websocket/stats/ProxyStats.java
@@ -48,7 +48,9 @@ public class ProxyStats {
         super();
         this.service = service;
         this.jvmMetrics = new JvmMetrics(service);
-        this.topicStats = new ConcurrentOpenHashMap<>();
+        this.topicStats =
+                ConcurrentOpenHashMap.<String, ProxyNamespaceStats>newBuilder()
+                        .build();
         this.metricsCollection = new ArrayList<>();
         this.tempMetricsCollection = new ArrayList<>();
         // schedule stat generation task every 1 minute


[pulsar] 14/26: Pulsar SQL support for Decimal data type (#15153)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 642159c8866ac13246ee112964ad79f4b0c7cf9e
Author: Baodi Shi <wu...@icloud.com>
AuthorDate: Thu Apr 21 17:26:35 2022 +0800

    Pulsar SQL support for Decimal data type (#15153)
    
    (cherry picked from commit 6b004ed6a2554ab826a00aa2a177963de3c5f44b)
---
 .../presto/decoder/avro/PulsarAvroColumnDecoder.java | 19 ++++++++++++++++++-
 .../decoder/avro/PulsarAvroRowDecoderFactory.java    | 10 +++++++++-
 .../decoder/json/PulsarJsonRowDecoderFactory.java    |  6 ++++++
 .../pulsar/sql/presto/TestPulsarConnector.java       |  8 +++++++-
 .../pulsar/sql/presto/TestPulsarRecordCursor.java    | 15 +++++++++++++++
 .../sql/presto/decoder/AbstractDecoderTester.java    |  5 +++++
 .../sql/presto/decoder/DecoderTestMessage.java       |  6 +++++-
 .../pulsar/sql/presto/decoder/DecoderTestUtil.java   | 20 ++++++++++++++++++++
 .../sql/presto/decoder/avro/TestAvroDecoder.java     | 11 +++++++++++
 9 files changed, 96 insertions(+), 4 deletions(-)

diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
index 690daf62d2e..0c57336d213 100644
--- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
+++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroColumnDecoder.java
@@ -40,6 +40,8 @@ import io.prestosql.spi.type.ArrayType;
 import io.prestosql.spi.type.BigintType;
 import io.prestosql.spi.type.BooleanType;
 import io.prestosql.spi.type.DateType;
+import io.prestosql.spi.type.DecimalType;
+import io.prestosql.spi.type.Decimals;
 import io.prestosql.spi.type.DoubleType;
 import io.prestosql.spi.type.IntegerType;
 import io.prestosql.spi.type.MapType;
@@ -53,6 +55,7 @@ import io.prestosql.spi.type.TinyintType;
 import io.prestosql.spi.type.Type;
 import io.prestosql.spi.type.VarbinaryType;
 import io.prestosql.spi.type.VarcharType;
+import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.Map;
@@ -139,7 +142,7 @@ public class PulsarAvroColumnDecoder {
     }
 
     private boolean isSupportedPrimitive(Type type) {
-        return type instanceof VarcharType || SUPPORTED_PRIMITIVE_TYPES.contains(type);
+        return type instanceof VarcharType || type instanceof DecimalType || SUPPORTED_PRIMITIVE_TYPES.contains(type);
     }
 
     public FieldValueProvider decodeField(GenericRecord avroRecord) {
@@ -205,6 +208,13 @@ public class PulsarAvroColumnDecoder {
                 return floatToIntBits((Float) value);
             }
 
+            if (columnType instanceof DecimalType) {
+                ByteBuffer buffer = (ByteBuffer) value;
+                byte[] bytes = new byte[buffer.remaining()];
+                buffer.get(bytes);
+                return new BigInteger(bytes).longValue();
+            }
+
             throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED,
                     format("cannot decode object of '%s' as '%s' for column '%s'",
                             value.getClass(), columnType, columnName));
@@ -234,6 +244,13 @@ public class PulsarAvroColumnDecoder {
             }
         }
 
+        // The returned Slice must be the 16-byte (128-bit) unscaled representation of a long decimal
+        if (type instanceof DecimalType) {
+            ByteBuffer buffer = (ByteBuffer) value;
+            BigInteger bigInteger = new BigInteger(buffer.array());
+            return Decimals.encodeUnscaledValue(bigInteger);
+        }
+
         throw new PrestoException(DECODER_CONVERSION_NOT_SUPPORTED,
                 format("cannot decode object of '%s' as '%s' for column '%s'",
                         value.getClass(), type, columnName));
diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
index 12352059c2d..74b0a88fcef 100644
--- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
+++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/avro/PulsarAvroRowDecoderFactory.java
@@ -33,6 +33,7 @@ import io.prestosql.spi.connector.ColumnMetadata;
 import io.prestosql.spi.type.ArrayType;
 import io.prestosql.spi.type.BigintType;
 import io.prestosql.spi.type.BooleanType;
+import io.prestosql.spi.type.DecimalType;
 import io.prestosql.spi.type.DoubleType;
 import io.prestosql.spi.type.IntegerType;
 import io.prestosql.spi.type.RealType;
@@ -128,7 +129,14 @@ public class PulsarAvroRowDecoderFactory implements PulsarRowDecoderFactory {
                                 + "please check the schema or report the bug.", fieldname));
             case FIXED:
             case BYTES:
-                //TODO: support decimal logicalType
+                //  When the precision is <= 0, an exception is thrown.
+                //  When the precision is > 0 and <= 18, a short DecimalType is used, mapped to a Java long.
+                //  When the precision is > 18 and <= 36, a long DecimalType is used, mapped to a Slice.
+                //  When the precision is > 36, an exception is thrown.
+                if (logicalType instanceof LogicalTypes.Decimal) {
+                    LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
+                    return DecimalType.createDecimalType(decimal.getPrecision(), decimal.getScale());
+                }
                 return VarbinaryType.VARBINARY;
             case INT:
                 if (logicalType == LogicalTypes.timeMillis()) {
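
The Avro side of this mapping can be reproduced in isolation. A minimal
sketch using the Avro and Presto SPI classes imported above (`bytesSchema`
and `prestoType` are illustrative names, not from the patch):

    import io.prestosql.spi.type.DecimalType;
    import io.prestosql.spi.type.Type;
    import io.prestosql.spi.type.VarbinaryType;
    import org.apache.avro.LogicalTypes;
    import org.apache.avro.Schema;

    // An Avro BYTES schema carrying decimal(4, 2) as its logical type.
    Schema bytesSchema = LogicalTypes.decimal(4, 2)
            .addToSchema(Schema.create(Schema.Type.BYTES));

    Type prestoType;
    if (bytesSchema.getLogicalType() instanceof LogicalTypes.Decimal) {
        LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) bytesSchema.getLogicalType();
        // precision 4 <= 18, so this creates a short decimal backed by a Java long
        prestoType = DecimalType.createDecimalType(decimal.getPrecision(), decimal.getScale());
    } else {
        prestoType = VarbinaryType.VARBINARY;  // pre-patch behaviour for plain BYTES
    }
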
diff --git a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
index 330631e72a8..bb064d8909f 100644
--- a/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
+++ b/pulsar-sql/presto-pulsar/src/main/java/org/apache/pulsar/sql/presto/decoder/json/PulsarJsonRowDecoderFactory.java
@@ -128,6 +128,12 @@ public class PulsarJsonRowDecoderFactory implements PulsarRowDecoderFactory {
                                 + "please check the schema or report the bug.", fieldname));
             case FIXED:
             case BYTES:
+                // In the current implementation, the JSON schema is generated by Avro,
+                // so a LogicalTypes.Decimal may be present.
+                // Decimal types are mapped to VARCHAR in the JSON schema.
+                if (logicalType instanceof LogicalTypes.Decimal) {
+                    return createUnboundedVarcharType();
+                }
                 return VarbinaryType.VARBINARY;
             case INT:
                 if (logicalType == LogicalTypes.timeMillis()) {
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java
index b673fc368e6..7db32f59148 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarConnector.java
@@ -25,6 +25,7 @@ import io.prestosql.spi.connector.ColumnMetadata;
 import io.prestosql.spi.connector.ConnectorContext;
 import io.prestosql.spi.predicate.TupleDomain;
 import io.prestosql.testing.TestingConnectorContext;
+import java.math.BigDecimal;
 import org.apache.bookkeeper.mledger.AsyncCallbacks;
 import org.apache.bookkeeper.mledger.Entry;
 import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
@@ -166,6 +167,8 @@ public abstract class TestPulsarConnector {
         public int time;
         @org.apache.avro.reflect.AvroSchema("{ \"type\": \"int\", \"logicalType\": \"date\" }")
         public int date;
+        @org.apache.avro.reflect.AvroSchema("{ \"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 4, \"scale\": 2 }")
+        public BigDecimal decimal;
         public TestPulsarConnector.Bar bar;
         public TestEnum field7;
     }
@@ -253,6 +256,7 @@ public abstract class TestPulsarConnector {
             fooFieldNames.add("date");
             fooFieldNames.add("bar");
             fooFieldNames.add("field7");
+            fooFieldNames.add("decimal");
 
 
             ConnectorContext prestoConnectorContext = new TestingConnectorContext();
@@ -313,6 +317,7 @@ public abstract class TestPulsarConnector {
                 LocalDate epoch = LocalDate.ofEpochDay(0);
                 return Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate));
             });
+            fooFunctions.put("decimal", integer -> BigDecimal.valueOf(1234, 2));
             fooFunctions.put("bar.field1", integer -> integer % 3 == 0 ? null : integer + 1);
             fooFunctions.put("bar.field2", integer -> integer % 2 == 0 ? null : String.valueOf(integer + 2));
             fooFunctions.put("bar.field3", integer -> integer + 3.0f);
@@ -331,7 +336,6 @@ public abstract class TestPulsarConnector {
      * @param schemaInfo
      * @param handleKeyValueType
      * @param includeInternalColumn
-     * @param dispatchingRowDecoderFactory
      * @return
      */
     protected static List<PulsarColumnHandle> getColumnColumnHandles(TopicName topicName, SchemaInfo schemaInfo,
@@ -393,6 +397,7 @@ public abstract class TestPulsarConnector {
             LocalDate localDate = LocalDate.now();
             LocalDate epoch = LocalDate.ofEpochDay(0);
             foo.date = Math.toIntExact(ChronoUnit.DAYS.between(epoch, localDate));
+            foo.decimal = BigDecimal.valueOf(count, 2);
 
             MessageMetadata messageMetadata = new MessageMetadata()
                     .setProducerName("test-producer").setSequenceId(i)
@@ -609,6 +614,7 @@ public abstract class TestPulsarConnector {
                                     foo.timestamp = (long) fooFunctions.get("timestamp").apply(count);
                                     foo.time = (int) fooFunctions.get("time").apply(count);
                                     foo.date = (int) fooFunctions.get("date").apply(count);
+                                    foo.decimal = (BigDecimal) fooFunctions.get("decimal").apply(count);
                                     foo.bar = bar;
                                     foo.field7 = (Foo.TestEnum) fooFunctions.get("field7").apply(count);
 
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java
index dbde648ee95..23dc69245f0 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/TestPulsarRecordCursor.java
@@ -22,7 +22,11 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import io.airlift.log.Logger;
 import io.netty.buffer.ByteBuf;
 import io.prestosql.spi.predicate.TupleDomain;
+import io.prestosql.spi.type.DecimalType;
 import io.prestosql.spi.type.RowType;
+import io.prestosql.spi.type.Type;
+import io.prestosql.spi.type.VarcharType;
+import java.math.BigDecimal;
 import lombok.Data;
 import org.apache.bookkeeper.mledger.AsyncCallbacks;
 import org.apache.bookkeeper.mledger.Entry;
@@ -142,6 +146,17 @@ public class TestPulsarRecordCursor extends TestPulsarConnector {
                         }else if (fooColumnHandles.get(i).getName().equals("field7")) {
                             assertEquals(pulsarRecordCursor.getSlice(i).getBytes(), fooFunctions.get("field7").apply(count).toString().getBytes());
                             columnsSeen.add(fooColumnHandles.get(i).getName());
+                        } else if (fooColumnHandles.get(i).getName().equals("decimal")) {
+                            Type type = fooColumnHandles.get(i).getType();
+                            // In the JSON decoder, decimal is mapped to VarcharType
+                            if (type instanceof VarcharType) {
+                                assertEquals(new String(pulsarRecordCursor.getSlice(i).getBytes()),
+                                        fooFunctions.get("decimal").apply(count).toString());
+                            } else {
+                                DecimalType decimalType = (DecimalType) fooColumnHandles.get(i).getType();
+                                assertEquals(BigDecimal.valueOf(pulsarRecordCursor.getLong(i), decimalType.getScale()), fooFunctions.get("decimal").apply(count));
+                            }
+                            columnsSeen.add(fooColumnHandles.get(i).getName());
                         } else {
                             if (PulsarInternalColumn.getInternalFieldsMap().containsKey(fooColumnHandles.get(i).getName())) {
                                 columnsSeen.add(fooColumnHandles.get(i).getName());
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java
index 5cd46832516..e5ceb321aae 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/AbstractDecoderTester.java
@@ -26,6 +26,7 @@ import io.prestosql.spi.connector.ColumnMetadata;
 import io.prestosql.spi.connector.ConnectorContext;
 import io.prestosql.spi.type.Type;
 import io.prestosql.testing.TestingConnectorContext;
+import java.math.BigDecimal;
 import org.apache.pulsar.common.naming.NamespaceName;
 import org.apache.pulsar.common.naming.TopicName;
 import org.apache.pulsar.common.schema.SchemaInfo;
@@ -102,6 +103,10 @@ public abstract class AbstractDecoderTester {
         decoderTestUtil.checkValue(decodedRow, handle, value);
     }
 
+    protected void checkValue(Map<DecoderColumnHandle, FieldValueProvider> decodedRow, DecoderColumnHandle handle, BigDecimal value) {
+        decoderTestUtil.checkValue(decodedRow, handle, value);
+    }
+
     protected Block getBlock(Map<DecoderColumnHandle, FieldValueProvider> decodedRow, DecoderColumnHandle handle) {
         FieldValueProvider provider = decodedRow.get(handle);
         assertNotNull(provider);
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
index 115f3691c00..da6d92e5158 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestMessage.java
@@ -18,6 +18,7 @@
  */
 package org.apache.pulsar.sql.presto.decoder;
 
+import java.math.BigDecimal;
 import lombok.Data;
 
 import java.util.List;
@@ -45,6 +46,10 @@ public class DecoderTestMessage {
     public int dateField;
     public TestRow rowField;
     public TestEnum enumField;
+    @org.apache.avro.reflect.AvroSchema("{ \"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 4, \"scale\": 2 }")
+    public BigDecimal decimalField;
+    @org.apache.avro.reflect.AvroSchema("{ \"type\": \"bytes\", \"logicalType\": \"decimal\", \"precision\": 30, \"scale\": 2 }")
+    public BigDecimal longDecimalField;
 
     public List<String> arrayField;
     public Map<String, Long> mapField;
@@ -62,7 +67,6 @@ public class DecoderTestMessage {
         public long longField;
     }
 
-
     public static class CompositeRow {
         public String stringField;
         public List<NestedRow> arrayField;
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java
index 4c3c4a63447..496a6f061bf 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/DecoderTestUtil.java
@@ -23,11 +23,16 @@ import io.prestosql.decoder.DecoderColumnHandle;
 import io.prestosql.decoder.FieldValueProvider;
 import io.prestosql.spi.block.Block;
 import io.prestosql.spi.type.ArrayType;
+import io.prestosql.spi.type.DecimalType;
+import io.prestosql.spi.type.Decimals;
 import io.prestosql.spi.type.MapType;
 import io.prestosql.spi.type.RowType;
 import io.prestosql.spi.type.Type;
+import java.math.BigDecimal;
+import java.math.BigInteger;
 import java.util.Map;
 
+import static io.prestosql.spi.type.UnscaledDecimal128Arithmetic.UNSCALED_DECIMAL_128_SLICE_LENGTH;
 import static io.prestosql.testing.TestingConnectorSession.SESSION;
 import static org.testng.Assert.*;
 
@@ -113,6 +118,21 @@ public abstract class DecoderTestUtil {
         assertEquals(provider.getBoolean(), value);
     }
 
+    public void checkValue(Map<DecoderColumnHandle, FieldValueProvider> decodedRow, DecoderColumnHandle handle, BigDecimal value) {
+        FieldValueProvider provider = decodedRow.get(handle);
+        // null-check the provider before dereferencing it
+        assertNotNull(provider);
+        DecimalType decimalType = (DecimalType) handle.getType();
+        BigDecimal actualDecimal;
+        if (decimalType.getFixedSize() == UNSCALED_DECIMAL_128_SLICE_LENGTH) {
+            Slice slice = provider.getSlice();
+            BigInteger bigInteger = Decimals.decodeUnscaledValue(slice);
+            actualDecimal = new BigDecimal(bigInteger, decimalType.getScale());
+        } else {
+            actualDecimal = BigDecimal.valueOf(provider.getLong(), decimalType.getScale());
+        }
+        assertEquals(actualDecimal, value);
+    }
+
     public void checkIsNull(Map<DecoderColumnHandle, FieldValueProvider> decodedRow, DecoderColumnHandle handle) {
         FieldValueProvider provider = decodedRow.get(handle);
         assertNotNull(provider);
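
The two branches above mirror how Presto represents decimals: a precision up
to 18 keeps the unscaled value in a Java long, while larger precisions carry
a 128-bit unscaled value in a 16-byte Slice. A self-contained round-trip
sketch (values are illustrative):

    // short decimal: precision <= 18, unscaled value fits in a long
    BigDecimal shortDec = new BigDecimal("22.33");              // precision 4, scale 2
    long unscaled = shortDec.unscaledValue().longValueExact();  // 2233
    assert BigDecimal.valueOf(unscaled, 2).equals(shortDec);

    // long decimal: precision > 18, unscaled value travels as a 16-byte Slice
    BigDecimal longDec = new BigDecimal("1234567891234567891234567891.23");
    Slice slice = Decimals.encodeUnscaledValue(longDec.unscaledValue());
    assert new BigDecimal(Decimals.decodeUnscaledValue(slice), 2).equals(longDec);
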
diff --git a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
index 1cfbbb4fce5..7b270c7995b 100644
--- a/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
+++ b/pulsar-sql/presto-pulsar/src/test/java/org/apache/pulsar/sql/presto/decoder/avro/TestAvroDecoder.java
@@ -25,11 +25,13 @@ import io.prestosql.decoder.FieldValueProvider;
 import io.prestosql.spi.PrestoException;
 import io.prestosql.spi.type.ArrayType;
 import io.prestosql.spi.type.BigintType;
+import io.prestosql.spi.type.DecimalType;
 import io.prestosql.spi.type.RowType;
 import io.prestosql.spi.type.StandardTypes;
 import io.prestosql.spi.type.Type;
 import io.prestosql.spi.type.TypeSignatureParameter;
 import io.prestosql.spi.type.VarcharType;
+import java.math.BigDecimal;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -87,6 +89,8 @@ public class TestAvroDecoder extends AbstractDecoderTester {
         message.longField = 222L;
         message.timestampField = System.currentTimeMillis();
         message.enumField = DecoderTestMessage.TestEnum.TEST_ENUM_1;
+        message.decimalField = BigDecimal.valueOf(2233, 2);
+        message.longDecimalField = new BigDecimal("1234567891234567891234567891.23");
 
         LocalTime now = LocalTime.now(ZoneId.systemDefault());
         message.timeField = now.toSecondOfDay() * 1000;
@@ -127,6 +131,13 @@ public class TestAvroDecoder extends AbstractDecoderTester {
                 "enumField", VARCHAR, false, false, "enumField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
         checkValue(decodedRow, enumFieldColumnHandle, message.enumField.toString());
 
+        PulsarColumnHandle decimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
+                "decimalField", DecimalType.createDecimalType(4, 2), false, false, "decimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
+        checkValue(decodedRow, decimalFieldColumnHandle, message.decimalField);
+
+        PulsarColumnHandle longDecimalFieldColumnHandle = new PulsarColumnHandle(getPulsarConnectorId().toString(),
+                "longDecimalField", DecimalType.createDecimalType(30, 2), false, false, "longDecimalField", null, null, PulsarColumnHandle.HandleKeyValueType.NONE);
+        checkValue(decodedRow, longDecimalFieldColumnHandle, message.longDecimalField);
     }
 
     @Test


[pulsar] 26/26: [improve][broker] Support shrink for ConcurrentSortedLongPairSet (#15354)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit f7128614c1dc8f2e6a51c0644353197792297528
Author: Jiwei Guo <te...@apache.org>
AuthorDate: Thu Apr 28 20:30:30 2022 +0800

    [improve][broker] Support shrink for ConcurrentSortedLongPairSet  (#15354)
    
    (cherry picked from commit 24d4d76bb9e39010bae3f4cbd8ddba6422570b4e)
---
 .../persistent/MessageRedeliveryController.java    |  2 +-
 .../util/collections/ConcurrentLongPairSet.java    | 53 ++++++++++++----------
 .../collections/ConcurrentSortedLongPairSet.java   | 27 +++++++++--
 .../common/util/collections/LongPairSet.java       |  7 +++
 .../ConcurrentSortedLongPairSetTest.java           | 43 ++++++++++++++++++
 5 files changed, 105 insertions(+), 27 deletions(-)

diff --git a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
index c7f96fffcef..46fa1b2b050 100644
--- a/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
+++ b/pulsar-broker/src/main/java/org/apache/pulsar/broker/service/persistent/MessageRedeliveryController.java
@@ -36,7 +36,7 @@ public class MessageRedeliveryController {
     private final ConcurrentLongLongPairHashMap hashesToBeBlocked;
 
     public MessageRedeliveryController(boolean allowOutOfOrderDelivery) {
-        this.messagesToRedeliver = new ConcurrentSortedLongPairSet(128, 2);
+        this.messagesToRedeliver = new ConcurrentSortedLongPairSet(128, 2, true);
         this.hashesToBeBlocked = allowOutOfOrderDelivery
                 ? null
                 : ConcurrentLongLongPairHashMap
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
index 66ecaee4bfa..7b5e75813fa 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
@@ -175,6 +175,7 @@ public class ConcurrentLongPairSet implements LongPairSet {
         return size;
     }
 
+    @Override
     public long capacity() {
         long capacity = 0;
         for (int i = 0; i < sections.length; i++) {
@@ -447,20 +448,7 @@ public class ConcurrentLongPairSet implements LongPairSet {
                     bucket = (bucket + 2) & (table.length - 1);
                 }
             } finally {
-                if (autoShrink && size < resizeThresholdBelow) {
-                    try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
-                        int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
-                        if (newCapacity < capacity && newResizeThresholdUp > size) {
-                            // shrink the hashmap
-                            rehash(newCapacity);
-                        }
-                    } finally {
-                        unlockWrite(stamp);
-                    }
-                } else {
-                    unlockWrite(stamp);
-                }
+                tryShrinkThenUnlock(stamp);
             }
         }
 
@@ -469,23 +457,42 @@ public class ConcurrentLongPairSet implements LongPairSet {
             int removedItems = 0;
 
             // Go through all the buckets for this section
-            for (int bucket = 0; bucket < table.length; bucket += 2) {
-                long storedItem1 = table[bucket];
-                long storedItem2 = table[bucket + 1];
-
-                if (storedItem1 != DeletedItem && storedItem1 != EmptyItem) {
-                    if (filter.test(storedItem1, storedItem2)) {
-                        long h = hash(storedItem1, storedItem2);
-                        if (remove(storedItem1, storedItem2, (int) h)) {
+            long stamp = writeLock();
+            try {
+                for (int bucket = 0; bucket < table.length; bucket += 2) {
+                    long storedItem1 = table[bucket];
+                    long storedItem2 = table[bucket + 1];
+                    if (storedItem1 != DeletedItem && storedItem1 != EmptyItem) {
+                        if (filter.test(storedItem1, storedItem2)) {
+                            SIZE_UPDATER.decrementAndGet(this);
+                            cleanBucket(bucket);
                             removedItems++;
                         }
                     }
                 }
+            } finally {
+                tryShrinkThenUnlock(stamp);
             }
-
             return removedItems;
         }
 
+        private void tryShrinkThenUnlock(long stamp) {
+            if (autoShrink && size < resizeThresholdBelow) {
+                try {
+                    int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                    int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
+                    if (newCapacity < capacity && newResizeThresholdUp > size) {
+                        // shrink the hashmap
+                        rehash(newCapacity);
+                    }
+                } finally {
+                    unlockWrite(stamp);
+                }
+            } else {
+                unlockWrite(stamp);
+            }
+        }
+
         private void cleanBucket(int bucket) {
             int nextInArray = (bucket + 2) & (table.length - 1);
             if (table[nextInArray] == EmptyItem) {
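
For intuition, the condition in `tryShrinkThenUnlock` works out as follows
with illustrative numbers (defaults assumed: `mapFillFactor` = 0.66f,
`shrinkFactor` = 2.0f; `alignToPowerOfTwo` and `rehash` are the internal
helpers seen above):

    // capacity = 16, size = 3, and size < resizeThresholdBelow already holds
    int newCapacity = alignToPowerOfTwo((int) (16 / 2.0f));  // -> 8
    int newResizeThresholdUp = (int) (8 * 0.66f);            // -> 5
    // 8 < 16 and 5 > 3, so rehash(8) halves the table. Had size been 5 or
    // more, the shrink would be skipped, avoiding an immediate re-expansion.
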
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
index e4cb668fc92..06efd0490d1 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSet.java
@@ -48,14 +48,15 @@ import org.apache.pulsar.common.util.collections.ConcurrentLongPairSet.LongPairC
 public class ConcurrentSortedLongPairSet implements LongPairSet {
 
     protected final NavigableMap<Long, ConcurrentLongPairSet> longPairSets = new ConcurrentSkipListMap<>();
-    private int expectedItems;
-    private int concurrencyLevel;
+    private final int expectedItems;
+    private final int concurrencyLevel;
     /**
      * If {@link #longPairSets} adds and removes the item-set frequently then it allocates and removes
      * {@link ConcurrentLongPairSet} for the same item multiple times, which can lead to GC pauses. To
      * avoid this, empty LongPairSets are not removed until the number of sets reaches the max limit.
      */
-    private int maxAllowedSetOnRemove;
+    private final int maxAllowedSetOnRemove;
+    private final boolean autoShrink;
     private static final int DEFAULT_MAX_ALLOWED_SET_ON_REMOVE = 10;
 
     public ConcurrentSortedLongPairSet() {
@@ -70,10 +71,20 @@ public class ConcurrentSortedLongPairSet implements LongPairSet {
         this(expectedItems, concurrencyLevel, DEFAULT_MAX_ALLOWED_SET_ON_REMOVE);
     }
 
+    public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, boolean autoShrink) {
+        this(expectedItems, concurrencyLevel, DEFAULT_MAX_ALLOWED_SET_ON_REMOVE, autoShrink);
+    }
+
     public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, int maxAllowedSetOnRemove) {
+        this(expectedItems, concurrencyLevel, maxAllowedSetOnRemove, false);
+    }
+
+    public ConcurrentSortedLongPairSet(int expectedItems, int concurrencyLevel, int maxAllowedSetOnRemove,
+                                       boolean autoShrink) {
         this.expectedItems = expectedItems;
         this.concurrencyLevel = concurrencyLevel;
         this.maxAllowedSetOnRemove = maxAllowedSetOnRemove;
+        this.autoShrink = autoShrink;
     }
 
     @Override
@@ -82,6 +93,7 @@ public class ConcurrentSortedLongPairSet implements LongPairSet {
                 (key) -> ConcurrentLongPairSet.newBuilder()
                         .expectedItems(expectedItems)
                         .concurrencyLevel(concurrencyLevel)
+                        .autoShrink(autoShrink)
                         .build());
         return messagesToReplay.add(item1, item2);
     }
@@ -194,6 +206,15 @@ public class ConcurrentSortedLongPairSet implements LongPairSet {
         return size.get();
     }
 
+    @Override
+    public long capacity() {
+        AtomicLong capacity = new AtomicLong(0);
+        longPairSets.forEach((item1, longPairSet) -> {
+            capacity.getAndAdd(longPairSet.capacity());
+        });
+        return capacity.get();
+    }
+
     @Override
     public boolean contains(long item1, long item2) {
         ConcurrentLongPairSet longPairSet = longPairSets.get(item1);
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java
index 32de7e4c232..f27b994f777 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/LongPairSet.java
@@ -107,6 +107,13 @@ public interface LongPairSet {
      */
     long size();
 
+    /**
+     * Returns the current capacity of the set.
+     *
+     * @return the current capacity
+     */
+    long capacity();
+
     /**
      * Checks if given (item1,item2) composite value exists into set.
      *
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java
index fcb9884a795..62dfa21dc81 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentSortedLongPairSetTest.java
@@ -22,6 +22,7 @@ import static org.testng.Assert.assertEquals;
 import static org.testng.Assert.assertFalse;
 import static org.testng.Assert.assertNotEquals;
 import static org.testng.Assert.assertTrue;
+import com.google.common.collect.ComparisonChain;
 import com.google.common.collect.Lists;
 import java.util.ArrayList;
 import java.util.List;
@@ -181,6 +182,20 @@ public class ConcurrentSortedLongPairSetTest {
         values = new ArrayList<>(set.items());
         values.sort(null);
         assertEquals(values, Lists.newArrayList(new LongPair(6, 6), new LongPair(7, 7)));
+
+        set = new ConcurrentSortedLongPairSet(128, 2, true);
+        set.add(2, 2);
+        set.add(1, 3);
+        set.add(3, 1);
+        set.add(2, 1);
+        set.add(3, 2);
+        set.add(1, 2);
+        set.add(1, 1);
+        removeItems = set.removeIf((ledgerId, entryId) -> {
+            return ComparisonChain.start().compare(ledgerId, 1).compare(entryId, 3)
+                    .result() <= 0;
+        });
+        assertEquals(removeItems, 3);
     }
 
     @Test
@@ -245,4 +260,32 @@ public class ConcurrentSortedLongPairSetTest {
         set.add(1, 1);
         assertFalse(set.isEmpty());
     }
+
+    @Test
+    public void testShrink() {
+        LongPairSet set = new ConcurrentSortedLongPairSet(2, 1, true);
+        set.add(0, 0);
+        assertTrue(set.capacity() == 4);
+        set.add(0, 1);
+        assertTrue(set.capacity() == 4);
+        set.add(1, 1);
+        assertTrue(set.capacity() == 8);
+        set.add(1, 2);
+        assertTrue(set.capacity() == 8);
+        set.add(1, 3);
+        set.add(1, 4);
+        set.add(1, 5);
+        assertTrue(set.capacity() == 12);
+        set.remove(1, 5);
+        // not shrunk yet
+        assertTrue(set.capacity() == 12);
+        set.remove(1, 4);
+        // the internal map does not keep shrinking at every remove() operation
+        assertTrue(set.capacity() == 12);
+        set.remove(1, 3);
+        set.remove(1, 2);
+        set.remove(1, 1);
+        // shrink
+        assertTrue(set.capacity() == 8);
+    }
 }
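
The capacity steps asserted in testShrink follow from the sharding scheme:
ConcurrentSortedLongPairSet keeps one inner ConcurrentLongPairSet per item1
value, and the new capacity() simply sums the inner capacities. A condensed
sketch of the new constructor:

    // expectedItems = 2, concurrencyLevel = 1, autoShrink = true
    LongPairSet set = new ConcurrentSortedLongPairSet(2, 1, true);
    set.add(0, 0);                 // inner set for item1 = 0 created: capacity 4
    set.add(1, 1);                 // inner set for item1 = 1 created: 4 + 4 = 8
    set.add(1, 2); set.add(1, 3);
    set.add(1, 4); set.add(1, 5);  // inner set for item1 = 1 expands: 4 + 8 = 12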


[pulsar] 04/26: Reduce unnecessary expansions for ConcurrentLong map and set (#14562)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 3ebc23e4bff05af9c79a41bf3f9cd17f777f6c6f
Author: LinChen <15...@qq.com>
AuthorDate: Tue Mar 15 11:41:08 2022 +0800

    Reduce unnecessary expansions for ConcurrentLong map and set (#14562)
    
    (cherry picked from commit 8e7006f899bd2b9ed9482ab2ce1ee35233957d03)
---
 .../util/collections/ConcurrentLongHashMap.java    | 10 ++++++
 .../util/collections/ConcurrentLongPairSet.java    | 11 ++++---
 .../util/collections/ConcurrentOpenHashMap.java    | 19 ++++++++++++
 .../util/collections/ConcurrentOpenHashSet.java    | 18 +++++++++++
 .../collections/ConcurrentLongHashMapTest.java     | 19 ++++++++++++
 .../collections/ConcurrentLongPairSetTest.java     | 19 ++++++++++++
 .../collections/ConcurrentOpenHashMapTest.java     | 19 ++++++++++++
 .../collections/ConcurrentOpenHashSetTest.java     | 36 +++++++++++++++++-----
 8 files changed, 138 insertions(+), 13 deletions(-)

diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
index d8b0c32cd3c..90aa61a6d9b 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMap.java
@@ -451,6 +451,16 @@ public class ConcurrentLongHashMap<V> {
                             if (nextValueInArray == EmptyValue) {
                                 values[bucket] = (V) EmptyValue;
                                 --usedBuckets;
+
+                                // Cleanup all the buckets that were in `DeletedValue` state,
+                                // so that we can reduce unnecessary expansions
+                                int lastBucket = signSafeMod(bucket - 1, capacity);
+                                while (values[lastBucket] == DeletedValue) {
+                                    values[lastBucket] = (V) EmptyValue;
+                                    --usedBuckets;
+
+                                    lastBucket = signSafeMod(lastBucket - 1, capacity);
+                                }
                             } else {
                                 values[bucket] = (V) DeletedValue;
                             }
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
index abbe11576a9..66ecaee4bfa 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSet.java
@@ -493,14 +493,15 @@ public class ConcurrentLongPairSet implements LongPairSet {
                 table[bucket + 1] = EmptyItem;
                 --usedBuckets;
 
-                // Cleanup all the buckets that were in `DeletedKey` state,
+                // Cleanup all the buckets that were in `DeletedItem` state,
                 // so that we can reduce unnecessary expansions
-                bucket = (bucket - 1) & (table.length - 1);
-                while (table[bucket] == DeletedItem) {
-                    table[bucket] = EmptyItem;
+                int lastBucket = (bucket - 2) & (table.length - 1);
+                while (table[lastBucket] == DeletedItem) {
+                    table[lastBucket] = EmptyItem;
+                    table[lastBucket + 1] = EmptyItem;
                     --usedBuckets;
 
-                    bucket = (bucket - 1) & (table.length - 1);
+                    lastBucket = (lastBucket - 2) & (table.length - 1);
                 }
             } else {
                 table[bucket] = DeletedItem;
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
index 1ccbeb3b6b5..e039079eeb3 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMap.java
@@ -173,6 +173,14 @@ public class ConcurrentOpenHashMap<K, V> {
         }
     }
 
+    long getUsedBucketCount() {
+        long usedBucketCount = 0;
+        for (Section<K, V> s : sections) {
+            usedBucketCount += s.usedBuckets;
+        }
+        return usedBucketCount;
+    }
+
     public long size() {
         long size = 0;
         for (Section<K, V> s : sections) {
@@ -441,6 +449,17 @@ public class ConcurrentOpenHashMap<K, V> {
                                 table[bucket] = EmptyKey;
                                 table[bucket + 1] = null;
                                 --usedBuckets;
+
+                                // Cleanup all the buckets that were in `DeletedKey` state,
+                                // so that we can reduce unnecessary expansions
+                                int lastBucket = (bucket - 2) & (table.length - 1);
+                                while (table[lastBucket] == DeletedKey) {
+                                    table[lastBucket] = EmptyKey;
+                                    table[lastBucket + 1] = null;
+                                    --usedBuckets;
+
+                                    lastBucket = (lastBucket - 2) & (table.length - 1);
+                                }
                             } else {
                                 table[bucket] = DeletedKey;
                                 table[bucket + 1] = null;
diff --git a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
index 28f0df0ff20..6dd6e6a4b63 100644
--- a/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
+++ b/pulsar-common/src/main/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSet.java
@@ -152,6 +152,14 @@ public class ConcurrentOpenHashSet<V> {
         }
     }
 
+    long getUsedBucketCount() {
+        long usedBucketCount = 0;
+        for (Section<V> s : sections) {
+            usedBucketCount += s.usedBuckets;
+        }
+        return usedBucketCount;
+    }
+
     public long size() {
         long size = 0;
         for (int i = 0; i < sections.length; i++) {
@@ -477,6 +485,16 @@ public class ConcurrentOpenHashSet<V> {
             if (values[nextInArray] == EmptyValue) {
                 values[bucket] = (V) EmptyValue;
                 --usedBuckets;
+
+                // Cleanup all the buckets that were in `DeletedValue` state,
+                // so that we can reduce unnecessary expansions
+                int lastBucket = signSafeMod(bucket - 1, capacity);
+                while (values[lastBucket] == DeletedValue) {
+                    values[lastBucket] = (V) EmptyValue;
+                    --usedBuckets;
+
+                    lastBucket = signSafeMod(lastBucket - 1, capacity);
+                }
             } else {
                 values[bucket] = (V) DeletedValue;
             }
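
The cleanup added to these collections addresses a classic open-addressing
issue: remove() leaves a tombstone (DeletedValue/DeletedKey/DeletedItem) so
that probe chains stay intact, but tombstones still count toward usedBuckets
and can push a section into an expansion even when few live entries remain.
Once a removal lands next to an empty bucket, the trailing run of tombstones
is unreachable and can be reclaimed by the backward walk. An illustrative
trace over one section (D = tombstone, E = empty):

    // before remove(c):  [a][D][D][c][E]   usedBuckets = 4
    // remove(c): the next bucket is empty, so c's bucket becomes E,
    // and the backward walk then clears the adjacent tombstones:
    // after remove(c):   [a][E][E][E][E]   usedBuckets = 1
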
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
index 6cf126cf2ff..205cf91b47d 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongHashMapTest.java
@@ -107,6 +107,25 @@ public class ConcurrentLongHashMapTest {
         assertEquals(map.size(), 3);
     }
 
+    @Test
+    public void testReduceUnnecessaryExpansions() {
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .build();
+        assertNull(map.put(1, "v1"));
+        assertNull(map.put(2, "v2"));
+        assertNull(map.put(3, "v3"));
+        assertNull(map.put(4, "v4"));
+
+        assertTrue(map.remove(1, "v1"));
+        assertTrue(map.remove(2, "v2"));
+        assertTrue(map.remove(3, "v3"));
+        assertTrue(map.remove(4, "v4"));
+
+        assertEquals(0, map.getUsedBucketCount());
+    }
+
     @Test
     public void testClear() {
         ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
index a8d3e1d0603..86030f21619 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentLongPairSetTest.java
@@ -74,6 +74,25 @@ public class ConcurrentLongPairSetTest {
         }
     }
 
+    @Test
+    public void testReduceUnnecessaryExpansions() {
+        ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .build();
+        assertTrue(set.add(1, 1));
+        assertTrue(set.add(2, 2));
+        assertTrue(set.add(3, 3));
+        assertTrue(set.add(4, 4));
+
+        assertTrue(set.remove(1, 1));
+        assertTrue(set.remove(2, 2));
+        assertTrue(set.remove(3, 3));
+        assertTrue(set.remove(4, 4));
+
+        assertEquals(0, set.getUsedBucketCount());
+    }
+
     @Test
     public void simpleInsertions() {
         ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder()
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
index 7919485d9b6..cec52ea3ded 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashMapTest.java
@@ -109,6 +109,25 @@ public class ConcurrentOpenHashMapTest {
         assertEquals(map.size(), 3);
     }
 
+    @Test
+    public void testReduceUnnecessaryExpansions() {
+        ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .build();
+        assertNull(map.put("1", "1"));
+        assertNull(map.put("2", "2"));
+        assertNull(map.put("3", "3"));
+        assertNull(map.put("4", "4"));
+
+        assertEquals(map.remove("1"), "1");
+        assertEquals(map.remove("2"), "2");
+        assertEquals(map.remove("3"), "3");
+        assertEquals(map.remove("4"), "4");
+
+        assertEquals(0, map.getUsedBucketCount());
+    }
+
     @Test
     public void testClear() {
         ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder()
diff --git a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
index af62948b64a..6c82293bec2 100644
--- a/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
+++ b/pulsar-common/src/test/java/org/apache/pulsar/common/util/collections/ConcurrentOpenHashSetTest.java
@@ -91,24 +91,44 @@ public class ConcurrentOpenHashSetTest {
         assertEquals(set.size(), 3);
     }
 
+    @Test
+    public void testReduceUnnecessaryExpansions() {
+        ConcurrentOpenHashSet<String> set =
+                ConcurrentOpenHashSet.<String>newBuilder()
+                        .expectedItems(2)
+                        .concurrencyLevel(1)
+                        .build();
+
+        assertTrue(set.add("1"));
+        assertTrue(set.add("2"));
+        assertTrue(set.add("3"));
+        assertTrue(set.add("4"));
+
+        assertTrue(set.remove("1"));
+        assertTrue(set.remove("2"));
+        assertTrue(set.remove("3"));
+        assertTrue(set.remove("4"));
+        assertEquals(0, set.getUsedBucketCount());
+    }
+
     @Test
     public void testClear() {
-        ConcurrentOpenHashSet<String> map =
+        ConcurrentOpenHashSet<String> set =
                 ConcurrentOpenHashSet.<String>newBuilder()
                 .expectedItems(2)
                 .concurrencyLevel(1)
                 .autoShrink(true)
                 .mapIdleFactor(0.25f)
                 .build();
-        assertTrue(map.capacity() == 4);
+        assertTrue(set.capacity() == 4);
 
-        assertTrue(map.add("k1"));
-        assertTrue(map.add("k2"));
-        assertTrue(map.add("k3"));
+        assertTrue(set.add("k1"));
+        assertTrue(set.add("k2"));
+        assertTrue(set.add("k3"));
 
-        assertTrue(map.capacity() == 8);
-        map.clear();
-        assertTrue(map.capacity() == 4);
+        assertTrue(set.capacity() == 8);
+        set.clear();
+        assertTrue(set.capacity() == 4);
     }
 
     @Test


[pulsar] 16/26: [C++] Remove the flaky and meaningless tests (#15271)

Posted by pe...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

penghui pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/pulsar.git

commit 83b68332af00845b412eec713617349aef4302c2
Author: Yunze Xu <xy...@163.com>
AuthorDate: Fri Apr 22 23:48:42 2022 +0800

    [C++] Remove the flaky and meaningless tests (#15271)
    
    Fixes #13849
    Fixes #14848
    
    ### Motivation
    
    #11570 adds a `testSendAsyncCloseAsyncConcurrentlyWithLazyProducers` for
    the case where `sendAsync` calls invoked after `closeAsync` has been
    called in another thread must complete with `ResultAlreadyClosed`.
    It's flaky because the synchronization between two threads is not
    strict. This test uses `sendStartLatch` for the order of `sendAsync` and
    `closeAsync`:
    
    ```
    sendAsync 0,1,...,9 -> sendStartLatch is done -> closeAsync
    ```
    
    However, it cannot guarantee that the remaining `sendAsync` calls happen
    after `closeAsync` is called. If they all happen before it, every
    `sendAsync` call will complete with `ResultOk`.
    
    On the other hand, this test is meaningless: it requires strict
    synchronization between the two threads, so there is no need to run
    `sendAsync` and `closeAsync` in separate threads at all.
    
    The verification of this test is also wrong, see
    https://github.com/apache/pulsar/issues/13849#issuecomment-1079098248.
    When `closeAsync` is called, the previous `sendAsync` calls might not have
    completed yet, so all `sendAsync` calls will complete with
    `ResultAlreadyClosed`, not only those invoked after `closeAsync`.
    
    In addition, this PR also tries to fix the flaky `testReferenceCount`,
    whose assertion on the reference count was too strict.
    
    ### Modifications
    
    - Remove `testSendAsyncCloseAsyncConcurrentlyWithLazyProducers`
    - Only check the reference count is greater than 0 instead of equal to 1
    
    (cherry picked from commit eeea9ca1f6eeef1248b7fe8f36be30be835d2480)
---
 pulsar-client-cpp/tests/ClientTest.cc   |  2 +-
 pulsar-client-cpp/tests/ProducerTest.cc | 83 ---------------------------------
 2 files changed, 1 insertion(+), 84 deletions(-)

diff --git a/pulsar-client-cpp/tests/ClientTest.cc b/pulsar-client-cpp/tests/ClientTest.cc
index 32814169631..135c3f19b51 100644
--- a/pulsar-client-cpp/tests/ClientTest.cc
+++ b/pulsar-client-cpp/tests/ClientTest.cc
@@ -224,7 +224,7 @@ TEST(ClientTest, testReferenceCount) {
         LOG_INFO("Reference count of the reader's underlying consumer: " << consumers[1].use_count());
 
         readerWeakPtr = PulsarFriend::getReaderImplWeakPtr(reader);
-        ASSERT_EQ(readerWeakPtr.use_count(), 1);
+        ASSERT_TRUE(readerWeakPtr.use_count() > 0);
         LOG_INFO("Reference count of the reader: " << readerWeakPtr.use_count());
     }
 
diff --git a/pulsar-client-cpp/tests/ProducerTest.cc b/pulsar-client-cpp/tests/ProducerTest.cc
index b5d7c617245..65676f8b6ef 100644
--- a/pulsar-client-cpp/tests/ProducerTest.cc
+++ b/pulsar-client-cpp/tests/ProducerTest.cc
@@ -160,89 +160,6 @@ TEST(ProducerTest, testSendAsyncAfterCloseAsyncWithLazyProducers) {
     ASSERT_EQ(ResultOk, result);
 }
 
-TEST(ProducerTest, testSendAsyncCloseAsyncConcurrentlyWithLazyProducers) {
-    // run sendAsync and closeAsync concurrently and verify that all sendAsync callbacks are called
-    // and that messages sent after closeAsync is invoked receive ResultAlreadyClosed.
-    for (int run = 0; run < 20; run++) {
-        LOG_INFO("Start of run " << run);
-        Client client(serviceUrl);
-        const std::string partitionedTopic =
-            "testProducerIsConnectedPartitioned-" + std::to_string(time(nullptr));
-
-        int res = makePutRequest(
-            adminUrl + "admin/v2/persistent/public/default/" + partitionedTopic + "/partitions", "10");
-        ASSERT_TRUE(res == 204 || res == 409) << "res: " << res;
-
-        ProducerConfiguration producerConfiguration;
-        producerConfiguration.setLazyStartPartitionedProducers(true);
-        producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition);
-        producerConfiguration.setBatchingEnabled(true);
-        Producer producer;
-        ASSERT_EQ(ResultOk, client.createProducer(partitionedTopic, producerConfiguration, producer));
-
-        int sendCount = 100;
-        std::vector<Promise<Result, MessageId>> promises(sendCount);
-        Promise<bool, Result> promiseClose;
-
-        // only call closeAsync once at least 10 messages have been sent
-        Latch sendStartLatch(10);
-        Latch closeLatch(1);
-        int closedAt = 0;
-
-        std::thread t1([&]() {
-            for (int i = 0; i < sendCount; i++) {
-                sendStartLatch.countdown();
-                Message msg = MessageBuilder().setContent("test").build();
-
-                if (closeLatch.getCount() == 0 && closedAt == 0) {
-                    closedAt = i;
-                    LOG_INFO("closedAt set to " << closedAt)
-                }
-
-                producer.sendAsync(msg, WaitForCallbackValue<MessageId>(promises[i]));
-                std::this_thread::sleep_for(std::chrono::milliseconds(1));
-            }
-        });
-
-        std::thread t2([&]() {
-            sendStartLatch.wait(std::chrono::milliseconds(1000));
-            LOG_INFO("Closing");
-            producer.closeAsync(WaitForCallback(promiseClose));
-            LOG_INFO("Close called");
-            closeLatch.countdown();
-            Result result;
-            promiseClose.getFuture().get(result);
-            ASSERT_EQ(ResultOk, result);
-            LOG_INFO("Closed");
-        });
-
-        t1.join();
-        t2.join();
-
-        // make sure that all messages after the moment when closeAsync was invoked
-        // return AlreadyClosed
-        for (int i = 0; i < sendCount; i++) {
-            LOG_DEBUG("Checking " << i)
-
-            // whether a message was sent successfully or not, it's callback
-            // must have been invoked
-            ASSERT_EQ(true, promises[i].isComplete());
-            MessageId mi;
-            Result res = promises[i].getFuture().get(mi);
-            LOG_DEBUG("Result is " << res);
-
-            // for the messages sent after closeAsync was invoked, they
-            // should all return ResultAlreadyClosed
-            if (i >= closedAt) {
-                ASSERT_EQ(ResultAlreadyClosed, res);
-            }
-        }
-
-        client.close();
-        LOG_INFO("End of run " << run);
-    }
-}
-
 TEST(ProducerTest, testGetNumOfChunks) {
     ASSERT_EQ(ProducerImpl::getNumOfChunks(11, 5), 3);
     ASSERT_EQ(ProducerImpl::getNumOfChunks(10, 5), 2);
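
For context, the expected values are a plain ceiling division of the payload
size by the per-chunk limit; a sketch of the arithmetic in Java (assuming
getNumOfChunks implements exactly this, which the assertions suggest):

    static int getNumOfChunks(int totalSize, int maxChunkSize) {
        // ceil(totalSize / maxChunkSize) without floating point
        return (totalSize + maxChunkSize - 1) / maxChunkSize;
    }
    // getNumOfChunks(11, 5) == 3, getNumOfChunks(10, 5) == 2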