Posted to commits@bookkeeper.apache.org by yo...@apache.org on 2022/07/26 12:39:55 UTC

[bookkeeper] branch master updated: Optimize concurrent collection's shrink logic (#3417)

This is an automated email from the ASF dual-hosted git repository.

yong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bookkeeper.git


The following commit(s) were added to refs/heads/master by this push:
     new a5805476cd Optimize concurrent collection's shrink logic (#3417)
a5805476cd is described below

commit a5805476cdd0bfb27aa162515989620ee64d694c
Author: wenbingshen <ol...@gmail.com>
AuthorDate: Tue Jul 26 20:39:50 2022 +0800

    Optimize concurrent collection's shrink logic (#3417)
    
    ### Motivation
    
    Optimize the concurrent collections' shrink and clear logic
    
    ### Changes
    1. Avoid redundant `Arrays.fill` passes in the `clear` path
    2. Skip `rehash` when `capacity` is already equal to `initCapacity`
    3. Reduce the `rehash` work in the `clear` path
    4. Never shrink below `initCapacity`: oscillating between shrinking and expanding around `initCapacity` would repeatedly allocate new arrays, consuming more memory and adding GC pressure (a condensed sketch of the new logic follows below).
    
    If this PR is accepted, I will apply the same optimization to the concurrent collections' shrink and clear logic in Pulsar.
    
    Related to #3061 and #3074
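    
    As a reference for reviewers, below is a minimal, self-contained sketch of the new shrink floor and `clear()` fast path. It is an illustration only: the class and field names are simplified stand-ins, the defaults are illustrative, the per-section `StampedLock` guarding is omitted, and `rehash` is stubbed; the authoritative logic is in the classes touched by this diff.
    
    ```java
    import java.util.Arrays;
    
    // Hypothetical, simplified model of a single open-addressing section
    // holding long keys only.
    class ShrinkSketch {
        private static final long EmptyKey = -1L;
    
        private long[] table;
        private int capacity;
        private int size;
        private int usedBuckets;
        private int resizeThresholdUp;
        private int resizeThresholdBelow;
        private final int initCapacity;
        private final float mapFillFactor = 0.66f;
        private final float mapIdleFactor = 0.25f;
        private final float shrinkFactor = 2.0f;
        private final boolean autoShrink = true;
    
        ShrinkSketch(int expectedCapacity) {
            this.initCapacity = alignToPowerOfTwo(expectedCapacity);
            this.capacity = initCapacity;
            this.table = newTable(capacity);
            updateThresholds();
        }
    
        // Called after a removal: shrink, but never below the initCapacity floor.
        void maybeShrink() {
            if (autoShrink && size < resizeThresholdBelow) {
                int newCapacity = Math.max(
                        alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                if (newCapacity < capacity && newResizeThresholdUp > size) {
                    rehash(newCapacity); // rehash only when capacity actually decreases
                }
            }
        }
    
        // clear(): allocate a fresh initCapacity table when the map has grown;
        // otherwise a single Arrays.fill and no rehash at all.
        void clear() {
            if (autoShrink && capacity > initCapacity) {
                shrinkToInitCapacity();
            } else {
                Arrays.fill(table, EmptyKey);
                size = 0;
                usedBuckets = 0;
            }
        }
    
        private void shrinkToInitCapacity() {
            table = newTable(initCapacity);
            size = 0;
            usedBuckets = 0;
            // capacity is updated after the table, so a reader never observes
            // a capacity value bigger than the actual array size
            capacity = initCapacity;
            updateThresholds();
        }
    
        private void rehash(int newCapacity) {
            // stub: the real code re-inserts all surviving entries into the new table
            table = newTable(newCapacity);
            capacity = newCapacity;
            usedBuckets = size;
            updateThresholds();
        }
    
        private void updateThresholds() {
            resizeThresholdUp = (int) (capacity * mapFillFactor);
            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
        }
    
        private static long[] newTable(int capacity) {
            long[] t = new long[capacity];
            Arrays.fill(t, EmptyKey);
            return t;
        }
    
        private static int alignToPowerOfTwo(int n) {
            return (int) Math.pow(2, 32 - Integer.numberOfLeadingZeros(n - 1));
        }
    }
    ```
    
    With a `shrinkFactor` of 2 and an `initCapacity` of 4, a section that expanded to 8 shrinks back to 4 at most once; further removals and `clear()` leave the capacity at the floor, which is what the new `testExpandShrinkAndClear` tests assert.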
---
 .../util/collections/ConcurrentLongHashMap.java    | 40 +++++++++++++++++-----
 .../util/collections/ConcurrentLongHashSet.java    | 31 +++++++++++++----
 .../collections/ConcurrentLongLongHashMap.java     | 35 ++++++++++++++-----
 .../collections/ConcurrentLongLongPairHashMap.java | 31 +++++++++++++----
 .../util/collections/ConcurrentOpenHashMap.java    | 36 +++++++++++++++----
 .../util/collections/ConcurrentOpenHashSet.java    | 30 ++++++++++++----
 .../collections/ConcurrentLongHashMapTest.java     | 33 ++++++++++++++++++
 .../collections/ConcurrentLongHashSetTest.java     | 34 ++++++++++++++++++
 .../collections/ConcurrentLongLongHashMapTest.java | 33 ++++++++++++++++++
 .../ConcurrentLongLongPairHashMapTest.java         | 32 +++++++++++++++++
 .../collections/ConcurrentOpenHashMapTest.java     | 33 ++++++++++++++++++
 .../collections/ConcurrentOpenHashSetTest.java     | 34 ++++++++++++++++++
 12 files changed, 361 insertions(+), 41 deletions(-)

diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMap.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMap.java
index d7ee024ceb..dd2b6c1bc8 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMap.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMap.java
@@ -507,7 +507,11 @@ public class ConcurrentLongHashMap<V> {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -556,7 +560,11 @@ public class ConcurrentLongHashMap<V> {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -575,12 +583,13 @@ public class ConcurrentLongHashMap<V> {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(keys, 0);
-                Arrays.fill(values, EmptyValue);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(keys, 0);
+                    Arrays.fill(values, EmptyValue);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -658,6 +667,21 @@ public class ConcurrentLongHashMap<V> {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            long[] newKeys = new long[initCapacity];
+            V[] newValues = (V[]) new Object[initCapacity];
+
+            keys = newKeys;
+            values = newValues;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static <V> void insertKeyValueNoLock(long[] keys, V[] values, long key, V value) {
             int bucket = (int) hash(key);
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSet.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSet.java
index 8243f3e813..a66de9ed8b 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSet.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSet.java
@@ -405,7 +405,11 @@ public class ConcurrentLongHashSet {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the set could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -444,11 +448,12 @@ public class ConcurrentLongHashSet {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(table, EmptyItem);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(table, EmptyItem);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -516,6 +521,20 @@ public class ConcurrentLongHashSet {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            long[] newTable = new long[initCapacity];
+            Arrays.fill(newTable, EmptyItem);
+
+            table = newTable;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static void insertKeyValueNoLock(long[] table, int capacity, long item) {
             int bucket = signSafeMod(hash(item), capacity);
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMap.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMap.java
index 6db4e32ad7..756ef0d7ec 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMap.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMap.java
@@ -657,7 +657,11 @@ public class ConcurrentLongLongHashMap {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -695,7 +699,7 @@ public class ConcurrentLongLongHashMap {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -734,7 +738,7 @@ public class ConcurrentLongLongHashMap {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -775,11 +779,12 @@ public class ConcurrentLongLongHashMap {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(table, EmptyKey);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(table, EmptyKey);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -850,6 +855,20 @@ public class ConcurrentLongLongHashMap {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            long[] newTable = new long[2 * initCapacity];
+            Arrays.fill(newTable, EmptyKey);
+
+            table = newTable;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static void insertKeyValueNoLock(long[] table, int capacity, long key, long value) {
             int bucket = signSafeMod(hash(key), capacity);
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMap.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMap.java
index a02e51fa44..2d31f26238 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMap.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMap.java
@@ -483,7 +483,11 @@ public class ConcurrentLongLongPairHashMap {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -531,11 +535,12 @@ public class ConcurrentLongLongPairHashMap {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(table, EmptyKey);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(table, EmptyKey);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -611,6 +616,20 @@ public class ConcurrentLongLongPairHashMap {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            long[] newTable = new long[4 * initCapacity];
+            Arrays.fill(newTable, EmptyKey);
+
+            table = newTable;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static void insertKeyValueNoLock(long[] table, int capacity, long key1, long key2, long value1,
                 long value2) {
             int bucket = signSafeMod(hash(key1, key2), capacity);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMap.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMap.java
index 0068a1e684..2690e69766 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMap.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMap.java
@@ -446,7 +446,11 @@ public class ConcurrentOpenHashMap<K, V> {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -465,11 +469,12 @@ public class ConcurrentOpenHashMap<K, V> {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(table, EmptyKey);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(table, EmptyKey);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -541,7 +546,11 @@ public class ConcurrentOpenHashMap<K, V> {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the map could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -601,6 +610,19 @@ public class ConcurrentOpenHashMap<K, V> {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            Object[] newTable = new Object[2 * initCapacity];
+
+            table = newTable;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static <K, V> void insertKeyValueNoLock(Object[] table, int capacity, K key, V value) {
             int bucket = signSafeMod(hash(key), capacity);
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSet.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSet.java
index 58ef4a00df..6f317f50c5 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSet.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSet.java
@@ -415,7 +415,11 @@ public class ConcurrentOpenHashSet<V> {
             } finally {
                 if (autoShrink && size < resizeThresholdBelow) {
                     try {
-                        int newCapacity = alignToPowerOfTwo((int) (capacity / shrinkFactor));
+                        // Shrinking must never bring capacity below initCapacity;
+                        // otherwise the set could oscillate between shrinking and
+                        // expanding around initCapacity, and the extra arrays
+                        // allocated by each resize consume more memory and hurt GC
+                        int newCapacity = Math.max(alignToPowerOfTwo((int) (capacity / shrinkFactor)), initCapacity);
                         int newResizeThresholdUp = (int) (newCapacity * mapFillFactor);
                         if (newCapacity < capacity && newResizeThresholdUp > size) {
                             // shrink the hashmap
@@ -434,11 +438,12 @@ public class ConcurrentOpenHashSet<V> {
             long stamp = writeLock();
 
             try {
-                Arrays.fill(values, EmptyValue);
-                this.size = 0;
-                this.usedBuckets = 0;
-                if (autoShrink) {
-                    rehash(initCapacity);
+                if (autoShrink && capacity > initCapacity) {
+                    shrinkToInitCapacity();
+                } else {
+                    Arrays.fill(values, EmptyValue);
+                    this.size = 0;
+                    this.usedBuckets = 0;
                 }
             } finally {
                 unlockWrite(stamp);
@@ -509,6 +514,19 @@ public class ConcurrentOpenHashSet<V> {
             resizeThresholdBelow = (int) (capacity * mapIdleFactor);
         }
 
+        private void shrinkToInitCapacity() {
+            V[] newValues = (V[]) new Object[initCapacity];
+
+            values = newValues;
+            size = 0;
+            usedBuckets = 0;
+            // Capacity needs to be updated after the values, so that we won't see
+            // a capacity value bigger than the actual array size
+            capacity = initCapacity;
+            resizeThresholdUp = (int) (capacity * mapFillFactor);
+            resizeThresholdBelow = (int) (capacity * mapIdleFactor);
+        }
+
         private static <V> void insertValueNoLock(V[] values, V value) {
             int bucket = (int) hash(value);
 
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMapTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMapTest.java
index 9c0739532a..290eaa68be 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMapTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashMapTest.java
@@ -154,6 +154,39 @@ public class ConcurrentLongHashMapTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.capacity() == 4);
+        assertNull(map.put(1, "v1"));
+        assertNull(map.put(2, "v2"));
+        assertNull(map.put(3, "v3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, "v1"));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, "v2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove(3, "v3"));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void simpleInsertions() {
         ConcurrentLongHashMap<String> map = ConcurrentLongHashMap.<String>newBuilder()
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSetTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSetTest.java
index 402e1a3390..044eaf0f7a 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSetTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongHashSetTest.java
@@ -297,6 +297,40 @@ public class ConcurrentLongHashSetTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentLongHashSet map = ConcurrentLongHashSet.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add(1));
+        assertTrue(map.add(2));
+        assertTrue(map.add(3));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove(3));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void testIteration() {
         ConcurrentLongHashSet set = ConcurrentLongHashSet.newBuilder().build();
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMapTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMapTest.java
index 1d4b4ed3aa..93279b14e2 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMapTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongHashMapTest.java
@@ -161,6 +161,39 @@ public class ConcurrentLongLongHashMapTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentLongLongHashMap map = ConcurrentLongLongHashMap.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.capacity() == 4);
+        assertTrue(map.put(1, 1) == -1);
+        assertTrue(map.put(2, 2) == -1);
+        assertTrue(map.put(3, 3) == -1);
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, 1));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, 2));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove(3, 3));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void testRemove() {
         ConcurrentLongLongHashMap map = ConcurrentLongLongHashMap.newBuilder().build();
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMapTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMapTest.java
index bdfe39c15a..3d0855a92a 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMapTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentLongLongPairHashMapTest.java
@@ -177,6 +177,38 @@ public class ConcurrentLongLongPairHashMapTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.put(1, 1, 11, 11));
+        assertTrue(map.put(2, 2, 22, 22));
+        assertTrue(map.put(3, 3, 33, 33));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove(1, 1, 11, 11));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove(2, 2, 22, 22));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove(3, 3, 33, 33));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void testNegativeUsedBucketCount() {
         ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap.newBuilder()
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMapTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMapTest.java
index 0e919f6527..9fd3ff29cc 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMapTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashMapTest.java
@@ -182,6 +182,39 @@ public class ConcurrentOpenHashMapTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.capacity() == 4);
+        assertNull(map.put("k1", "v1"));
+        assertNull(map.put("k2", "v2"));
+        assertNull(map.put("k3", "v3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove("k1", "v1"));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove("k2", "v2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove("k3", "v3"));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void testRemove() {
         ConcurrentOpenHashMap<String, String> map =
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSetTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSetTest.java
index e49c103c78..2a5ea4c4c8 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSetTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/collections/ConcurrentOpenHashSetTest.java
@@ -158,6 +158,40 @@ public class ConcurrentOpenHashSetTest {
         assertTrue(map.capacity() == 8);
     }
 
+    @Test
+    public void testExpandShrinkAndClear() {
+        ConcurrentOpenHashSet<String> map = ConcurrentOpenHashSet.<String>newBuilder()
+                .expectedItems(2)
+                .concurrencyLevel(1)
+                .autoShrink(true)
+                .mapIdleFactor(0.25f)
+                .build();
+        final long initCapacity = map.capacity();
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.add("k1"));
+        assertTrue(map.add("k2"));
+        assertTrue(map.add("k3"));
+
+        // expand hashmap
+        assertTrue(map.capacity() == 8);
+
+        assertTrue(map.remove("k1"));
+        // not shrink
+        assertTrue(map.capacity() == 8);
+        assertTrue(map.remove("k2"));
+        // shrink hashmap
+        assertTrue(map.capacity() == 4);
+
+        assertTrue(map.remove("k3"));
+        // Will not shrink the hashmap again, because the shrunk capacity would be
+        // below initCapacity; the current capacity equals the initial capacity
+        assertTrue(map.capacity() == initCapacity);
+        map.clear();
+        // after clear, the capacity already equals the initial capacity, so shrinkToInitCapacity is not invoked
+        assertTrue(map.capacity() == initCapacity);
+    }
+
     @Test
     public void testReduceUnnecessaryExpansions(){
         ConcurrentOpenHashSet<String> set =