You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2022/10/28 15:32:35 UTC

[hbase] branch branch-2 updated: HBASE-27437 TestHeapSize is flaky (#4841)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 7b0d705a1a5 HBASE-27437 TestHeapSize is flaky (#4841)
7b0d705a1a5 is described below

commit 7b0d705a1a5b6a705562ee4906e083e3f9770ad3
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Mon Oct 24 10:27:33 2022 +0800

    HBASE-27437 TestHeapSize is flaky (#4841)
    
    Signed-off-by: GeorryHuang <hu...@apache.org>
    (cherry picked from commit dad9a7da9298b7f2f396826487d4133e78810895)
---
 .../org/apache/hadoop/hbase/io/TestHeapSize.java   | 42 +++++++++++++++-------
 1 file changed, 29 insertions(+), 13 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index e7842202d17..38b093b997e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.io;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.lessThan;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -25,13 +27,16 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -69,7 +74,6 @@ import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -602,18 +606,30 @@ public class TestHeapSize {
     }
   }
 
-  @Test
-  public void testAutoCalcFixedOverHead() {
-    Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class,
-      HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class };
-    for (Class cl : classList) {
-      // do estimate in advance to ensure class is loaded
-      ClassSize.estimateBase(cl, false);
-
-      long startTime = EnvironmentEdgeManager.currentTime();
-      ClassSize.estimateBase(cl, false);
-      long endTime = EnvironmentEdgeManager.currentTime();
-      assertTrue(endTime - startTime < 5);
+  private long calcFixedOverhead(List<Class<?>> classList) {
+    long overhead = 0;
+    for (Class<?> clazz : classList) {
+      overhead += ClassSize.estimateBase(clazz, false);
     }
+    return overhead;
+  }
+
+  @Test
+  public void testAutoCalcFixedOverhead() throws InterruptedException {
+    List<Class<?>> classList = Arrays.asList(HFileContext.class, HRegion.class, BlockCacheKey.class,
+      HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class);
+    for (int i = 0; i < 10; i++) {
+      // warm up
+      calcFixedOverhead(classList);
+    }
+    long startNs = System.nanoTime();
+    long overhead = 0;
+    for (int i = 0; i < 100; i++) {
+      overhead += calcFixedOverhead(classList);
+    }
+    long costNs = System.nanoTime() - startNs;
+    LOG.info("overhead = {}, cost {} ns", overhead, costNs);
+    // on average, each single ClassSize.estimateBase call should cost less than 5ms
+    assertThat(costNs, lessThan(TimeUnit.MILLISECONDS.toNanos(5) * classList.size() * 100));
   }
 }