Posted to commits@hbase.apache.org by zh...@apache.org on 2021/11/21 13:11:39 UTC

[hbase] branch branch-2 updated: HBASE-26467 Fix bug for MemStoreLABImpl.forceCopyOfBigCellInto(Cell) (#3858)

This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 4312f81  HBASE-26467 Fix bug for MemStoreLABImpl.forceCopyOfBigCellInto(Cell) (#3858)
4312f81 is described below

commit 4312f817f358d102dd7a278b7b7aec2d942f26f1
Author: zhengzhuobinzzb <zh...@gmail.com>
AuthorDate: Sun Nov 21 20:59:11 2021 +0800

    HBASE-26467 Fix bug for MemStoreLABImpl.forceCopyOfBigCellInto(Cell) (#3858)
    
    Signed-off-by: Duo Zhang <zh...@apache.org>
    Reviewed-by: chenglei <ch...@apache.org>
---
 .../hadoop/hbase/regionserver/MemStoreLABImpl.java |  3 +-
 .../TestCompactingToCellFlatMapMemStore.java       | 28 +++++++++++++++++-
 .../hadoop/hbase/regionserver/TestMemStoreLAB.java | 33 +++++++++++++++++++++-
 3 files changed, 60 insertions(+), 4 deletions(-)
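
What the fix changes: before this patch, forceCopyOfBigCellInto added
ChunkCreator.SIZEOF_CHUNK_HEADER to the cell length up front, so the inflated
size flowed into the copy and into memstore size accounting. After the patch,
size stays the pure serialized cell length, and the chunk header only
participates in the check of whether the cell still fits in a regular data
chunk. Cells that do not fit are now copied at their true length instead of
coming back SIZEOF_CHUNK_HEADER bytes too large, which is exactly what the
new assertions in the two test files below verify.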

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
index f5fccf4..0d13e49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java
@@ -124,9 +124,8 @@ public class MemStoreLABImpl implements MemStoreLAB {
   @Override
   public Cell forceCopyOfBigCellInto(Cell cell) {
     int size = Segment.getCellLength(cell);
-    size += ChunkCreator.SIZEOF_CHUNK_HEADER;
     Preconditions.checkArgument(size >= 0, "negative size");
-    if (size <= dataChunkSize) {
+    if (size + ChunkCreator.SIZEOF_CHUNK_HEADER <= dataChunkSize) {
       // Using copyCellInto for cells which are bigger than the original maxAlloc
       return copyCellInto(cell, dataChunkSize);
     } else {
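
A minimal way to observe the fix from a caller's point of view, condensed
from the new testForceCopyOfBigCellInto further down (a sketch, not the
committed test: it assumes the ChunkCreator singleton has already been
initialized, as HBase's test harness does, and that the usual JUnit and
HBase imports from TestMemStoreLAB.java are in scope):

    Configuration conf = HBaseConfiguration.create();
    int chunkSize = ChunkCreator.getInstance().getChunkSize();
    conf.setInt(MemStoreLAB.CHUNK_SIZE_KEY, chunkSize);
    conf.setInt(MemStoreLAB.MAX_ALLOC_KEY, chunkSize / 2);
    MemStoreLABImpl mslab = new MemStoreLABImpl(conf);

    // A value as large as a whole chunk cannot fit in a regular data chunk
    // once the chunk header is accounted for, so it takes the jumbo path.
    KeyValue bigKV = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), EnvironmentEdgeManager.currentTime(),
        new byte[chunkSize]);

    Cell copied = mslab.forceCopyOfBigCellInto(bigKV);
    // Before the fix, this came back SIZEOF_CHUNK_HEADER bytes too large.
    assertEquals(bigKV.getSerializedSize(), copied.getSerializedSize());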
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
index c0f85b6..f621fef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -799,7 +800,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM
             + numOfCells * oneCellOnCCMHeapSize;
 
-    assertEquals(totalCellsLen+ChunkCreator.SIZEOF_CHUNK_HEADER, regionServicesForStores
+    assertEquals(totalCellsLen, regionServicesForStores
         .getMemStoreSize());
 
     assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
@@ -905,6 +906,31 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     }
   }
 
+  /**
+   * Test that a big cell keeps its serialized size after in-memory compaction. (HBASE-26467)
+   */
+  @Test
+  public void testBigCellSizeAfterInMemoryCompaction() throws IOException {
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
+    memstore.getConfiguration().setInt(MemStoreCompactionStrategy
+      .COMPACTING_MEMSTORE_THRESHOLD_KEY, 1);
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+      String.valueOf(compactionType));
+    ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
+
+    byte[] val = new byte[MemStoreLAB.CHUNK_SIZE_DEFAULT];
+
+    long size = addRowsByKeys(memstore, new String[]{"A"}, val);
+    ((MyCompactingMemStore) memstore).flushInMemory();
+
+    for (KeyValueScanner scanner : memstore.getScanners(Long.MAX_VALUE)) {
+      Cell cell;
+      while ((cell = scanner.next()) != null) {
+        assertEquals(size, cell.getSerializedSize());
+      }
+    }
+  }
+
 
   private long addRowsByKeysDataSize(final AbstractMemStore hmc, String[] keys) {
     byte[] fam = Bytes.toBytes("testfamily");
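
Note on the test changes in this file: the existing assertion drops the
ChunkCreator.SIZEOF_CHUNK_HEADER term because a big cell no longer inflates
the reported memstore data size by the header, and the new
testBigCellSizeAfterInMemoryCompaction writes a value of
MemStoreLAB.CHUNK_SIZE_DEFAULT bytes, forces an in-memory flush, and then
verifies through a scanner that every cell still reports the serialized size
it was written with.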
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
index e0ff50d..d9b1035 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.regionserver;
+import static org.apache.hadoop.hbase.regionserver.MemStoreLAB.CHUNK_SIZE_KEY;
+import static org.apache.hadoop.hbase.regionserver.MemStoreLAB.MAX_ALLOC_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -212,7 +215,7 @@ public class TestMemStoreLAB {
       Configuration conf = HBaseConfiguration.create();
       conf.setDouble(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.1);
       // set chunk size to default max alloc size, so we could easily trigger chunk retirement
-      conf.setLong(MemStoreLABImpl.CHUNK_SIZE_KEY, MemStoreLABImpl.MAX_ALLOC_DEFAULT);
+      conf.setLong(CHUNK_SIZE_KEY, MemStoreLABImpl.MAX_ALLOC_DEFAULT);
       // reconstruct mslab
       long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
           .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
@@ -266,6 +269,34 @@ public class TestMemStoreLAB {
     }
   }
 
+  /**
+   * Test that cells copied by forceCopyOfBigCellInto keep the right serialized length.
+   * (HBASE-26467)
+   */
+  @Test
+  public void testForceCopyOfBigCellInto() {
+    Configuration conf = HBaseConfiguration.create();
+    int chunkSize = ChunkCreator.getInstance().getChunkSize();
+    conf.setInt(CHUNK_SIZE_KEY, chunkSize);
+    conf.setInt(MAX_ALLOC_KEY, chunkSize / 2);
+
+    MemStoreLABImpl mslab = new MemStoreLABImpl(conf);
+    byte[] row = Bytes.toBytes("row");
+    byte[] columnFamily = Bytes.toBytes("columnFamily");
+    byte[] qualify = Bytes.toBytes("qualify");
+    byte[] smallValue = new byte[chunkSize / 2];
+    byte[] bigValue = new byte[chunkSize];
+    KeyValue smallKV = new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager
+      .currentTime(), smallValue);
+
+    assertEquals(smallKV.getSerializedSize(),
+      mslab.forceCopyOfBigCellInto(smallKV).getSerializedSize());
+
+    KeyValue bigKV = new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager
+      .currentTime(), bigValue);
+    assertEquals(bigKV.getSerializedSize(),
+      mslab.forceCopyOfBigCellInto(bigKV).getSerializedSize());
+  }
+
   private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName,
       Cell cellToCopyInto) {
     Thread thread = new Thread() {