You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ha...@apache.org on 2023/03/06 09:23:56 UTC

[hbase] branch revert-4536-HBASE-25709 created (now 7abea1c4bac)

This is an automated email from the ASF dual-hosted git repository.

haxiaolin pushed a change to branch revert-4536-HBASE-25709
in repository https://gitbox.apache.org/repos/asf/hbase.git


      at 7abea1c4bac Revert "HBASE-25709 Close region may stuck when region is compacting and skipped most cells read (#4536)"

This branch includes the following new commits:

     new 7abea1c4bac Revert "HBASE-25709 Close region may stuck when region is compacting and skipped most cells read (#4536)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[hbase] 01/01: Revert "HBASE-25709 Close region may stuck when region is compacting and skipped most cells read (#4536)"

Posted by ha...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

haxiaolin pushed a commit to branch revert-4536-HBASE-25709
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 7abea1c4bac0bd72b8352055f45dc1698a7a5d28
Author: Xiaolin Ha <su...@gmail.com>
AuthorDate: Mon Mar 6 17:23:42 2023 +0800

    Revert "HBASE-25709 Close region may stuck when region is compacting and skipped most cells read (#4536)"
    
    This reverts commit 37858bb6b058f5af17d19aabedba69816b2941aa.
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  5 --
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java |  6 +-
 .../hbase/regionserver/compactions/Compactor.java  |  9 +--
 .../hadoop/hbase/mob/FaultyMobStoreCompactor.java  |  6 +-
 .../hadoop/hbase/regionserver/TestCompaction.java  |  1 -
 .../hadoop/hbase/regionserver/TestHRegion.java     | 69 ----------------------
 6 files changed, 6 insertions(+), 90 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index d56607653fc..26c105b40cb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -326,11 +326,6 @@ public final class HConstants {
   public static final String COMPACTION_KV_MAX = "hbase.hstore.compaction.kv.max";
   public static final int COMPACTION_KV_MAX_DEFAULT = 10;
 
-  /** Parameter name for the scanner size limit to be used in compactions */
-  public static final String COMPACTION_SCANNER_SIZE_MAX =
-    "hbase.hstore.compaction.scanner.size.limit";
-  public static final long COMPACTION_SCANNER_SIZE_MAX_DEFAULT = 10 * 1024 * 1024L; // 10MB
-
   /** Parameter name for HBase instance root directory */
   public static final String HBASE_DIR = "hbase.rootdir";
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 44f77b62ad8..695a632b243 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -345,10 +345,8 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
     long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
     boolean finished = false;
 
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
     long shippedCallSizeLimit =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index d9ad265da64..363fa29909e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -83,7 +83,6 @@ public abstract class Compactor<T extends CellSink> {
   protected final Configuration conf;
   protected final HStore store;
   protected final int compactionKVMax;
-  protected final long compactScannerSizeLimit;
   protected final Compression.Algorithm majorCompactionCompression;
   protected final Compression.Algorithm minorCompactionCompression;
 
@@ -110,8 +109,6 @@ public abstract class Compactor<T extends CellSink> {
     this.store = store;
     this.compactionKVMax =
       this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    this.compactScannerSizeLimit = this.conf.getLong(HConstants.COMPACTION_SCANNER_SIZE_MAX,
-      HConstants.COMPACTION_SCANNER_SIZE_MAX_DEFAULT);
     this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null)
       ? Compression.Algorithm.NONE
       : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType();
@@ -432,10 +429,8 @@ public abstract class Compactor<T extends CellSink> {
     String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
     long now = 0;
     boolean hasMore;
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
 
     throughputController.start(compactionName);
     Shipper shipper = (scanner instanceof Shipper) ? (Shipper) scanner : null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
index 943a7acb648..96675fb69e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java
@@ -142,10 +142,8 @@ public class FaultyMobStoreCompactor extends DefaultMobStoreCompactor {
     long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
     boolean finished = false;
 
-    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
-      .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
-        compactScannerSizeLimit)
-      .build();
+    ScannerContext scannerContext =
+      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
     long shippedCallSizeLimit =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index c0bc72079cb..c4d5d35f7d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -121,7 +121,6 @@ public class TestCompaction {
     // Set cache flush size to 1MB
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
-    conf.setLong(HConstants.COMPACTION_SCANNER_SIZE_MAX, 10L);
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
       NoLimitThroughputController.class.getName());
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4282d16fa6e..637aab9ff5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -6919,75 +6919,6 @@ public class TestHRegion {
     assertNull(r.getValue(fam1, q1));
   }
 
-  @Test
-  public void testTTLsUsingSmallHeartBeatCells() throws IOException {
-    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
-    EnvironmentEdgeManager.injectEdge(edge);
-
-    final byte[] row = Bytes.toBytes("testRow");
-    final byte[] q1 = Bytes.toBytes("q1");
-    final byte[] q2 = Bytes.toBytes("q2");
-    final byte[] q3 = Bytes.toBytes("q3");
-    final byte[] q4 = Bytes.toBytes("q4");
-    final byte[] q5 = Bytes.toBytes("q5");
-    final byte[] q6 = Bytes.toBytes("q6");
-    final byte[] q7 = Bytes.toBytes("q7");
-    final byte[] q8 = Bytes.toBytes("q8");
-
-    // 10 seconds
-    int ttlSecs = 10;
-    TableDescriptor tableDescriptor =
-      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(
-        ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(ttlSecs).build()).build();
-
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
-    // using small heart beat cells
-    conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 2);
-
-    region = HBaseTestingUtil.createRegionAndWAL(
-      RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(),
-      TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
-    assertNotNull(region);
-    long now = EnvironmentEdgeManager.currentTime();
-    // Add a cell that will expire in 5 seconds via cell TTL
-    region.put(new Put(row).addColumn(fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q3, now, HConstants.EMPTY_BYTE_ARRAY));
-    // Add a cell that will expire after 10 seconds via family setting
-    region
-      .put(new Put(row).addColumn(fam1, q4, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-    region
-      .put(new Put(row).addColumn(fam1, q5, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-
-    region.put(new Put(row).addColumn(fam1, q6, now, HConstants.EMPTY_BYTE_ARRAY));
-    region.put(new Put(row).addColumn(fam1, q7, now, HConstants.EMPTY_BYTE_ARRAY));
-    region
-      .put(new Put(row).addColumn(fam1, q8, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY));
-
-    // Flush so we are sure store scanning gets this right
-    region.flush(true);
-
-    // A query at time T+0 should return all cells
-    checkScan(8);
-    region.delete(new Delete(row).addColumn(fam1, q8));
-
-    // Increment time to T+ttlSecs seconds
-    edge.incrementTime(ttlSecs * 1000);
-    checkScan(2);
-  }
-
-  private void checkScan(int expectCellSize) throws IOException {
-    Scan s = new Scan().withStartRow(row);
-    ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
-    ScannerContext scannerContext = contextBuilder.build();
-    RegionScanner scanner = region.getScanner(s);
-    List<Cell> kvs = new ArrayList<>();
-    scanner.next(kvs, scannerContext);
-    assertEquals(expectCellSize, kvs.size());
-    scanner.close();
-  }
-
   @Test
   public void testIncrementTimestampsAreMonotonic() throws IOException {
     region = initHRegion(tableName, method, CONF, fam1);