You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by vj...@apache.org on 2020/04/01 10:58:48 UTC
[hbase] branch branch-2.3 updated: HBASE-23678 : Builder API for version management - setVersionsWithTim… (#1381)
This is an automated email from the ASF dual-hosted git repository.
vjasani pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.3 by this push:
new 88b8bba HBASE-23678 : Builder API for version management - setVersionsWithTim… (#1381)
88b8bba is described below
commit 88b8bba969ed009273965d0fc50437a51c055d2b
Author: Viraj Jasani <vj...@apache.org>
AuthorDate: Wed Apr 1 16:16:40 2020 +0530
HBASE-23678 : Builder API for version management - setVersionsWithTim… (#1381)
Signed-off-by: Xu Cang <xu...@apache.org>
---
.../client/ColumnFamilyDescriptorBuilder.java | 23 +++
.../hadoop/hbase/regionserver/TestMinVersions.java | 191 ++++++++++++++-------
2 files changed, 152 insertions(+), 62 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index da7bdda..88699d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -572,6 +572,12 @@ public class ColumnFamilyDescriptorBuilder {
return this;
}
+ public ColumnFamilyDescriptorBuilder setVersionsWithTimeToLive(final int retentionInterval,
+ final int versionAfterInterval) {
+ desc.setVersionsWithTimeToLive(retentionInterval, versionAfterInterval);
+ return this;
+ }
+
/**
* An ModifyableFamilyDescriptor contains information about a column family such as the
* number of versions, compression settings, etc.
@@ -944,6 +950,23 @@ public class ColumnFamilyDescriptorBuilder {
return setValue(MIN_VERSIONS_BYTES, Integer.toString(minVersions));
}
+ /**
+ * Retain all versions for a given TTL (retentionInterval), and then retain only a specific
+ * number of versions (versionAfterInterval) after that interval elapses.
+ *
+ * @param retentionInterval Retain all versions for this interval
+ * @param versionAfterInterval Number of versions to retain after retentionInterval elapses
+ * @return this (for chained invocation)
+ */
+ public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(
+ final int retentionInterval, final int versionAfterInterval) {
+ ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor =
+ setVersions(versionAfterInterval, Integer.MAX_VALUE);
+ modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval);
+ modifyableColumnFamilyDescriptor.setKeepDeletedCells(KeepDeletedCells.TTL);
+ return modifyableColumnFamilyDescriptor;
+ }
+
@Override
public boolean isBlockCacheEnabled() {
return getStringOrDefault(BLOCKCACHE_BYTES, Boolean::valueOf, DEFAULT_BLOCKCACHE);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
index 45160de..7bfbfcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
@@ -528,80 +530,145 @@ public class TestMinVersions {
int ttl = 4;
ColumnFamilyDescriptor cfd =
ColumnFamilyDescriptorBuilder.newBuilder(c0)
- .setMinVersions(2).setMaxVersions(Integer.MAX_VALUE).setTimeToLive(ttl).
- setKeepDeletedCells(KeepDeletedCells.TTL).build();
+ .setVersionsWithTimeToLive(ttl, 2).build();
+ verifyVersionedCellKeyValues(ttl, cfd);
+
+ cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0)
+ .setMinVersions(2)
+ .setMaxVersions(Integer.MAX_VALUE)
+ .setTimeToLive(ttl)
+ .setKeepDeletedCells(KeepDeletedCells.TTL)
+ .build();
+ verifyVersionedCellKeyValues(ttl, cfd);
+ }
+ private void verifyVersionedCellKeyValues(int ttl, ColumnFamilyDescriptor cfd)
+ throws IOException {
TableDescriptor htd = TableDescriptorBuilder.
newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build();
HRegion region = hbu.createLocalHRegion(htd, null, null);
- long startTS = EnvironmentEdgeManager.currentTime();
- ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge();
- injectEdge.setValue(startTS);
- EnvironmentEdgeManager.injectEdge(injectEdge);
+ try {
+ long startTS = EnvironmentEdgeManager.currentTime();
+ ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge();
+ injectEdge.setValue(startTS);
+ EnvironmentEdgeManager.injectEdge(injectEdge);
+
+ long ts = startTS - 2000;
+ putFourVersions(region, ts);
+
+ Get get;
+ Result result;
+
+ //check we can still see all versions before compaction
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts);
+ result = region.get(get);
+ checkResult(result, c0, T4, T3, T2, T1);
+
+ region.flush(true);
+ region.compact(true);
+ Assert.assertEquals(startTS, EnvironmentEdgeManager.currentTime());
+ long expiredTime = EnvironmentEdgeManager.currentTime() - ts - 4;
+ Assert.assertTrue("TTL for T1 has expired", expiredTime < (ttl * 1000));
+ //check that nothing was purged yet
+ verifyBeforeCompaction(region, ts);
+
+ injectEdge.incValue(ttl * 1000);
+
+ region.flush(true);
+ region.compact(true);
+ verifyAfterTtl(region, ts);
+ } finally {
+ HBaseTestingUtility.closeRegionAndWAL(region);
+ }
+ }
+
+ private void verifyAfterTtl(HRegion region, long ts) throws IOException {
+ Get get;
+ Result result;
+ //check that after compaction (which is after TTL) only T1 and T2 were purged
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts);
+ result = region.get(get);
+ checkResult(result, c0, T4, T3);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts - 1);
+ result = region.get(get);
+ checkResult(result, c0, T3);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimestamp(ts - 2);
+ result = region.get(get);
+ checkResult(result, c0, T3);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimestamp(ts - 3);
+ result = region.get(get);
+ Assert.assertEquals(result.getColumnCells(c0, c0).size(), 0);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts - 2);
+ result = region.get(get);
+ Assert.assertEquals(result.getColumnCells(c0, c0).size(), 0);
+ }
+
+ private void verifyBeforeCompaction(HRegion region, long ts) throws IOException {
+ Get get;
+ Result result;
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts);
+ result = region.get(get);
+ checkResult(result, c0, T4, T3, T2, T1);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts - 1);
+ result = region.get(get);
+ checkResult(result, c0, T3, T2, T1);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts - 2);
+ result = region.get(get);
+ checkResult(result, c0, T2, T1);
+
+ get = new Get(T1);
+ get.readAllVersions();
+ get.setTimeRange(0, ts - 3);
+ result = region.get(get);
+ checkResult(result, c0, T1);
+ }
- long ts = startTS - 2000;
+ private void putFourVersions(HRegion region, long ts) throws IOException {
// 1st version
- Put p = new Put(T1, ts-3);
- p.addColumn(c0, c0, T1);
- region.put(p);
+ Put put = new Put(T1, ts - 4);
+ put.addColumn(c0, c0, T1);
+ region.put(put);
// 2nd version
- p = new Put(T1, ts-2);
- p.addColumn(c0, c0, T2);
- region.put(p);
+ put = new Put(T1, ts - 3);
+ put.addColumn(c0, c0, T2);
+ region.put(put);
// 3rd version
- p = new Put(T1, ts-1);
- p.addColumn(c0, c0, T3);
- region.put(p);
-
- Get g;
- Result r;
-
- //check we can still see all versions before compaction
- g = new Get(T1);
- g.readAllVersions();
- g.setTimeRange(0, ts);
- r = region.get(g);
- checkResult(r, c0, T3, T2, T1);
-
- region.flush(true);
- region.compact(true);
- Assert.assertEquals(startTS, EnvironmentEdgeManager.currentTime());
- long expiredTime = EnvironmentEdgeManager.currentTime() - ts - 3;
- Assert.assertTrue("TTL for T1 has expired", expiredTime < (ttl * 1000));
- //check that nothing was purged yet
- g = new Get(T1);
- g.readAllVersions();
- g.setTimeRange(0, ts);
- r = region.get(g);
- checkResult(r, c0, T3, T2, T1);
-
- g = new Get(T1);
- g.readAllVersions();
- g.setTimeRange(0, ts -1);
- r = region.get(g);
- checkResult(r, c0, T2, T1);
-
- injectEdge.incValue(ttl * 1000);
-
- region.flush(true);
- region.compact(true);
-
- //check that after compaction (which is after TTL) that only T1 was purged
- g = new Get(T1);
- g.readAllVersions();
- g.setTimeRange(0, ts);
- r = region.get(g);
- checkResult(r, c0, T3, T2);
-
- g = new Get(T1);
- g.readAllVersions();
- g.setTimestamp(ts -2);
- r = region.get(g);
- checkResult(r, c0, T2);
+ put = new Put(T1, ts - 2);
+ put.addColumn(c0, c0, T3);
+ region.put(put);
+
+ // 4th version
+ put = new Put(T1, ts - 1);
+ put.addColumn(c0, c0, T4);
+ region.put(put);
}
private void checkResult(Result r, byte[] col, byte[] ... vals) {