You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by pa...@apache.org on 2018/06/06 18:46:52 UTC
[2/6] cassandra git commit: Re-examine commented out Compactions and
AntiCompactionsTest
Re-examine commented out Compactions and AntiCompactionsTest
Patch by Lerh Chuan Low; Reviewed by Paulo Motta for CASSANDRA-13698
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b8fb29a6
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b8fb29a6
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b8fb29a6
Branch: refs/heads/cassandra-3.11
Commit: b8fb29a6ba1304010ac78df3cecba6ab2fece4cd
Parents: 214a3ab
Author: Lerh Chuan Low <le...@instaclustr.com>
Authored: Fri Feb 9 11:14:12 2018 -0800
Committer: Paulo Motta <pa...@gmail.com>
Committed: Wed Jun 6 15:36:59 2018 -0300
----------------------------------------------------------------------
.../org/apache/cassandra/db/SystemKeyspace.java | 2 -
.../db/compaction/AntiCompactionTest.java | 13 +-
.../db/compaction/CompactionsTest.java | 328 ++++++++-----------
3 files changed, 141 insertions(+), 202 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b8fb29a6/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index 7ce74a1..5f5041c 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -40,8 +40,6 @@ import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.Futures;
-import org.apache.cassandra.concurrent.Stage;
-import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.cql3.QueryProcessor;
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b8fb29a6/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
index 841a22e..ead0349 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
@@ -199,18 +199,7 @@ public class AntiCompactionTest
}
@Test
- public void antiCompactTenSTC() throws InterruptedException, IOException
- {
- antiCompactTen("SizeTieredCompactionStrategy");
- }
-
- @Test
- public void antiCompactTenLC() throws InterruptedException, IOException
- {
- antiCompactTen("LeveledCompactionStrategy");
- }
-
- public void antiCompactTen(String compactionStrategy) throws InterruptedException, IOException
+ public void antiCompactTen() throws InterruptedException, IOException
{
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b8fb29a6/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
index 1530741..28725c7 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
@@ -18,6 +18,8 @@
*/
package org.apache.cassandra.db.compaction;
+import java.io.File;
+import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.TimeUnit;
@@ -30,13 +32,32 @@ import org.apache.cassandra.OrderedJUnit4ClassRunner;
import org.apache.cassandra.SchemaLoader;
import org.apache.cassandra.Util;
import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.cql3.ColumnIdentifier;
import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.AsciiType;
+import org.apache.cassandra.db.columniterator.SSTableIterator;
+import org.apache.cassandra.db.filter.ClusteringIndexSliceFilter;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.filter.DataLimits;
+import org.apache.cassandra.db.filter.RowFilter;
+import org.apache.cassandra.db.partitions.FilteredPartition;
+import org.apache.cassandra.db.partitions.ImmutableBTreePartition;
+import org.apache.cassandra.db.partitions.PartitionIterator;
+import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.ColumnData;
+import org.apache.cassandra.db.rows.Row;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.dht.*;
+import org.apache.cassandra.io.sstable.Component;
+import org.apache.cassandra.io.sstable.ISSTableScanner;
import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
import org.apache.cassandra.schema.CompactionParams;
import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableParams;
+import org.apache.cassandra.service.MigrationManager;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
@@ -75,13 +96,15 @@ public class CompactionsTest
SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD2),
SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD3),
SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD4),
- SchemaLoader.superCFMD(KEYSPACE1, CF_SUPER1, AsciiType.instance),
- SchemaLoader.superCFMD(KEYSPACE1, CF_SUPER5, AsciiType.instance),
- SchemaLoader.superCFMD(KEYSPACE1, CF_SUPERGC, AsciiType.instance)
+ SchemaLoader.standardCFMD(KEYSPACE1, CF_SUPER1),
+ SchemaLoader.standardCFMD(KEYSPACE1, CF_SUPER5),
+ SchemaLoader.standardCFMD(KEYSPACE1, CF_SUPERGC)
.gcGraceSeconds(0));
}
- public ColumnFamilyStore testSingleSSTableCompaction(String strategyClassName) throws Exception
+ // Test that if an sstable has enough expired columns, it is compacted by itself.
+ @Test
+ public void testSingleSSTableCompaction() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_DENSE1);
@@ -115,8 +138,6 @@ public class CompactionsTest
// make sure max timestamp of compacted sstables is recorded properly after compaction.
assertMaxTimestamp(store, timestamp);
-
- return store;
}
public static long populate(String ks, String cf, int startRowKey, int endRowKey, int ttl)
@@ -138,69 +159,57 @@ public class CompactionsTest
return timestamp;
}
- // Test to see if sstable has enough expired columns, it is compacted itself.
- @Test
- public void testSingleSSTableCompactionWithSizeTieredCompaction() throws Exception
- {
- testSingleSSTableCompaction(SizeTieredCompactionStrategy.class.getCanonicalName());
- }
-
- /*
- @Test
- public void testSingleSSTableCompactionWithLeveledCompaction() throws Exception
- {
- ColumnFamilyStore store = testSingleSSTableCompaction(LeveledCompactionStrategy.class.getCanonicalName());
- CompactionStrategyManager strategyManager = store.getCompactionStrategyManager();
- // tombstone removal compaction should not promote level
- assert strategyManager.getSSTableCountPerLevel()[0] == 1;
- }
-
@Test
public void testSuperColumnTombstones()
{
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Super1");
- CFMetaData cfm = cfs.metadata;
+ CFMetaData table = cfs.metadata;
cfs.disableAutoCompaction();
DecoratedKey key = Util.dk("tskey");
ByteBuffer scName = ByteBufferUtil.bytes("TestSuperColumn");
// a subcolumn
- new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), key.getKey())
- .clustering(ByteBufferUtil.bytes("cols"))
- .add("val", "val1")
- .build().applyUnsafe();
+ new RowUpdateBuilder(table, FBUtilities.timestampMicros(), key.getKey())
+ .clustering(ByteBufferUtil.bytes("cols"))
+ .add("val", "val1")
+ .build().applyUnsafe();
cfs.forceBlockingFlush();
// shadow the subcolumn with a supercolumn tombstone
- RowUpdateBuilder.deleteRow(cfm, FBUtilities.timestampMicros(), key.getKey(), ByteBufferUtil.bytes("cols")).applyUnsafe();
+ RowUpdateBuilder.deleteRow(table, FBUtilities.timestampMicros(), key.getKey(), ByteBufferUtil.bytes("cols")).applyUnsafe();
cfs.forceBlockingFlush();
- CompactionManager.instance.performMaximal(cfs);
+ CompactionManager.instance.performMaximal(cfs, false);
assertEquals(1, cfs.getLiveSSTables().size());
// check that the shadowed column is gone
SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
- AbstractBounds<PartitionPosition> bounds = new Bounds<PartitionPosition>(key, sstable.partitioner.getMinimumToken().maxKeyBound());
- ISSTableScanner scanner = sstable.getScanner(FBUtilities.nowInSeconds());
- UnfilteredRowIterator ai = scanner.next();
- assertTrue(ai.next() instanceof RangeTombstone);
- assertFalse(ai.hasNext());
-
+ AbstractBounds<PartitionPosition> bounds = new Bounds<>(key, sstable.getPartitioner().getMinimumToken().maxKeyBound());
+ UnfilteredRowIterator ai;
+ try (ISSTableScanner scanner = sstable.getScanner())
+ {
+ ai = scanner.next();
+ final Unfiltered next = ai.next();
+ assertTrue(next.isRow());
+ assertFalse(ai.hasNext());
+ }
}
@Test
public void testUncheckedTombstoneSizeTieredCompaction() throws Exception
{
+ Map<String, String> compactionOptions = new HashMap<>();
+ compactionOptions.put("tombstone_compaction_interval", "1");
+ compactionOptions.put("unchecked_tombstone_compaction", "false");
+
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
store.clearUnsafe();
- store.metadata.gcGraceSeconds(1);
- store.metadata.compactionStrategyOptions.put("tombstone_compaction_interval", "1");
- store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "false");
+
+ MigrationManager.announceColumnFamilyUpdate(store.metadata.params(TableParams.builder(store.metadata.params).gcGraceSeconds(1).compaction(CompactionParams.scts(compactionOptions)).build()), true);
store.reload();
- store.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getName());
// disable compaction while flushing
store.disableAutoCompaction();
@@ -237,12 +246,13 @@ public class CompactionsTest
long newSize1 = it.next().uncompressedLength();
long newSize2 = it.next().uncompressedLength();
assertEquals("candidate sstable should not be tombstone-compacted because its key range overlap with other sstable",
- originalSize1, newSize1);
+ originalSize1, newSize1);
assertEquals("candidate sstable should not be tombstone-compacted because its key range overlap with other sstable",
- originalSize2, newSize2);
+ originalSize2, newSize2);
// now let's enable the magic property
- store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "true");
+ compactionOptions.put("unchecked_tombstone_compaction", "true");
+ MigrationManager.announceColumnFamilyUpdate(store.metadata.params(TableParams.builder(store.metadata.params).gcGraceSeconds(1).compaction(CompactionParams.scts(compactionOptions)).build()), true);
store.reload();
//submit background task again and wait for it to complete
@@ -263,7 +273,6 @@ public class CompactionsTest
// make sure max timestamp of compacted sstables is recorded properly after compaction.
assertMaxTimestamp(store, timestamp2);
}
- */
public static void assertMaxTimestamp(ColumnFamilyStore cfs, long maxTimestampExpected)
{
@@ -273,68 +282,13 @@ public class CompactionsTest
assertEquals(maxTimestampExpected, maxTimestampObserved);
}
- /*
@Test
- public void testEchoedRow()
+ public void testDontPurgeAccidentally() throws InterruptedException
{
- // This test check that EchoedRow doesn't skipp rows: see CASSANDRA-2653
-
- Keyspace keyspace = Keyspace.open(KEYSPACE1);
- ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");
-
- // disable compaction while flushing
- cfs.disableAutoCompaction();
-
- // Insert 4 keys in two sstables. We need the sstables to have 2 rows
- // at least to trigger what was causing CASSANDRA-2653
- for (int i=1; i < 5; i++)
- {
- DecoratedKey key = Util.dk(String.valueOf(i));
- Mutation rm = new Mutation(KEYSPACE1, key.getKey());
- rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
- rm.applyUnsafe();
-
- if (i % 2 == 0)
- cfs.forceBlockingFlush();
- }
- Collection<SSTableReader> toCompact = cfs.getLiveSSTables();
- assertEquals(2, toCompact.size());
-
- // Reinserting the same keys. We will compact only the previous sstable, but we need those new ones
- // to make sure we use EchoedRow, otherwise it won't be used because purge can be done.
- for (int i=1; i < 5; i++)
- {
- DecoratedKey key = Util.dk(String.valueOf(i));
- Mutation rm = new Mutation(KEYSPACE1, key.getKey());
- rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
- rm.applyUnsafe();
- }
- cfs.forceBlockingFlush();
- SSTableReader tmpSSTable = null;
- for (SSTableReader sstable : cfs.getLiveSSTables())
- if (!toCompact.contains(sstable))
- tmpSSTable = sstable;
- assertNotNull(tmpSSTable);
-
- // Force compaction on first sstables. Since each row is in only one sstable, we will be using EchoedRow.
- Util.compact(cfs, toCompact);
- assertEquals(2, cfs.getLiveSSTables().size());
-
- // Now, we remove the sstable that was just created to force the use of EchoedRow (so that it doesn't hide the problem)
- cfs.markObsolete(Collections.singleton(tmpSSTable), OperationType.UNKNOWN);
- assertEquals(1, cfs.getLiveSSTables().size());
-
- // Now assert we do have the 4 keys
- assertEquals(4, Util.getRangeSlice(cfs).size());
- }
-
- @Test
- public void testDontPurgeAccidentaly() throws InterruptedException
- {
- testDontPurgeAccidentaly("test1", "Super5");
+ testDontPurgeAccidentally("test1", CF_SUPER5);
// Use CF with gc_grace=0, see last bug of CASSANDRA-2786
- testDontPurgeAccidentaly("test1", "SuperDirectGC");
+ testDontPurgeAccidentally("test1", CF_SUPERGC);
}
@Test
@@ -343,6 +297,7 @@ public class CompactionsTest
Keyspace keyspace = Keyspace.open(KEYSPACE1);
final String cfname = "Standard3"; // use clean(no sstable) CF
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
+ CFMetaData table = cfs.metadata;
// disable compaction while flushing
cfs.disableAutoCompaction();
@@ -350,11 +305,10 @@ public class CompactionsTest
final int ROWS_PER_SSTABLE = 10;
for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
DecoratedKey key = Util.dk(String.valueOf(i));
- Mutation rm = new Mutation(KEYSPACE1, key.getKey());
- rm.add(cfname, Util.cellname("col"),
- ByteBufferUtil.EMPTY_BYTE_BUFFER,
- System.currentTimeMillis());
- rm.applyUnsafe();
+ new RowUpdateBuilder(table, FBUtilities.timestampMicros(), key.getKey())
+ .clustering(ByteBufferUtil.bytes("cols"))
+ .add("val", "val1")
+ .build().applyUnsafe();
}
cfs.forceBlockingFlush();
Collection<SSTableReader> sstables = cfs.getLiveSSTables();
@@ -377,6 +331,23 @@ public class CompactionsTest
assertEquals( prevGeneration + 1, sstables.iterator().next().descriptor.generation);
}
+ public static void writeSSTableWithRangeTombstoneMaskingOneColumn(ColumnFamilyStore cfs, CFMetaData table, int[] dks) {
+ for (int dk : dks)
+ {
+ RowUpdateBuilder deletedRowUpdateBuilder = new RowUpdateBuilder(table, 1, Util.dk(Integer.toString(dk)));
+ deletedRowUpdateBuilder.clustering("01").add("val", "a"); //Range tombstone covers this (timestamp 2 > 1)
+ Clustering startClustering = new Clustering(ByteBufferUtil.bytes("0"));
+ Clustering endClustering = new Clustering(ByteBufferUtil.bytes("b"));
+ deletedRowUpdateBuilder.addRangeTombstone(new RangeTombstone(Slice.make(startClustering, endClustering), new DeletionTime(2, (int) (System.currentTimeMillis() / 1000))));
+ deletedRowUpdateBuilder.build().applyUnsafe();
+
+ RowUpdateBuilder notYetDeletedRowUpdateBuilder = new RowUpdateBuilder(table, 3, Util.dk(Integer.toString(dk)));
+ notYetDeletedRowUpdateBuilder.clustering("02").add("val", "a"); //Range tombstone doesn't cover this (timestamp 3 > 2)
+ notYetDeletedRowUpdateBuilder.build().applyUnsafe();
+ }
+ cfs.forceBlockingFlush();
+ }
+
@Test
public void testRangeTombstones()
{
@@ -387,104 +358,67 @@ public class CompactionsTest
// disable compaction while flushing
cfs.disableAutoCompaction();
- final CFMetaData cfmeta = cfs.metadata;
- Directories dir = cfs.directories;
+ final CFMetaData table = cfs.metadata;
+ Directories dir = cfs.getDirectories();
ArrayList<DecoratedKey> keys = new ArrayList<DecoratedKey>();
for (int i=0; i < 4; i++)
{
- keys.add(Util.dk(""+i));
+ keys.add(Util.dk(Integer.toString(i)));
}
- ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfmeta);
- cf.addColumn(Util.column("01", "a", 1)); // this must not resurrect
- cf.addColumn(Util.column("a", "a", 3));
- cf.deletionInfo().add(new RangeTombstone(Util.cellname("0"), Util.cellname("b"), 2, (int) (System.currentTimeMillis()/1000)),cfmeta.comparator);
+ int[] dks = {0, 1, 3};
+ writeSSTableWithRangeTombstoneMaskingOneColumn(cfs, table, dks);
- Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(dir.getDirectoryForNewSSTables()));
- try(SSTableTxnWriter writer = SSTableTxnWriter.create(desc, 0, 0, 0))
- {
- writer.append(Util.dk("0"), cf);
- writer.append(Util.dk("1"), cf);
- writer.append(Util.dk("3"), cf);
-
- cfs.addSSTable(writer.closeAndOpenReader());
- }
-
- desc = Descriptor.fromFilename(cfs.getSSTablePath(dir.getDirectoryForNewSSTables()));
- try (SSTableTxnWriter writer = SSTableTxnWriter.create(desc, 0, 0, 0))
- {
- writer.append(Util.dk("0"), cf);
- writer.append(Util.dk("1"), cf);
- writer.append(Util.dk("2"), cf);
- writer.append(Util.dk("3"), cf);
- cfs.addSSTable(writer.closeAndOpenReader());
- }
+ int[] dkays = {0, 1, 2, 3};
+ writeSSTableWithRangeTombstoneMaskingOneColumn(cfs, table, dkays);
Collection<SSTableReader> toCompact = cfs.getLiveSSTables();
assert toCompact.size() == 2;
- // Force compaction on first sstables. Since each row is in only one sstable, we will be using EchoedRow.
Util.compact(cfs, toCompact);
assertEquals(1, cfs.getLiveSSTables().size());
// Now assert we do have the 4 keys
- assertEquals(4, Util.getRangeSlice(cfs).size());
+ assertEquals(4, Util.getAll(Util.cmd(cfs).build()).size());
+
+ ArrayList<DecoratedKey> k = new ArrayList<>();
- ArrayList<DecoratedKey> k = new ArrayList<DecoratedKey>();
- for (Row r : Util.getRangeSlice(cfs))
+ for (FilteredPartition p : Util.getAll(Util.cmd(cfs).build()))
{
- k.add(r.key);
- assertEquals(ByteBufferUtil.bytes("a"),r.cf.getColumn(Util.cellname("a")).value());
- assertNull(r.cf.getColumn(Util.cellname("01")));
- assertEquals(3,r.cf.getColumn(Util.cellname("a")).timestamp());
+ k.add(p.partitionKey());
+ final SinglePartitionReadCommand command = SinglePartitionReadCommand.create(cfs.metadata, FBUtilities.nowInSeconds(), ColumnFilter.all(cfs.metadata), RowFilter.NONE, DataLimits.NONE, p.partitionKey(), new ClusteringIndexSliceFilter(Slices.ALL, false));
+ try (ReadOrderGroup orderGroup = command.startOrderGroup();
+ PartitionIterator iterator = command.executeInternal(orderGroup))
+ {
+ try (RowIterator rowIterator = iterator.next())
+ {
+ Row row = rowIterator.next();
+ Cell cell = row.getCell(cfs.metadata.getColumnDefinition(new ColumnIdentifier("val", false)));
+ assertEquals(ByteBufferUtil.bytes("a"), cell.value());
+ assertEquals(3, cell.timestamp());
+ assertNotSame(ByteBufferUtil.bytes("01"), row.clustering().getRawValues()[0]);
+ assertEquals(ByteBufferUtil.bytes("02"), row.clustering().getRawValues()[0]);
+ }
+ }
}
-
for (SSTableReader sstable : cfs.getLiveSSTables())
{
StatsMetadata stats = sstable.getSSTableMetadata();
- assertEquals(ByteBufferUtil.bytes("0"), stats.minColumnNames.get(0));
- assertEquals(ByteBufferUtil.bytes("b"), stats.maxColumnNames.get(0));
+ assertEquals(ByteBufferUtil.bytes("0"), stats.minClusteringValues.get(0));
+ assertEquals(ByteBufferUtil.bytes("b"), stats.maxClusteringValues.get(0));
}
assertEquals(keys, k);
}
- @Test
- public void testCompactionLog() throws Exception
- {
- SystemKeyspace.discardCompactionsInProgress();
-
- String cf = "Standard4";
- ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(cf);
- SchemaLoader.insertData(KEYSPACE1, cf, 0, 1);
- cfs.forceBlockingFlush();
-
- Collection<SSTableReader> sstables = cfs.getLiveSSTables();
- assertFalse(sstables.isEmpty());
- Set<Integer> generations = Sets.newHashSet(Iterables.transform(sstables, new Function<SSTableReader, Integer>()
- {
- public Integer apply(SSTableReader sstable)
- {
- return sstable.descriptor.generation;
- }
- }));
- UUID taskId = SystemKeyspace.startCompaction(cfs, sstables);
- Map<Pair<String, String>, Map<Integer, UUID>> compactionLogs = SystemKeyspace.getUnfinishedCompactions();
- Set<Integer> unfinishedCompactions = compactionLogs.get(Pair.create(KEYSPACE1, cf)).keySet();
- assertTrue(unfinishedCompactions.containsAll(generations));
-
- SystemKeyspace.finishCompaction(taskId);
- compactionLogs = SystemKeyspace.getUnfinishedCompactions();
- assertFalse(compactionLogs.containsKey(Pair.create(KEYSPACE1, cf)));
- }
-
- private void testDontPurgeAccidentaly(String k, String cfname) throws InterruptedException
+ private void testDontPurgeAccidentally(String k, String cfname) throws InterruptedException
{
// This test catches the regression of CASSANDRA-2786
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
+ CFMetaData table = cfs.metadata;
// disable compaction while flushing
cfs.clearUnsafe();
@@ -492,24 +426,24 @@ public class CompactionsTest
// Add test row
DecoratedKey key = Util.dk(k);
- Mutation rm = new Mutation(KEYSPACE1, key.getKey());
- rm.add(cfname, Util.cellname(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
- rm.applyUnsafe();
+ RowUpdateBuilder rowUpdateBuilder = new RowUpdateBuilder(table, 0, key);
+ rowUpdateBuilder.clustering("c").add("val", "a");
+ rowUpdateBuilder.build().applyUnsafe();
cfs.forceBlockingFlush();
Collection<SSTableReader> sstablesBefore = cfs.getLiveSSTables();
- QueryFilter filter = QueryFilter.getIdentityFilter(key, cfname, System.currentTimeMillis());
- assertTrue(cfs.getColumnFamily(filter).hasColumns());
+ ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
+ assertTrue(!partition.isEmpty());
+ RowUpdateBuilder deleteRowBuilder = new RowUpdateBuilder(table, 2, key);
+ deleteRowBuilder.clustering("c").delete("val");
+ deleteRowBuilder.build().applyUnsafe();
// Remove key
- rm = new Mutation(KEYSPACE1, key.getKey());
- rm.delete(cfname, 2);
- rm.applyUnsafe();
- ColumnFamily cf = cfs.getColumnFamily(filter);
- assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());
+ partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
+ assertTrue(partition.iterator().next().cells().iterator().next().isTombstone());
// Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
Thread.sleep(1000);
@@ -524,10 +458,28 @@ public class CompactionsTest
Util.compact(cfs, toCompact);
- cf = cfs.getColumnFamily(filter);
- assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());
+ SSTableReader newSSTable = null;
+ for (SSTableReader reader : cfs.getLiveSSTables())
+ {
+ assert !toCompact.contains(reader);
+ if (!sstablesBefore.contains(reader))
+ newSSTable = reader;
+ }
+
+ // We cannot read the data, since {@link ReadCommand#withoutPurgeableTombstones} will purge droppable tombstones
+ // but we just want to check here that compaction did *NOT* drop the tombstone, so we read from the SSTable directly
+ // instead
+ ISSTableScanner scanner = newSSTable.getScanner();
+ assertTrue(scanner.hasNext());
+ UnfilteredRowIterator rowIt = scanner.next();
+ assertTrue(rowIt.hasNext());
+ Unfiltered unfiltered = rowIt.next();
+ assertTrue(unfiltered.isRow());
+ Row row = (Row)unfiltered;
+ assertTrue(row.cells().iterator().next().isTombstone());
+ assertFalse(rowIt.hasNext());
+ assertFalse(scanner.hasNext());
}
- */
private static Range<Token> rangeFor(int start, int end)
{
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org