Posted to commits@cassandra.apache.org by ma...@apache.org on 2016/09/27 07:23:11 UTC

[1/3] cassandra git commit: Make sure repaired tombstones are dropped when only_purge_repaired_tombstones is enabled

Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 405a828c1 -> b7da0031f
  refs/heads/trunk 979af884e -> 577701336


Make sure repaired tombstones are dropped when only_purge_repaired_tombstones is enabled

Patch by Sharvanath Pathak; reviewed by marcuse for CASSANDRA-12703
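
For context: only_purge_repaired_tombstones is a per-table compaction sub-option (off by default). When it is enabled, compaction only drops tombstones that are in repaired sstables, so a tombstone cannot be purged before anti-entropy repair has propagated it; the inverted bound fixed below meant that tombstones in repaired sstables were never purged. A minimal, hedged sketch of turning the option on with the DataStax Java driver (contact point, keyspace and table names are illustrative, not taken from this commit):

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.Session;

    public class EnableOnlyPurgeRepairedTombstones
    {
        public static void main(String[] args)
        {
            // Cluster address and table name are placeholders.
            try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = cluster.connect())
            {
                // Compaction options form a text map, so the flag is passed as a string.
                session.execute("ALTER TABLE ks.tbl WITH compaction = " +
                                "{'class': 'SizeTieredCompactionStrategy', " +
                                "'only_purge_repaired_tombstones': 'true'}");
            }
        }
    }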


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b7da0031
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b7da0031
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b7da0031

Branch: refs/heads/cassandra-3.0
Commit: b7da0031fad4f7ce6493bcbd059c58d61c589a3b
Parents: 405a828
Author: sharvanath <sh...@rubrik.com>
Authored: Mon Sep 26 09:53:07 2016 +0200
Committer: Marcus Eriksson <ma...@apache.org>
Committed: Tue Sep 27 09:09:29 2016 +0200

----------------------------------------------------------------------
 .../db/compaction/CompactionIterator.java       |  2 +-
 .../db/RepairedDataTombstonesTest.java          | 80 ++++++++++++--------
 2 files changed, 51 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/b7da0031/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
index d39da2a..f8f620c 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
@@ -266,7 +266,7 @@ public class CompactionIterator extends CompactionInfo.Holder implements Unfilte
 
         private Purger(boolean isForThrift, CompactionController controller, int nowInSec)
         {
-            super(isForThrift, nowInSec, controller.gcBefore, controller.compactingRepaired() ? Integer.MIN_VALUE : Integer.MAX_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
+            super(isForThrift, nowInSec, controller.gcBefore, controller.compactingRepaired() ? Integer.MAX_VALUE : Integer.MIN_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
             this.controller = controller;
         }
 

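The one-line change above flips the "oldest unrepaired tombstone" bound handed to the purger: when the sstables being compacted are all repaired, Integer.MAX_VALUE now means "there is no unrepaired tombstone to protect", so expired tombstones may be dropped; when compacting unrepaired data, Integer.MIN_VALUE keeps them. A self-contained sketch of that gate (a hedged model, not Cassandra's actual PurgeFunction, which also consults the max purgeable timestamp):

    public class PurgeBoundSketch
    {
        // Hedged model: may an expired tombstone be dropped during compaction?
        static boolean mayPurge(int localDeletionTime, int gcBefore,
                                int oldestUnrepairedTombstone, boolean onlyPurgeRepairedTombstones)
        {
            // Droppable only once past gcBefore and, when the option is set,
            // only if older than every unrepaired tombstone still around.
            return localDeletionTime < gcBefore
                   && (!onlyPurgeRepairedTombstones || localDeletionTime < oldestUnrepairedTombstone);
        }

        public static void main(String[] args)
        {
            int gcBefore = 100, expiredTombstone = 50;
            // Compacting repaired sstables: the fix passes Integer.MAX_VALUE, so purging is allowed.
            System.out.println(mayPurge(expiredTombstone, gcBefore, Integer.MAX_VALUE, true)); // true
            // Compacting unrepaired sstables: Integer.MIN_VALUE blocks purging.
            System.out.println(mayPurge(expiredTombstone, gcBefore, Integer.MIN_VALUE, true)); // false
        }
    }
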
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b7da0031/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java b/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
index 3a74029..e0d68a4 100644
--- a/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
+++ b/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
@@ -60,10 +60,9 @@ public class RepairedDataTombstonesTest extends CQLTester
         Thread.sleep(1000);
         // at this point we have 2 sstables, one repaired and one unrepaired. Both sstables contain expired tombstones, but we should only drop the tombstones from the repaired sstable.
         getCurrentColumnFamilyStore().forceMajorCompaction();
-        verify();
-        verify2(1);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(1);
         assertEquals(2, Iterables.size(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE)));
-
     }
 
     @Test
@@ -85,8 +84,8 @@ public class RepairedDataTombstonesTest extends CQLTester
         flush();
         Thread.sleep(1000);
         getCurrentColumnFamilyStore().forceMajorCompaction();
-        verify();
-        verify2(1);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(1);
         assertEquals(1, Iterables.size(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE)));
         assertFalse(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE).iterator().next().isRepaired());
 
@@ -129,8 +128,8 @@ public class RepairedDataTombstonesTest extends CQLTester
 
         // allow gcgrace to properly expire:
         Thread.sleep(1000);
-        verify();
-        verify2(123);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(123);
     }
 
 
@@ -219,34 +218,44 @@ public class RepairedDataTombstonesTest extends CQLTester
 
         Thread.sleep(2000);
         // we will keep all tombstones since the oldest tombstones are unrepaired:
-        verify(30, 0, 30);
-        verify2(1, 30, 0, 30);
+        verify(30, 0, 30, false);
+        verify2(1, 30, 0, 30, false);
     }
 
     private void verify()
     {
-        verify(10, 10, 20);
+        verify(10, 10, 20, false);
+    }
+
+    private void verifyIncludingPurgeable()
+    {
+        verify(10, 10, 20, true);
     }
 
-    private void verify(int expectedRows, int minVal, int maxVal)
+    private void verify(int expectedRows, int minVal, int maxVal, boolean includePurgeable)
     {
         ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore()).build();
         int foundRows = 0;
-        try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iterator = cmd.executeLocally(orderGroup))
+        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
+             UnfilteredPartitionIterator iterator =
+             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
+                                cmd.executeLocally(orderGroup))
         {
             while (iterator.hasNext())
             {
-                UnfilteredRowIterator rowIter = iterator.next();
-                if (!rowIter.partitionKey().equals(Util.dk(ByteBufferUtil.bytes(999)))) // partition key 999 is 'live' and used to avoid sstables from being dropped
+                try (UnfilteredRowIterator rowIter = iterator.next())
                 {
-                    while (rowIter.hasNext())
+                    if (!rowIter.partitionKey().equals(Util.dk(ByteBufferUtil.bytes(999)))) // partition key 999 is 'live' and used to avoid sstables from being dropped
                     {
-                        AbstractRow row = (AbstractRow) rowIter.next();
-                        for (int i = 0; i < row.clustering().size(); i++)
+                        while (rowIter.hasNext())
                         {
-                            foundRows++;
-                            int val = ByteBufferUtil.toInt(row.clustering().get(i));
-                            assertTrue("val=" + val, val >= minVal && val < maxVal);
+                            AbstractRow row = (AbstractRow) rowIter.next();
+                            for (int i = 0; i < row.clustering().size(); i++)
+                            {
+                                foundRows++;
+                                int val = ByteBufferUtil.toInt(row.clustering().get(i));
+                                assertTrue("val=" + val, val >= minVal && val < maxVal);
+                            }
                         }
                     }
                 }
@@ -254,28 +263,39 @@ public class RepairedDataTombstonesTest extends CQLTester
         }
         assertEquals(expectedRows, foundRows);
     }
+
     private void verify2(int key)
     {
-        verify2(key, 10, 10, 20);
+        verify2(key, 10, 10, 20, false);
+    }
+
+    private void verify2IncludingPurgeable(int key)
+    {
+        verify2(key, 10, 10, 20, true);
     }
 
-    private void verify2(int key, int expectedRows, int minVal, int maxVal)
+    private void verify2(int key, int expectedRows, int minVal, int maxVal, boolean includePurgeable)
     {
         ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore(), Util.dk(ByteBufferUtil.bytes(key))).build();
         int foundRows = 0;
-        try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iterator = cmd.executeLocally(orderGroup))
+        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
+             UnfilteredPartitionIterator iterator =
+             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
+                                cmd.executeLocally(orderGroup))
         {
             while (iterator.hasNext())
             {
-                UnfilteredRowIterator rowIter = iterator.next();
-                while (rowIter.hasNext())
+                try (UnfilteredRowIterator rowIter = iterator.next())
                 {
-                    AbstractRow row = (AbstractRow) rowIter.next();
-                    for (int i = 0; i < row.clustering().size(); i++)
+                    while (rowIter.hasNext())
                     {
-                        foundRows++;
-                        int val = ByteBufferUtil.toInt(row.clustering().get(i));
-                        assertTrue("val=" + val, val >= minVal && val < maxVal);
+                        AbstractRow row = (AbstractRow) rowIter.next();
+                        for (int i = 0; i < row.clustering().size(); i++)
+                        {
+                            foundRows++;
+                            int val = ByteBufferUtil.toInt(row.clustering().get(i));
+                            assertTrue("val=" + val, val >= minVal && val < maxVal);
+                        }
                     }
                 }
             }
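
The test refactoring above adds an includePurgeable flag: the existing verify()/verify2() read through cmd.executeLocally(), i.e. the normal local read path, which already filters out tombstones that are purgeable, while the new *IncludingPurgeable variants read via cmd.queryStorage() and therefore see whatever is still physically present in the sstables (the row iterators are also now closed via try-with-resources). A minimal, self-contained model of that distinction (plain Java, no Cassandra classes; deletion times and gcBefore are made up):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class PurgeableReadModel
    {
        // Each entry is a tombstone's local deletion time, in seconds.
        static List<Integer> liveRead(List<Integer> tombstones, int gcBefore)
        {
            // Normal read path: tombstones that are already purgeable are dropped from the result.
            return tombstones.stream().filter(t -> t >= gcBefore).collect(Collectors.toList());
        }

        static List<Integer> storageRead(List<Integer> tombstones)
        {
            // Raw storage read: everything still on disk is returned, purgeable or not.
            return tombstones;
        }

        public static void main(String[] args)
        {
            List<Integer> onDisk = Arrays.asList(50, 150, 200);
            int gcBefore = 100;
            System.out.println(liveRead(onDisk, gcBefore)); // [150, 200]
            System.out.println(storageRead(onDisk));        // [50, 150, 200]
        }
    }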


[3/3] cassandra git commit: Merge branch 'cassandra-3.0' into trunk

Posted by ma...@apache.org.
Merge branch 'cassandra-3.0' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/57770133
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/57770133
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/57770133

Branch: refs/heads/trunk
Commit: 5777013368fd549bf603d1a108bb2dad714c8b55
Parents: 979af88 b7da003
Author: Marcus Eriksson <ma...@apache.org>
Authored: Tue Sep 27 09:11:37 2016 +0200
Committer: Marcus Eriksson <ma...@apache.org>
Committed: Tue Sep 27 09:13:49 2016 +0200

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../db/compaction/CompactionIterator.java       |  2 +-
 .../db/RepairedDataTombstonesTest.java          | 78 ++++++++++++--------
 3 files changed, 50 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/57770133/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 0a7f9b0,0524e49..e10b870
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,73 -1,4 +1,74 @@@
 -3.0.10
 +3.10
 + * Tune compaction thread count via nodetool (CASSANDRA-12248)
 + * Add +=/-= shortcut syntax for update queries (CASSANDRA-12232)
 + * Include repair session IDs in repair start message (CASSANDRA-12532)
 + * Add a blocking task to Index, run before joining the ring (CASSANDRA-12039)
 + * Fix NPE when using CQLSSTableWriter (CASSANDRA-12667)
 + * Support optional backpressure strategies at the coordinator (CASSANDRA-9318)
 + * Make randompartitioner work with new vnode allocation (CASSANDRA-12647)
 + * Fix cassandra-stress graphing (CASSANDRA-12237)
 + * Allow filtering on partition key columns for queries without secondary indexes (CASSANDRA-11031)
 + * Fix Cassandra Stress reporting thread model and precision (CASSANDRA-12585)
 + * Add JMH benchmarks.jar (CASSANDRA-12586)
 + * Add row offset support to SASI (CASSANDRA-11990)
 + * Cleanup uses of AlterTableStatementColumn (CASSANDRA-12567)
 + * Add keep-alive to streaming (CASSANDRA-11841)
 + * Tracing payload is passed through newSession(..) (CASSANDRA-11706)
 + * avoid deleting non existing sstable files and improve related log messages (CASSANDRA-12261)
 + * json/yaml output format for nodetool compactionhistory (CASSANDRA-12486)
 + * Retry all internode messages once after a connection is
 +   closed and reopened (CASSANDRA-12192)
 + * Add support to rebuild from targeted replica (CASSANDRA-9875)
 + * Add sequence distribution type to cassandra stress (CASSANDRA-12490)
 + * "SELECT * FROM foo LIMIT ;" does not error out (CASSANDRA-12154)
 + * Define executeLocally() at the ReadQuery Level (CASSANDRA-12474)
 + * Extend read/write failure messages with a map of replica addresses
 +   to error codes in the v5 native protocol (CASSANDRA-12311)
 + * Fix rebuild of SASI indexes with existing index files (CASSANDRA-12374)
 + * Let DatabaseDescriptor not implicitly startup services (CASSANDRA-9054, 12550)
 + * Fix clustering indexes in presence of static columns in SASI (CASSANDRA-12378)
 + * Fix queries on columns with reversed type on SASI indexes (CASSANDRA-12223)
 + * Added slow query log (CASSANDRA-12403)
 + * Count full coordinated request against timeout (CASSANDRA-12256)
 + * Allow TTL with null value on insert and update (CASSANDRA-12216)
 + * Make decommission operation resumable (CASSANDRA-12008)
 + * Add support to one-way targeted repair (CASSANDRA-9876)
 + * Remove clientutil jar (CASSANDRA-11635)
 + * Fix compaction throughput throttle (CASSANDRA-12366)
 + * Delay releasing Memtable memory on flush until PostFlush has finished running (CASSANDRA-12358)
 + * Cassandra stress should dump all setting on startup (CASSANDRA-11914)
 + * Make it possible to compact a given token range (CASSANDRA-10643)
 + * Allow updating DynamicEndpointSnitch properties via JMX (CASSANDRA-12179)
 + * Collect metrics on queries by consistency level (CASSANDRA-7384)
 + * Add support for GROUP BY to SELECT statement (CASSANDRA-10707)
 + * Deprecate memtable_cleanup_threshold and update default for memtable_flush_writers (CASSANDRA-12228)
 + * Upgrade to OHC 0.4.4 (CASSANDRA-12133)
 + * Add version command to cassandra-stress (CASSANDRA-12258)
 + * Create compaction-stress tool (CASSANDRA-11844)
 + * Garbage-collecting compaction operation and schema option (CASSANDRA-7019)
 + * Add beta protocol flag for v5 native protocol (CASSANDRA-12142)
 + * Support filtering on non-PRIMARY KEY columns in the CREATE
 +   MATERIALIZED VIEW statement's WHERE clause (CASSANDRA-10368)
 + * Unify STDOUT and SYSTEMLOG logback format (CASSANDRA-12004)
 + * COPY FROM should raise error for non-existing input files (CASSANDRA-12174)
 + * Faster write path (CASSANDRA-12269)
 + * Option to leave omitted columns in INSERT JSON unset (CASSANDRA-11424)
 + * Support json/yaml output in nodetool tpstats (CASSANDRA-12035)
 + * Expose metrics for successful/failed authentication attempts (CASSANDRA-10635)
 + * Prepend snapshot name with "truncated" or "dropped" when a snapshot
 +   is taken before truncating or dropping a table (CASSANDRA-12178)
 + * Optimize RestrictionSet (CASSANDRA-12153)
 + * cqlsh does not automatically downgrade CQL version (CASSANDRA-12150)
 + * Omit (de)serialization of state variable in UDAs (CASSANDRA-9613)
 + * Create a system table to expose prepared statements (CASSANDRA-8831)
 + * Reuse DataOutputBuffer from ColumnIndex (CASSANDRA-11970)
 + * Remove DatabaseDescriptor dependency from SegmentedFile (CASSANDRA-11580)
 + * Add supplied username to authentication error messages (CASSANDRA-12076)
 + * Remove pre-startup check for open JMX port (CASSANDRA-12074)
 + * Remove compaction Severity from DynamicEndpointSnitch (CASSANDRA-11738)
 + * Restore resumable hints delivery (CASSANDRA-11960)
 +Merged from 3.0:
++ * Make sure repaired tombstones are dropped when only_purge_repaired_tombstones is enabled (CASSANDRA-12703)
   * Skip writing MV mutations to commitlog on mutation.applyUnsafe() (CASSANDRA-11670)
   * Establish consistent distinction between non-existing partition and NULL value for LWTs on static columns (CASSANDRA-12060)
   * Extend ColumnIdentifier.internedInstances key to include the type that generated the byte buffer (CASSANDRA-12516)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/57770133/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/57770133/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
index ad009a4,e0d68a4..ea9f88a
--- a/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
+++ b/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
@@@ -233,8 -236,10 +237,10 @@@ public class RepairedDataTombstonesTes
      {
          ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore()).build();
          int foundRows = 0;
 -        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
 +        try (ReadExecutionController executionController = cmd.executionController();
-              UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController))
+              UnfilteredPartitionIterator iterator =
 -             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
 -                                cmd.executeLocally(orderGroup))
++             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), executionController) :
++                                cmd.executeLocally(executionController))
          {
              while (iterator.hasNext())
              {
@@@ -265,8 -278,10 +279,10 @@@
      {
          ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore(), Util.dk(ByteBufferUtil.bytes(key))).build();
          int foundRows = 0;
 -        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
 +        try (ReadExecutionController executionController = cmd.executionController();
-              UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController))
+              UnfilteredPartitionIterator iterator =
 -             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
 -                                cmd.executeLocally(orderGroup))
++             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), executionController) :
++                                cmd.executeLocally(executionController))
          {
              while (iterator.hasNext())
              {


[2/3] cassandra git commit: Make sure repaired tombstones are dropped when only_purge_repaired_tombstones is enabled

Posted by ma...@apache.org.
Make sure repaired tombstones are dropped when only_purge_repaired_tombstones is enabled

Patch by Sharvanath Pathak; reviewed by marcuse for CASSANDRA-12703


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b7da0031
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b7da0031
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b7da0031

Branch: refs/heads/trunk
Commit: b7da0031fad4f7ce6493bcbd059c58d61c589a3b
Parents: 405a828
Author: sharvanath <sh...@rubrik.com>
Authored: Mon Sep 26 09:53:07 2016 +0200
Committer: Marcus Eriksson <ma...@apache.org>
Committed: Tue Sep 27 09:09:29 2016 +0200

----------------------------------------------------------------------
 .../db/compaction/CompactionIterator.java       |  2 +-
 .../db/RepairedDataTombstonesTest.java          | 80 ++++++++++++--------
 2 files changed, 51 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/b7da0031/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
index d39da2a..f8f620c 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
@@ -266,7 +266,7 @@ public class CompactionIterator extends CompactionInfo.Holder implements Unfilte
 
         private Purger(boolean isForThrift, CompactionController controller, int nowInSec)
         {
-            super(isForThrift, nowInSec, controller.gcBefore, controller.compactingRepaired() ? Integer.MIN_VALUE : Integer.MAX_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
+            super(isForThrift, nowInSec, controller.gcBefore, controller.compactingRepaired() ? Integer.MAX_VALUE : Integer.MIN_VALUE, controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
             this.controller = controller;
         }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b7da0031/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java b/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
index 3a74029..e0d68a4 100644
--- a/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
+++ b/test/unit/org/apache/cassandra/db/RepairedDataTombstonesTest.java
@@ -60,10 +60,9 @@ public class RepairedDataTombstonesTest extends CQLTester
         Thread.sleep(1000);
         // at this point we have 2 sstables, one repaired and one unrepaired. Both sstables contain expired tombstones, but we should only drop the tombstones from the repaired sstable.
         getCurrentColumnFamilyStore().forceMajorCompaction();
-        verify();
-        verify2(1);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(1);
         assertEquals(2, Iterables.size(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE)));
-
     }
 
     @Test
@@ -85,8 +84,8 @@ public class RepairedDataTombstonesTest extends CQLTester
         flush();
         Thread.sleep(1000);
         getCurrentColumnFamilyStore().forceMajorCompaction();
-        verify();
-        verify2(1);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(1);
         assertEquals(1, Iterables.size(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE)));
         assertFalse(getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE).iterator().next().isRepaired());
 
@@ -129,8 +128,8 @@ public class RepairedDataTombstonesTest extends CQLTester
 
         // allow gcgrace to properly expire:
         Thread.sleep(1000);
-        verify();
-        verify2(123);
+        verifyIncludingPurgeable();
+        verify2IncludingPurgeable(123);
     }
 
 
@@ -219,34 +218,44 @@ public class RepairedDataTombstonesTest extends CQLTester
 
         Thread.sleep(2000);
         // we will keep all tombstones since the oldest tombstones are unrepaired:
-        verify(30, 0, 30);
-        verify2(1, 30, 0, 30);
+        verify(30, 0, 30, false);
+        verify2(1, 30, 0, 30, false);
     }
 
     private void verify()
     {
-        verify(10, 10, 20);
+        verify(10, 10, 20, false);
+    }
+
+    private void verifyIncludingPurgeable()
+    {
+        verify(10, 10, 20, true);
     }
 
-    private void verify(int expectedRows, int minVal, int maxVal)
+    private void verify(int expectedRows, int minVal, int maxVal, boolean includePurgeable)
     {
         ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore()).build();
         int foundRows = 0;
-        try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iterator = cmd.executeLocally(orderGroup))
+        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
+             UnfilteredPartitionIterator iterator =
+             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
+                                cmd.executeLocally(orderGroup))
         {
             while (iterator.hasNext())
             {
-                UnfilteredRowIterator rowIter = iterator.next();
-                if (!rowIter.partitionKey().equals(Util.dk(ByteBufferUtil.bytes(999)))) // partition key 999 is 'live' and used to avoid sstables from being dropped
+                try (UnfilteredRowIterator rowIter = iterator.next())
                 {
-                    while (rowIter.hasNext())
+                    if (!rowIter.partitionKey().equals(Util.dk(ByteBufferUtil.bytes(999)))) // partition key 999 is 'live' and used to avoid sstables from being dropped
                     {
-                        AbstractRow row = (AbstractRow) rowIter.next();
-                        for (int i = 0; i < row.clustering().size(); i++)
+                        while (rowIter.hasNext())
                         {
-                            foundRows++;
-                            int val = ByteBufferUtil.toInt(row.clustering().get(i));
-                            assertTrue("val=" + val, val >= minVal && val < maxVal);
+                            AbstractRow row = (AbstractRow) rowIter.next();
+                            for (int i = 0; i < row.clustering().size(); i++)
+                            {
+                                foundRows++;
+                                int val = ByteBufferUtil.toInt(row.clustering().get(i));
+                                assertTrue("val=" + val, val >= minVal && val < maxVal);
+                            }
                         }
                     }
                 }
@@ -254,28 +263,39 @@ public class RepairedDataTombstonesTest extends CQLTester
         }
         assertEquals(expectedRows, foundRows);
     }
+
     private void verify2(int key)
     {
-        verify2(key, 10, 10, 20);
+        verify2(key, 10, 10, 20, false);
+    }
+
+    private void verify2IncludingPurgeable(int key)
+    {
+        verify2(key, 10, 10, 20, true);
     }
 
-    private void verify2(int key, int expectedRows, int minVal, int maxVal)
+    private void verify2(int key, int expectedRows, int minVal, int maxVal, boolean includePurgeable)
     {
         ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore(), Util.dk(ByteBufferUtil.bytes(key))).build();
         int foundRows = 0;
-        try (ReadOrderGroup orderGroup = cmd.startOrderGroup(); UnfilteredPartitionIterator iterator = cmd.executeLocally(orderGroup))
+        try (ReadOrderGroup orderGroup = cmd.startOrderGroup();
+             UnfilteredPartitionIterator iterator =
+             includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), orderGroup) :
+                                cmd.executeLocally(orderGroup))
         {
             while (iterator.hasNext())
             {
-                UnfilteredRowIterator rowIter = iterator.next();
-                while (rowIter.hasNext())
+                try (UnfilteredRowIterator rowIter = iterator.next())
                 {
-                    AbstractRow row = (AbstractRow) rowIter.next();
-                    for (int i = 0; i < row.clustering().size(); i++)
+                    while (rowIter.hasNext())
                     {
-                        foundRows++;
-                        int val = ByteBufferUtil.toInt(row.clustering().get(i));
-                        assertTrue("val=" + val, val >= minVal && val < maxVal);
+                        AbstractRow row = (AbstractRow) rowIter.next();
+                        for (int i = 0; i < row.clustering().size(); i++)
+                        {
+                            foundRows++;
+                            int val = ByteBufferUtil.toInt(row.clustering().get(i));
+                            assertTrue("val=" + val, val >= minVal && val < maxVal);
+                        }
                     }
                 }
             }