Posted to commits@cassandra.apache.org by pa...@apache.org on 2018/02/11 13:26:04 UTC

[19/29] cassandra git commit: Merge branch 'cassandra-2.2' into cassandra-3.0

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table2/mc-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table2/mc-1-big-Summary.db
index 0000000,0000000..66cf70f
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table2/mc-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table2/mc-1-big-TOC.txt
index 0000000,0000000..45113dc
new file mode 100644
--- /dev/null
+++ b/test/data/negative-local-expiration-test/table2/mc-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++CompressionInfo.db
++Data.db
++Summary.db
++Filter.db
++Statistics.db
++TOC.txt
++Digest.crc32
++Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-CompressionInfo.db
index 0000000,0000000..b4de068
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Data.db
index 0000000,0000000..e96f772
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Digest.crc32
index 0000000,0000000..459804b
new file mode 100644
--- /dev/null
+++ b/test/data/negative-local-expiration-test/table3/mc-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++3064924389
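
The Digest.crc32 component above contains a single decimal value. As a
minimal sketch, assuming (as the filename suggests) it is the CRC32 of the
matching Data.db rendered as an unsigned decimal string:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.zip.CRC32;

    final class SSTableDigest
    {
        // CRC32 of a whole file, as an unsigned decimal value; under the
        // assumption above, this would print 3064924389 for
        // table3/mc-1-big-Data.db.
        static long crc32Of(Path file) throws IOException
        {
            CRC32 crc = new CRC32();
            crc.update(Files.readAllBytes(file));
            return crc.getValue();
        }
    }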

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Filter.db
index 0000000,0000000..a397f35
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Index.db
index 0000000,0000000..807a27b
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Statistics.db
index 0000000,0000000..1ee01e6
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-Summary.db
index 0000000,0000000..66cf70f
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table3/mc-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table3/mc-1-big-TOC.txt
index 0000000,0000000..f445537
new file mode 100644
--- /dev/null
+++ b/test/data/negative-local-expiration-test/table3/mc-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++Summary.db
++TOC.txt
++Filter.db
++Index.db
++Digest.crc32
++CompressionInfo.db
++Data.db
++Statistics.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-CompressionInfo.db
index 0000000,0000000..5d22c04
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Data.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Data.db
index 0000000,0000000..a22a7a3
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Digest.crc32
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Digest.crc32
index 0000000,0000000..db7a6c7
new file mode 100644
--- /dev/null
+++ b/test/data/negative-local-expiration-test/table4/mc-1-big-Digest.crc32
@@@ -1,0 -1,0 +1,1 @@@
++1803989939

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Filter.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Filter.db
index 0000000,0000000..a397f35
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Index.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Index.db
index 0000000,0000000..6397b5e
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Statistics.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Statistics.db
index 0000000,0000000..4ee9294
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-Summary.db
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-Summary.db
index 0000000,0000000..66cf70f
new file mode 100644
Binary files differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/data/negative-local-expiration-test/table4/mc-1-big-TOC.txt
----------------------------------------------------------------------
diff --cc test/data/negative-local-expiration-test/table4/mc-1-big-TOC.txt
index 0000000,0000000..f445537
new file mode 100644
--- /dev/null
+++ b/test/data/negative-local-expiration-test/table4/mc-1-big-TOC.txt
@@@ -1,0 -1,0 +1,8 @@@
++Summary.db
++TOC.txt
++Filter.db
++Index.db
++Digest.crc32
++CompressionInfo.db
++Data.db
++Statistics.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
index 9f375d4,b1eaac1..fc70974
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java
@@@ -5,20 -11,33 +11,33 @@@ import static org.junit.Assert.fail
  
  import org.apache.cassandra.cql3.Attributes;
  import org.apache.cassandra.cql3.CQLTester;
+ import org.apache.cassandra.cql3.UntypedResultSet;
 -import org.apache.cassandra.db.BufferExpiringCell;
+ import org.apache.cassandra.db.ColumnFamilyStore;
 -import org.apache.cassandra.db.ExpiringCell;
++import org.apache.cassandra.db.ExpirationDateOverflowHandling;
+ import org.apache.cassandra.db.Keyspace;
++import org.apache.cassandra.db.rows.AbstractCell;
  import org.apache.cassandra.exceptions.InvalidRequestException;
+ import org.apache.cassandra.utils.FBUtilities;
+ 
  import org.junit.Test;
  
  public class TTLTest extends CQLTester
  {
+     public static String NEGATIVE_LOCAL_EXPIRATION_TEST_DIR = "test/data/negative-local-expiration-test/%s";
+ 
 -    public static int MAX_TTL = ExpiringCell.MAX_TTL;
++    public static int MAX_TTL = Attributes.MAX_TTL;
+ 
+     public static final String SIMPLE_NOCLUSTERING = "table1";
+     public static final String SIMPLE_CLUSTERING = "table2";
+     public static final String COMPLEX_NOCLUSTERING = "table3";
+     public static final String COMPLEX_CLUSTERING = "table4";
  
      @Test
      public void testTTLPerRequestLimit() throws Throwable
      {
          createTable("CREATE TABLE %s (k int PRIMARY KEY, i int)");
-         // insert
-         execute("INSERT INTO %s (k, i) VALUES (1, 1) USING TTL ?", Attributes.MAX_TTL); // max ttl
-         int ttl = execute("SELECT ttl(i) FROM %s").one().getInt("ttl(i)");
-         assertTrue(ttl > Attributes.MAX_TTL - 10);
+         // insert with low TTL should not be denied
 -        execute("INSERT INTO %s (k, i) VALUES (1, 1) USING TTL ?", 10); // max ttl
++        execute("INSERT INTO %s (k, i) VALUES (1, 1) USING TTL ?", 10);
  
          try
          {
@@@ -41,10 -60,8 +60,8 @@@
          }
          execute("TRUNCATE %s");
  
-         // update
-         execute("UPDATE %s USING TTL ? SET i = 1 WHERE k = 2", Attributes.MAX_TTL); // max ttl
-         ttl = execute("SELECT ttl(i) FROM %s").one().getInt("ttl(i)");
-         assertTrue(ttl > Attributes.MAX_TTL - 10);
+         // update with low TTL should not be denied
 -        execute("UPDATE %s USING TTL ? SET i = 1 WHERE k = 2", 5); // max ttl
++        execute("UPDATE %s USING TTL ? SET i = 1 WHERE k = 2", 5);
  
          try
          {
@@@ -91,14 -109,297 +108,288 @@@
          {
              assertTrue(e.getCause()
                          .getMessage()
-                         .contains("default_time_to_live must be less than or equal to " + Attributes.MAX_TTL + " (got "
-                                 + (Attributes.MAX_TTL + 1) + ")"));
+                         .contains("default_time_to_live must be less than or equal to " + MAX_TTL + " (got "
+                                   + (MAX_TTL + 1) + ")"));
          }
  
-         createTable("CREATE TABLE %s (k int PRIMARY KEY, i int) WITH default_time_to_live=" + Attributes.MAX_TTL);
+         // table with default low TTL should not be denied
+         createTable("CREATE TABLE %s (k int PRIMARY KEY, i int) WITH default_time_to_live=" + 5);
+         execute("INSERT INTO %s (k, i) VALUES (1, 1)");
+     }
+ 
+     @Test
 -    public void testRejectExpirationDateOverflowPolicy() throws Throwable
++    public void testCapWarnExpirationOverflowPolicy() throws Throwable
++    {
++        // We don't verify here that the warning is actually logged; that is covered by dtests
++        testCapExpirationDateOverflowPolicy(ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy.CAP);
++    }
++
++    @Test
++    public void testCapNoWarnExpirationOverflowPolicy() throws Throwable
++    {
++        testCapExpirationDateOverflowPolicy(ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy.CAP_NOWARN);
++    }
++
++    @Test
++    public void testCapNoWarnExpirationOverflowPolicyDefaultTTL() throws Throwable
++    {
++        ExpirationDateOverflowHandling.policy = ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy.CAP_NOWARN;
++        createTable("CREATE TABLE %s (k int PRIMARY KEY, i int) WITH default_time_to_live=" + MAX_TTL);
 +        execute("INSERT INTO %s (k, i) VALUES (1, 1)");
-         int ttl = execute("SELECT ttl(i) FROM %s").one().getInt("ttl(i)");
-         assertTrue(ttl > 10000 - 10); // within 10 second
++        checkTTLIsCapped("i");
++        ExpirationDateOverflowHandling.policy = ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy.REJECT;
++    }
++
++    @Test
++    public void testRejectExpirationOverflowPolicy() throws Throwable
+     {
 -        Attributes.policy = Attributes.ExpirationDateOverflowPolicy.REJECT;
++        //ExpirationDateOverflowHandling.expirationDateOverflowPolicy = ExpirationDateOverflowHandling.expirationDateOverflowPolicy.REJECT;
+         createTable("CREATE TABLE %s (k int PRIMARY KEY, i int)");
+         try
+         {
+             execute("INSERT INTO %s (k, i) VALUES (1, 1) USING TTL " + MAX_TTL);
+         }
+         catch (InvalidRequestException e)
+         {
+             assertTrue(e.getMessage().contains("exceeds maximum supported expiration date"));
+         }
+         try
+         {
+             createTable("CREATE TABLE %s (k int PRIMARY KEY, i int) WITH default_time_to_live=" + MAX_TTL);
+             execute("INSERT INTO %s (k, i) VALUES (1, 1)");
+         }
+         catch (InvalidRequestException e)
+         {
+             assertTrue(e.getMessage().contains("exceeds maximum supported expiration date"));
+         }
+     }
+ 
+     @Test
 -    public void testCapExpirationDatePolicyDefaultTTL() throws Throwable
++    public void testRecoverOverflowedExpirationWithScrub() throws Throwable
+     {
 -        Attributes.policy = Attributes.ExpirationDateOverflowPolicy.CAP;
 -        createTable("CREATE TABLE %s (k int PRIMARY KEY, i int) WITH default_time_to_live=" + MAX_TTL);
 -        execute("INSERT INTO %s (k, i) VALUES (1, 1)");
 -        checkTTLIsCapped("i");
 -        Attributes.policy = Attributes.ExpirationDateOverflowPolicy.REJECT;
++        baseTestRecoverOverflowedExpiration(false, false);
++        baseTestRecoverOverflowedExpiration(true, false);
++        baseTestRecoverOverflowedExpiration(true, true);
+     }
+ 
 -    @Test
 -    public void testCapExpirationDatePolicyPerRequest() throws Throwable
++    public void testCapExpirationDateOverflowPolicy(ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy policy) throws Throwable
+     {
 -        // Test cap policy
 -        Attributes.policy = Attributes.ExpirationDateOverflowPolicy.CAP;
++        ExpirationDateOverflowHandling.policy = policy;
+ 
+         // simple column, clustering, flush
 -        baseCapExpirationDateOverflowTest(true, true, true);
++        testCapExpirationDateOverflowPolicy(true, true, true);
+         // simple column, clustering, noflush
 -        baseCapExpirationDateOverflowTest(true, true, false);
++        testCapExpirationDateOverflowPolicy(true, true, false);
+         // simple column, noclustering, flush
 -        baseCapExpirationDateOverflowTest(true, false, true);
++        testCapExpirationDateOverflowPolicy(true, false, true);
+         // simple column, noclustering, noflush
 -        baseCapExpirationDateOverflowTest(true, false, false);
++        testCapExpirationDateOverflowPolicy(true, false, false);
+         // complex column, clustering, flush
 -        baseCapExpirationDateOverflowTest(false, true, true);
++        testCapExpirationDateOverflowPolicy(false, true, true);
+         // complex column, clustering, noflush
 -        baseCapExpirationDateOverflowTest(false, true, false);
++        testCapExpirationDateOverflowPolicy(false, true, false);
+         // complex column, noclustering, flush
 -        baseCapExpirationDateOverflowTest(false, false, true);
++        testCapExpirationDateOverflowPolicy(false, false, true);
+         // complex column, noclustering, noflush
 -        baseCapExpirationDateOverflowTest(false, false, false);
 -        // complex column, noclustering, flush
 -        baseCapExpirationDateOverflowTest(false, false, false);
++        testCapExpirationDateOverflowPolicy(false, false, false);
+ 
+         // Return to previous policy
 -        Attributes.policy = Attributes.ExpirationDateOverflowPolicy.REJECT;
 -    }
 -
 -    @Test
 -    public void testRecoverOverflowedExpirationWithScrub() throws Throwable
 -    {
 -        baseTestRecoverOverflowedExpiration(false, false);
 -        baseTestRecoverOverflowedExpiration(true, false);
 -        baseTestRecoverOverflowedExpiration(true, true);
++        ExpirationDateOverflowHandling.policy = ExpirationDateOverflowHandling.ExpirationDateOverflowPolicy.REJECT;
+     }
+ 
 -    public void baseCapExpirationDateOverflowTest(boolean simple, boolean clustering, boolean flush) throws Throwable
++    public void testCapExpirationDateOverflowPolicy(boolean simple, boolean clustering, boolean flush) throws Throwable
+     {
+         // Create Table
 -        if (simple)
 -        {
 -            if (clustering)
 -                createTable("create table %s (k int, a int, b int, primary key(k, a))");
 -            else
 -                createTable("create table %s (k int primary key, a int, b int)");
 -        }
 -        else
 -        {
 -            if (clustering)
 -                createTable("create table %s (k int, a int, b set<text>, primary key(k, a))");
 -            else
 -                createTable("create table %s (k int primary key, a int, b set<text>)");
 -        }
++        createTable(simple, clustering);
+ 
+         // Insert data with INSERT and UPDATE
+         if (simple)
+         {
 -            execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?) USING TTL " + MAX_TTL, 2, 2, 2);
++            execute("INSERT INTO %s (k, a) VALUES (?, ?) USING TTL " + MAX_TTL, 2, 2);
+             if (clustering)
+                 execute("UPDATE %s USING TTL " + MAX_TTL + " SET b = 1 WHERE k = 1 AND a = 1;");
+             else
+                 execute("UPDATE %s USING TTL " + MAX_TTL + " SET a = 1, b = 1 WHERE k = 1;");
+         }
+         else
+         {
+             execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?) USING TTL " + MAX_TTL, 2, 2, set("v21", "v22", "v23", "v24"));
+             if (clustering)
+                 execute("UPDATE  %s USING TTL " + MAX_TTL + " SET b = ? WHERE k = 1 AND a = 1;", set("v11", "v12", "v13", "v14"));
+             else
+                 execute("UPDATE  %s USING TTL " + MAX_TTL + " SET a = 1, b = ? WHERE k = 1;", set("v11", "v12", "v13", "v14"));
+         }
+ 
+         // Maybe Flush
+         Keyspace ks = Keyspace.open(keyspace());
+         if (flush)
+             FBUtilities.waitOnFutures(ks.flush());
+ 
+         // Verify data
+         verifyData(simple);
+ 
+         // Maybe major compact
+         if (flush)
+         {
+             // Major compact and check data is still present
+             ks.getColumnFamilyStore(currentTable()).forceMajorCompaction();
+ 
+             // Verify data again
+             verifyData(simple);
+         }
+     }
+ 
+     public void baseTestRecoverOverflowedExpiration(boolean runScrub, boolean reinsertOverflowedTTL) throws Throwable
+     {
+         // simple column, clustering
+         testRecoverOverflowedExpirationWithScrub(true, true, runScrub, reinsertOverflowedTTL);
+         // simple column, noclustering
+         testRecoverOverflowedExpirationWithScrub(true, false, runScrub, reinsertOverflowedTTL);
+         // complex column, clustering
+         testRecoverOverflowedExpirationWithScrub(false, true, runScrub, reinsertOverflowedTTL);
+         // complex column, noclustering
+         testRecoverOverflowedExpirationWithScrub(false, false, runScrub, reinsertOverflowedTTL);
+     }
+ 
++    private void createTable(boolean simple, boolean clustering)
++    {
++        if (simple)
++        {
++            if (clustering)
++                createTable("create table %s (k int, a int, b int, primary key(k, a))");
++            else
++                createTable("create table %s (k int primary key, a int, b int)");
++        }
++        else
++        {
++            if (clustering)
++                createTable("create table %s (k int, a int, b set<text>, primary key(k, a))");
++            else
++                createTable("create table %s (k int primary key, a int, b set<text>)");
++        }
++    }
++
+     private void verifyData(boolean simple) throws Throwable
+     {
+         if (simple)
+         {
 -            assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, 2));
++            assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
+         }
+         else
+         {
+             assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
+         }
+         // Cannot retrieve TTL from collections
+         if (simple)
+             checkTTLIsCapped("b");
      }
  
+     /**
 -     * Verify that the computed TTL is approximately equal to the maximum allowed ttl given the
 -     * {@link ExpiringCell#getLocalDeletionTime()} field limitation (CASSANDRA-14092)
++     * Verify that the computed TTL is equal to the maximum allowed ttl given the
++     * {@link AbstractCell#localDeletionTime()} field limitation (CASSANDRA-14092)
+      */
+     private void checkTTLIsCapped(String field) throws Throwable
+     {
+ 
+         // TTL is computed dynamically from the row expiration time, so if it is
+         // equal to or higher than the minimum max TTL we compute before the query,
+         // we are fine.
+         int minMaxTTL = computeMaxTTL();
 -        UntypedResultSet execute = execute("SELECT ttl(" + field + ") FROM %s");
++        UntypedResultSet execute = execute("SELECT ttl(" + field + ") FROM %s WHERE k = 1");
+         for (UntypedResultSet.Row row : execute)
+         {
+             int ttl = row.getInt("ttl(" + field + ")");
+             assertTrue(ttl >= minMaxTTL);
+         }
+     }
+ 
+     /**
+      * The max TTL is computed such that the TTL summed with the current time is equal to the maximum
 -     * allowed expiration time {@link BufferExpiringCell#getLocalDeletionTime()} (2038-01-19T03:14:06+00:00)
++     * allowed expiration time {@link org.apache.cassandra.db.rows.Cell#MAX_DELETION_TIME} (2038-01-19T03:14:06+00:00)
+      */
+     private int computeMaxTTL()
+     {
+         int nowInSecs = (int) (System.currentTimeMillis() / 1000);
 -        return BufferExpiringCell.MAX_DELETION_TIME - nowInSecs;
++        return AbstractCell.MAX_DELETION_TIME - nowInSecs;
+     }
+ 
+     public void testRecoverOverflowedExpirationWithScrub(boolean simple, boolean clustering, boolean runScrub, boolean reinsertOverflowedTTL) throws Throwable
+     {
+         if (reinsertOverflowedTTL)
+         {
+             assert runScrub;
+         }
+ 
+         createTable(simple, clustering);
+ 
+         Keyspace keyspace = Keyspace.open(KEYSPACE);
+         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(currentTable());
+ 
 -        assertEquals(0, cfs.getSSTables().size());
++        assertEquals(0, cfs.getLiveSSTables().size());
+ 
+         copySSTablesToTableDir(currentTable(), simple, clustering);
+ 
+         cfs.loadNewSSTables();
+ 
+         if (runScrub)
+         {
 -            cfs.scrub(true, false, false, reinsertOverflowedTTL, 1);
++            cfs.scrub(true, false, true, reinsertOverflowedTTL, 1);
+         }
+ 
+         if (reinsertOverflowedTTL)
+         {
+             if (simple)
 -                assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, 2));
++                assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
+             else
+                 assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
+ 
+             cfs.forceMajorCompaction();
+ 
+             if (simple)
 -                assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, 2));
++                assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
+             else
+                 assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
+         }
+         else
+         {
+             assertEmpty(execute("SELECT * from %s"));
+         }
+     }
+ 
+     private void copySSTablesToTableDir(String table, boolean simple, boolean clustering) throws IOException
+     {
 -        File destDir = Keyspace.open(keyspace()).getColumnFamilyStore(table).directories.getCFDirectories().iterator().next();
++        File destDir = Keyspace.open(keyspace()).getColumnFamilyStore(table).getDirectories().getCFDirectories().iterator().next();
+         File sourceDir = getTableDir(table, simple, clustering);
+         for (File file : sourceDir.listFiles())
+         {
+             copyFile(file, destDir);
+         }
+     }
+ 
+     private static File getTableDir(String table, boolean simple, boolean clustering)
+     {
+         return new File(String.format(NEGATIVE_LOCAL_EXPIRATION_TEST_DIR, getTableName(simple, clustering)));
+     }
+ 
 -    private void createTable(boolean simple, boolean clustering)
 -    {
 -        if (simple)
 -        {
 -            if (clustering)
 -                createTable("create table %s (k int, a int, b int, primary key(k, a))");
 -            else
 -                createTable("create table %s (k int primary key, a int, b int)");
 -        }
 -        else
 -        {
 -            if (clustering)
 -                createTable("create table %s (k int, a int, b set<text>, primary key(k, a))");
 -            else
 -                createTable("create table %s (k int primary key, a int, b set<text>)");
 -        }
 -    }
 -
 -    private static File getTableDir(boolean simple, boolean clustering)
 -    {
 -        return new File(String.format(NEGATIVE_LOCAL_EXPIRATION_TEST_DIR, getTableName(simple, clustering)));
 -    }
 -
+     private static void copyFile(File src, File dest) throws IOException
+     {
+         byte[] buf = new byte[65536];
+         if (src.isFile())
+         {
+             File target = new File(dest, src.getName());
+             int rd;
+             // copy in 64 KB chunks; try-with-resources closes both streams
+             try (FileInputStream is = new FileInputStream(src);
+                  FileOutputStream os = new FileOutputStream(target))
+             {
+                 while ((rd = is.read(buf)) >= 0)
+                     os.write(buf, 0, rd);
+             }
+         }
+     }
+ 
+     public static String getTableName(boolean simple, boolean clustering)
+     {
+         if (simple)
+             return clustering ? SIMPLE_CLUSTERING : SIMPLE_NOCLUSTERING;
+         else
+             return clustering ? COMPLEX_CLUSTERING : COMPLEX_NOCLUSTERING;
+     }
  }
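
The computeMaxTTL() helper in TTLTest works because local expiration times
are stored as signed 32-bit seconds since the epoch. A standalone sketch of
the same arithmetic, assuming the Integer.MAX_VALUE - 1 cap that corresponds
to the 2038-01-19T03:14:06+00:00 date cited in the javadoc:

    import java.time.Instant;

    final class MaxTtlSketch
    {
        // Mirrors the Cell.MAX_DELETION_TIME cap referenced by computeMaxTTL().
        static final int MAX_DELETION_TIME = Integer.MAX_VALUE - 1;

        public static void main(String[] args)
        {
            // Prints 2038-01-19T03:14:06Z, the largest representable expiration.
            System.out.println(Instant.ofEpochSecond(MAX_DELETION_TIME));

            // The largest TTL that still fits is the cap minus "now" in seconds.
            int nowInSecs = (int) (System.currentTimeMillis() / 1000);
            System.out.println("max usable TTL now: " + (MAX_DELETION_TIME - nowInSecs) + "s");
        }
    }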

http://git-wip-us.apache.org/repos/asf/cassandra/blob/c231ed5b/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/ScrubTest.java
index 08336a1,9b1ede4..fc2faea
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@@ -113,14 -127,18 +113,14 @@@ public class ScrubTes
          ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
          cfs.clearUnsafe();
  
 -        List<Row> rows;
 -
          // insert data and verify we get it back w/ range query
          fillCF(cfs, 1);
 -        rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
 -        assertEquals(1, rows.size());
 +        assertOrderedAll(cfs, 1);
  
--        CompactionManager.instance.performScrub(cfs, false, true, 2);
++        CompactionManager.instance.performScrub(cfs, false, true, false, 2);
  
          // check data is still there
 -        rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);
 -        assertEquals(1, rows.size());
 +        assertOrderedAll(cfs, 1);
      }
  
      @Test
@@@ -617,9 -763,9 +617,9 @@@
                  boolean failure = !scrubs[i];
                  if (failure)
                  { //make sure the next scrub fails
 -                    overrideWithGarbage(indexCfs.getSSTables().iterator().next(), ByteBufferUtil.bytes(1L), ByteBufferUtil.bytes(2L));
 +                    overrideWithGarbage(indexCfs.getLiveSSTables().iterator().next(), ByteBufferUtil.bytes(1L), ByteBufferUtil.bytes(2L));
                  }
-                 CompactionManager.AllSSTableOpStatus result = indexCfs.scrub(false, false, true, true, 0);
 -                CompactionManager.AllSSTableOpStatus result = indexCfs.scrub(false, false, true, true, true, 0);
++                CompactionManager.AllSSTableOpStatus result = indexCfs.scrub(false, false, false, true, false, 0);
                  assertEquals(failure ?
                               CompactionManager.AllSSTableOpStatus.ABORTED :
                               CompactionManager.AllSSTableOpStatus.SUCCESSFUL,
@@@ -629,118 -775,8 +629,118 @@@
  
  
          // check index is still working
 -        rows = cfs.search(Util.range("", ""), Arrays.asList(expr), new IdentityQueryFilter(), numRows);
 -        assertNotNull(rows);
 -        assertEquals(numRows / 2, rows.size());
 +        assertOrdered(Util.cmd(cfs).filterOn(colName, Operator.EQ, 1L).build(), numRows / 2);
 +    }
 +
 +    private static SSTableMultiWriter createTestWriter(Descriptor descriptor, long keyCount, CFMetaData metadata, LifecycleTransaction txn)
 +    {
 +        SerializationHeader header = new SerializationHeader(true, metadata, metadata.partitionColumns(), EncodingStats.NO_STATS);
 +        MetadataCollector collector = new MetadataCollector(metadata.comparator).sstableLevel(0);
 +        return new TestMultiWriter(new TestWriter(descriptor, keyCount, 0, metadata, collector, header, txn));
 +    }
 +
 +    private static class TestMultiWriter extends SimpleSSTableMultiWriter
 +    {
 +        TestMultiWriter(SSTableWriter writer)
 +        {
 +            super(writer);
 +        }
 +    }
 +
 +    /**
 +     * Test writer that allows to write out of order SSTable.
 +     */
 +    private static class TestWriter extends BigTableWriter
 +    {
 +        TestWriter(Descriptor descriptor, long keyCount, long repairedAt, CFMetaData metadata,
 +                   MetadataCollector collector, SerializationHeader header, LifecycleTransaction txn)
 +        {
 +            super(descriptor, keyCount, repairedAt, metadata, collector, header, txn);
 +        }
 +
 +        @Override
 +        protected long beforeAppend(DecoratedKey decoratedKey)
 +        {
 +            return dataFile.position();
 +        }
 +    }
 +
 +    /**
 +     * Tests with invalid sstables (containing duplicate entries in 2.0 and 3.0 storage format),
 +     * that were caused by upgrading from 2.x with duplicate range tombstones.
 +     *
 +     * See CASSANDRA-12144 for details.
 +     */
 +    @Test
 +    public void testFilterOutDuplicates() throws Exception
 +    {
 +        DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance);
 +        QueryProcessor.process(String.format("CREATE TABLE \"%s\".cf_with_duplicates_3_0 (a int, b int, c int, PRIMARY KEY (a, b))", KEYSPACE), ConsistencyLevel.ONE);
 +
 +        Keyspace keyspace = Keyspace.open(KEYSPACE);
 +        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("cf_with_duplicates_3_0");
 +
 +        Path legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
 +                                           "Keyspace1",
 +                                           "cf_with_duplicates_3_0");
 +
 +        for (String filename : new String[]{ "mb-3-big-CompressionInfo.db",
 +                                             "mb-3-big-Digest.crc32",
 +                                             "mb-3-big-Index.db",
 +                                             "mb-3-big-Summary.db",
 +                                             "mb-3-big-Data.db",
 +                                             "mb-3-big-Filter.db",
 +                                             "mb-3-big-Statistics.db",
 +                                             "mb-3-big-TOC.txt" })
 +        {
 +            Files.copy(Paths.get(legacySSTableRoot.toString(), filename), cfs.getDirectories().getDirectoryForNewSSTables().toPath().resolve(filename));
 +        }
 +
 +        cfs.loadNewSSTables();
 +
-         cfs.scrub(true, true, true, 1);
++        cfs.scrub(true, true, false, false, false, 1);
 +
 +        UntypedResultSet rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".cf_with_duplicates_3_0", KEYSPACE));
 +        assertEquals(1, rs.size());
 +        QueryProcessor.executeInternal(String.format("DELETE FROM \"%s\".cf_with_duplicates_3_0 WHERE a=1 AND b =2", KEYSPACE));
 +        rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".cf_with_duplicates_3_0", KEYSPACE));
 +        assertEquals(0, rs.size());
 +    }
 +
 +    @Test
 +    public void testUpgradeSstablesWithDuplicates() throws Exception
 +    {
 +        DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance);
 +        String cf = "cf_with_duplicates_2_0";
 +        QueryProcessor.process(String.format("CREATE TABLE \"%s\".%s (a int, b int, c int, PRIMARY KEY (a, b))", KEYSPACE, cf), ConsistencyLevel.ONE);
 +
 +        Keyspace keyspace = Keyspace.open(KEYSPACE);
 +        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cf);
 +
 +        Path legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
 +                                           "Keyspace1",
 +                                           cf);
 +
 +        for (String filename : new String[]{ "lb-1-big-CompressionInfo.db",
 +                                             "lb-1-big-Data.db",
 +                                             "lb-1-big-Digest.adler32",
 +                                             "lb-1-big-Filter.db",
 +                                             "lb-1-big-Index.db",
 +                                             "lb-1-big-Statistics.db",
 +                                             "lb-1-big-Summary.db",
 +                                             "lb-1-big-TOC.txt" })
 +        {
 +            Files.copy(Paths.get(legacySSTableRoot.toString(), filename), cfs.getDirectories().getDirectoryForNewSSTables().toPath().resolve(filename));
 +        }
 +
 +        cfs.loadNewSSTables();
 +
 +        cfs.sstablesRewrite(true, 1);
 +
 +        UntypedResultSet rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
 +        assertEquals(1, rs.size());
 +        QueryProcessor.executeInternal(String.format("DELETE FROM \"%s\".%s WHERE a=1 AND b =2", KEYSPACE, cf));
 +        rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
 +        assertEquals(0, rs.size());
      }
  }
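
Both test files above thread a new reinsertOverflowedTTL flag through the
scrub entry points; it is the recovery path exercised by the
negative-local-expiration fixtures, rewriting cells whose expiration
overflowed instead of purging them. A minimal sketch of the underlying
CAP-style clamp, assuming int-seconds timestamps (the real handling lives in
ExpirationDateOverflowHandling):

    final class OverflowClamp
    {
        static final int MAX_DELETION_TIME = Integer.MAX_VALUE - 1;

        // now + ttl wraps negative once it passes the signed 32-bit range;
        // a CAP-style policy pins such values to the maximum representable time.
        static int localExpirationTime(int nowInSec, int ttl)
        {
            int expiration = nowInSec + ttl;
            return expiration >= 0 ? expiration : MAX_DELETION_TIME;
        }
    }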

