You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by bl...@apache.org on 2015/09/04 21:14:19 UTC

[1/5] cassandra git commit: Allow range deletions in CQL

Repository: cassandra
Updated Branches:
  refs/heads/trunk 10fa32931 -> 86faf8cb3


http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
index c5d153f..b939b7f 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/UpdateTest.java
@@ -18,8 +18,11 @@
 
 package org.apache.cassandra.cql3.validation.operations;
 
+import java.util.Arrays;
+
 import org.junit.Test;
 
+import static org.apache.commons.lang3.StringUtils.isEmpty;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -83,4 +86,448 @@ public class UpdateTest extends CQLTester
         assertInvalid("UPDATE %s SET d = (int)3 WHERE k = ?", 0);
         assertInvalid("UPDATE %s SET i = (double)3 WHERE k = ?", 0);
     }
+
+    @Test
+    public void testUpdate() throws Throwable
+    {
+        testUpdate(false);
+        testUpdate(true);
+    }
+
+    private void testUpdate(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] {"", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1))" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 2, 2)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 3, 3)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (1, 0, 4)");
+
+            flush(forceFlush);
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
+                               0, 1),
+                       row(7));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) = (?)", 8, 0, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ?",
+                               0, 2),
+                       row(8));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ?", 9, 0, 1, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ?",
+                               0, 1, 0),
+                       row(0, 0, 9),
+                       row(1, 0, 9));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ?", 19, Arrays.asList(0, 1), 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ?",
+                               Arrays.asList(0, 1), 0),
+                       row(0, 0, 19),
+                       row(1, 0, 19));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 10, 0, 1, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)",
+                               0, 1, 0),
+                       row(0, 0, 10),
+                       row(0, 1, 10));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))", 20, 0, 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                               0, 0, 1),
+                       row(0, 0, 20),
+                       row(0, 1, 20));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ?", null, 0, 0);
+            flush(forceFlush);
+
+            if (isEmpty(compactOption))
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                                   0, 0, 1),
+                           row(0, 0, null),
+                           row(0, 1, 20));
+            }
+            else
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?))",
+                                   0, 0, 1),
+                           row(0, 1, 20));
+            }
+
+            // test invalid queries
+
+            // missing primary key element
+            assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                                 "UPDATE %s SET value = ? WHERE clustering_1 = ? ", 7, 1);
+
+            assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
+
+            assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
+
+            // token function
+            assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
+                                 "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ?",
+                                 7, 0, 1);
+
+            // multiple times the same value
+            assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+
+            // multiple times the same primary key element in WHERE clause
+            assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_1 = ?", 7, 0, 1, 1);
+
+            // unknown identifiers
+            assertInvalidMessage("Unknown identifier value1",
+                                 "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ?", 7, 0, 1);
+
+            assertInvalidMessage("Undefined name partitionkey1 in where clause ('partitionkey1 = ?')",
+                                 "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ?", 7, 0, 1);
+
+            assertInvalidMessage("Undefined name clustering_3 in where clause ('clustering_3 = ?')",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_3 = ?", 7, 0, 1);
+
+            // Invalid operator in the where clause
+            assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                                 "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ?", 7, 0, 1);
+
+            assertInvalidMessage("Cannot use CONTAINS on non-collection column partitionkey",
+                                 "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ?", 7, 0, 1);
+
+            String expectedMsg = isEmpty(compactOption) ? "Non PRIMARY KEY columns found in where clause: value"
+                                                        : "Predicates on the non-primary-key column (value) of a COMPACT table are not yet supported";
+            assertInvalidMessage(expectedMsg,
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND value = ?", 7, 0, 1, 3);
+
+            assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
+        }
+    }
+
+    @Test
+    public void testUpdateWithSecondaryIndices() throws Throwable
+    {
+        testUpdateWithSecondaryIndices(false);
+        testUpdateWithSecondaryIndices(true);
+    }
+
+    private void testUpdateWithSecondaryIndices(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                "clustering_1 int," +
+                "value int," +
+                "values set<int>," +
+                " PRIMARY KEY (partitionKey, clustering_1))");
+
+        createIndex("CREATE INDEX ON %s (value)");
+        createIndex("CREATE INDEX ON %s (clustering_1)");
+        createIndex("CREATE INDEX ON %s (values)");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 0, 0, {0})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 1, 1, {0, 1})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 2, 2, {0, 1, 2})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 3, 3, {0, 1, 2, 3})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (1, 0, 4, {0, 1, 2, 3, 4})");
+
+        flush(forceFlush);
+
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "UPDATE %s SET values= {6} WHERE partitionKey = ? AND clustering_1 = ? AND value = ?", 3, 3, 3);
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: values",
+                             "UPDATE %s SET value= ? WHERE partitionKey = ? AND clustering_1 = ? AND values CONTAINS ?", 6, 3, 3, 3);
+        assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                             "UPDATE %s SET values= {6} WHERE partitionKey = ? AND value = ?", 3, 3);
+        assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                             "UPDATE %s SET value= ? WHERE partitionKey = ? AND values CONTAINS ?", 6, 3, 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "UPDATE %s SET values= {6} WHERE clustering_1 = ?", 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "UPDATE %s SET values= {6} WHERE value = ?", 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "UPDATE %s SET value= ? WHERE values CONTAINS ?", 6, 3);
+    }
+
+    @Test
+    public void testUpdateWithTwoClusteringColumns() throws Throwable
+    {
+        testUpdateWithTwoClusteringColumns(false);
+        testUpdateWithTwoClusteringColumns(true);
+    }
+
+    private void testUpdateWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
+            flush(forceFlush);
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                               0, 1, 1),
+                       row(7));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 8, 0, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                               0, 1, 2),
+                       row(8));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 0, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?",
+                               0, 1, 0, 0),
+                       row(0, 0, 0, 9),
+                       row(1, 0, 0, 9));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?", 9, Arrays.asList(0, 1), 0, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN ? AND clustering_1 = ? AND clustering_2 = ?",
+                               Arrays.asList(0, 1), 0, 0),
+                       row(0, 0, 0, 9),
+                       row(1, 0, 0, 9));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 12, 0, 1, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)",
+                               0, 1, 1, 2),
+                       row(0, 1, 1, 12),
+                       row(0, 1, 2, 12));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)", 10, 0, 1, 0, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND clustering_2 IN (?, ?)",
+                               0, 1, 0, 1, 2),
+                       row(0, 0, 1, 10),
+                       row(0, 0, 2, 10),
+                       row(0, 1, 1, 10),
+                       row(0, 1, 2, 10));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 20, 0, 0, 2, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
+                               0, 0, 2, 1, 2),
+                       row(0, 0, 2, 20),
+                       row(0, 1, 2, 20));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", null, 0, 0, 2);
+            flush(forceFlush);
+
+            if (isEmpty(compactOption))
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
+                                   0, 0, 2, 1, 2),
+                           row(0, 0, 2, null),
+                           row(0, 1, 2, 20));
+            }
+            else
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))",
+                                   0, 0, 2, 1, 2),
+                           row(0, 1, 2, 20));
+            }
+
+            // test invalid queries
+
+            // missing primary key element
+            assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                                 "UPDATE %s SET value = ? WHERE clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
+
+            String errorMsg = isEmpty(compactOption) ? "Some clustering keys are missing: clustering_1"
+                                                     : "PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted";
+
+            assertInvalidMessage(errorMsg,
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_2 = ?", 7, 0, 1);
+
+            assertInvalidMessage("Some clustering keys are missing: clustering_1, clustering_2",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ?", 7, 0);
+
+            // token function
+            assertInvalidMessage("The token function cannot be used in WHERE clauses for UPDATE statements",
+                                 "UPDATE %s SET value = ? WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?",
+                                 7, 0, 1, 1);
+
+            // multiple times the same value
+            assertInvalidSyntax("UPDATE %s SET value = ?, value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+            // multiple times the same primary key element in WHERE clause
+            assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 7, 0, 1, 1, 1);
+
+            // unknown identifiers
+            assertInvalidMessage("Unknown identifier value1",
+                                 "UPDATE %s SET value1 = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+            assertInvalidMessage("Undefined name partitionkey1 in where clause ('partitionkey1 = ?')",
+                                 "UPDATE %s SET value = ? WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+            assertInvalidMessage("Undefined name clustering_3 in where clause ('clustering_3 = ?')",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 7, 0, 1, 1);
+
+            // Invalid operator in the where clause
+            assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                                 "UPDATE %s SET value = ? WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+            assertInvalidMessage("Cannot use CONTAINS on non-collection column partitionkey",
+                                 "UPDATE %s SET value = ? WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 1, 1);
+
+            String expectedMsg = isEmpty(compactOption) ? "Non PRIMARY KEY columns found in where clause: value"
+                                                        : "Predicates on the non-primary-key column (value) of a COMPACT table are not yet supported";
+            assertInvalidMessage(expectedMsg,
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 7, 0, 1, 1, 3);
+
+            assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND clustering_1 > ?", 7, 0, 1);
+
+            assertInvalidMessage("Slice restrictions are not supported on the clustering columns in UPDATE statements",
+                                 "UPDATE %s SET value = ? WHERE partitionKey = ? AND (clustering_1, clustering_2) > (?, ?)", 7, 0, 1, 1);
+        }
+    }
+
+    @Test
+    public void testUpdateWithMultiplePartitionKeyComponents() throws Throwable
+    {
+        testUpdateWithMultiplePartitionKeyComponents(false);
+        testUpdateWithMultiplePartitionKeyComponents(true);
+    }
+
+    private void testUpdateWithMultiplePartitionKeyComponents(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey_1 int," +
+                    "partitionKey_2 int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY ((partitionKey_1, partitionKey_2), clustering_1, clustering_2))" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 0)");
+            execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 0, 1, 1)");
+            execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (0, 1, 1, 1, 2)");
+            execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 0, 0, 1, 3)");
+            execute("INSERT INTO %s (partitionKey_1, partitionKey_2, clustering_1, clustering_2, value) VALUES (1, 1, 0, 1, 3)");
+            flush(forceFlush);
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 0, 0, 0, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT value FROM %s WHERE partitionKey_1 = ? AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
+                               0, 0, 0, 0),
+                       row(7));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?", 9, 0, 1, 1, 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 = ? AND clustering_1 = ? AND clustering_2 = ?",
+                               0, 1, 1, 0, 1),
+                       row(0, 1, 0, 1, 9),
+                       row(1, 1, 0, 1, 9));
+
+            execute("UPDATE %s SET value = ? WHERE partitionKey_1 IN (?, ?) AND partitionKey_2 IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 10, 0, 1, 0, 1, 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s"),
+                       row(0, 0, 0, 0, 7),
+                       row(0, 0, 0, 1, 10),
+                       row(0, 1, 0, 1, 10),
+                       row(0, 1, 1, 1, 2),
+                       row(1, 0, 0, 1, 10),
+                       row(1, 1, 0, 1, 10));
+
+            // missing primary key element
+            assertInvalidMessage("Some partition key parts are missing: partitionkey_2",
+                                 "UPDATE %s SET value = ? WHERE partitionKey_1 = ? AND clustering_1 = ? AND clustering_2 = ?", 7, 1, 1);
+        }
+    }
+
+    @Test
+    public void testUpdateWithAStaticColumn() throws Throwable
+    {
+        testUpdateWithAStaticColumn(false);
+        testUpdateWithAStaticColumn(true);
+    }
+
+    private void testUpdateWithAStaticColumn(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      "staticValue text static," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value, staticValue) VALUES (0, 0, 0, 0, 'A')");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value, staticValue) VALUES (1, 0, 0, 6, 'B')");
+        flush(forceFlush);
+
+        execute("UPDATE %s SET staticValue = ? WHERE partitionKey = ?", "A2", 0);
+        flush(forceFlush);
+
+        assertRows(execute("SELECT DISTINCT staticValue FROM %s WHERE partitionKey = ?", 0),
+                   row("A2"));
+
+        assertInvalidMessage("Some clustering keys are missing: clustering_1, clustering_2",
+                             "UPDATE %s SET staticValue = ?, value = ? WHERE partitionKey = ?", "A2", 7, 0);
+
+        execute("UPDATE %s SET staticValue = ?, value = ?  WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                "A3", 7, 0, 0, 1);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                           0, 0, 1),
+                   row(0, 0, 1, "A3", 7));
+
+        assertInvalidMessage("Invalid restrictions on clustering columns since the UPDATE statement modifies only static columns",
+                             "UPDATE %s SET staticValue = ? WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                             "A3", 0, 0, 1);
+    }
+
+    @Test
+    public void testUpdateWithStaticList() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int," +
+                                      "clustering int," +
+                                      "value int," +
+                                      "l list<text> static," +
+                                      " PRIMARY KEY (k, clustering))");
+
+        execute("INSERT INTO %s(k, clustering, value, l) VALUES (?, ?, ?, ?)", 0, 0, 0 ,list("v1", "v2", "v3"));
+
+        assertRows(execute("SELECT l FROM %s WHERE k = 0"), row(list("v1", "v2", "v3")));
+
+        execute("UPDATE %s SET l[?] = ? WHERE k = ?", 1, "v4", 0);
+
+        assertRows(execute("SELECT l FROM %s WHERE k = 0"), row(list("v1", "v4", "v3")));
+    }
+
+    private void flush(boolean forceFlush)
+    {
+        if (forceFlush)
+            flush();
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
index 79efb87..f40abe9 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
@@ -28,8 +28,6 @@ import com.google.common.base.Joiner;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
-import org.apache.cassandra.Util;
-import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -346,6 +344,201 @@ public class RangeTombstoneListTest
         }
     }
 
+    @Test
+    public void nonSortedAdditionTestWithOneTombstoneWithEmptyEnd()
+    {
+        nonSortedAdditionTestWithOneRangeWithEmptyEnd(0);
+        nonSortedAdditionTestWithOneRangeWithEmptyEnd(10);
+    }
+
+    private static void nonSortedAdditionTestWithOneRangeWithEmptyEnd(int initialCapacity)
+    {
+        RangeTombstoneList l = new RangeTombstoneList(cmp, initialCapacity);
+        RangeTombstone rt1 = rt(1, 5, 3);
+        RangeTombstone rt2 = rt(7, 10, 2);
+        RangeTombstone rt3 = atLeast(11, 1, 0);
+
+        l.add(rt2);
+        l.add(rt3);
+        l.add(rt1);
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(rt1, iter.next());
+        assertRT(rt2, iter.next());
+        assertRT(rt3, iter.next());
+
+        assert !iter.hasNext();
+    }
+
+    @Test
+    public void addRangeWithEmptyEndWhichIncludeExistingRange()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(rt(4, 10, 3));
+        l.add(atLeast(3, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(atLeast(3, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addRangeWithEmptyStartAndEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(rt(4, 10, 3));
+        l.add(atMost(12, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(atMost(12, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addRangeWithEmptyEndToRangeWithEmptyStartAndEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(new RangeTombstone(Slice.ALL, new DeletionTime(2, 0)));
+        l.add(atLeast(12, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(lessThan(12, 2, 0), iter.next());
+        assertRT(atLeast(12, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addRangeWithEmptyEndWhichIncludeExistingRangeWithEmptyEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(atLeast(5, 3, 0));
+        l.add(atLeast(3, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(atLeast(3, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addIncludedRangeToRangeWithEmptyEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(atLeast(3, 3, 0));
+        l.add(rt(4, 10, 4));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(rtie(3, 4, 3), iter.next());
+        assertRT(rt(4, 10, 4), iter.next());
+        assertRT(greaterThan(10, 3, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addIncludedRangeWithEmptyEndToRangeWithEmptyEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(atLeast(3, 3, 0));
+        l.add(atLeast(5, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(rtie(3, 5, 3), iter.next());
+        assertRT(atLeast(5, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addRangeWithEmptyEndWhichOverlapExistingRange()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(rt(4, 10, 3));
+        l.add(atLeast(6, 4, 0));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(rtie(4, 6, 3), iter.next());
+        assertRT(atLeast(6, 4, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void addOverlappingRangeToRangeWithEmptyEnd()
+    {
+
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+
+        l.add(atLeast(3, 3, 0));
+        l.add(rt(1, 10, 4));
+
+        Iterator<RangeTombstone> iter = l.iterator();
+        assertRT(rt(1, 10, 4), iter.next());
+        assertRT(greaterThan(10, 3, 0), iter.next());
+        assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void searchTestWithEmptyStart()
+    {
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+        l.add(atMost(4, 5, 0));
+        l.add(rt(4, 6, 2));
+        l.add(rt(9, 12, 1));
+        l.add(rt(14, 15, 3));
+        l.add(rt(15, 17, 6));
+
+        assertEquals(5, l.searchDeletionTime(clustering(-1)).markedForDeleteAt());
+        assertEquals(5, l.searchDeletionTime(clustering(0)).markedForDeleteAt());
+        assertEquals(5, l.searchDeletionTime(clustering(3)).markedForDeleteAt());
+        assertEquals(5, l.searchDeletionTime(clustering(4)).markedForDeleteAt());
+
+        assertEquals(2, l.searchDeletionTime(clustering(5)).markedForDeleteAt());
+
+        assertEquals(null, l.searchDeletionTime(clustering(7)));
+
+        assertEquals(3, l.searchDeletionTime(clustering(14)).markedForDeleteAt());
+
+        assertEquals(6, l.searchDeletionTime(clustering(15)).markedForDeleteAt());
+        assertEquals(null, l.searchDeletionTime(clustering(18)));
+    }
+
+    @Test
+    public void searchTestWithRangeWithEmptyEnd()
+    {
+        RangeTombstoneList l = new RangeTombstoneList(cmp, 0);
+        l.add(rt(0, 4, 5));
+        l.add(rt(4, 6, 2));
+        l.add(rt(9, 12, 1));
+        l.add(rt(14, 15, 3));
+        l.add(atLeast(15, 6, 0));
+
+        assertEquals(null, l.searchDeletionTime(clustering(-1)));
+
+        assertEquals(5, l.searchDeletionTime(clustering(0)).markedForDeleteAt());
+        assertEquals(5, l.searchDeletionTime(clustering(3)).markedForDeleteAt());
+        assertEquals(5, l.searchDeletionTime(clustering(4)).markedForDeleteAt());
+
+        assertEquals(2, l.searchDeletionTime(clustering(5)).markedForDeleteAt());
+
+        assertEquals(null, l.searchDeletionTime(clustering(7)));
+
+        assertEquals(3, l.searchDeletionTime(clustering(14)).markedForDeleteAt());
+
+        assertEquals(6, l.searchDeletionTime(clustering(15)).markedForDeleteAt());
+        assertEquals(6, l.searchDeletionTime(clustering(1000)).markedForDeleteAt());
+    }
+
     private static void assertRT(RangeTombstone expected, RangeTombstone actual)
     {
         assertTrue(String.format("%s != %s", toString(expected), toString(actual)), cmp.compare(expected.deletedSlice().start(), actual.deletedSlice().start()) == 0);
@@ -420,11 +613,6 @@ public class RangeTombstoneListTest
         return ByteBufferUtil.bytes(i);
     }
 
-    private static int i(Slice.Bound bound)
-    {
-        return ByteBufferUtil.toInt(bound.get(0));
-    }
-
     private static RangeTombstone rt(int start, int end, long tstamp)
     {
         return rt(start, end, tstamp, 0);
@@ -459,4 +647,24 @@ public class RangeTombstoneListTest
     {
         return new RangeTombstone(Slice.make(Slice.Bound.inclusiveStartOf(bb(start)), Slice.Bound.exclusiveEndOf(bb(end))), new DeletionTime(tstamp, delTime));
     }
+
+    private static RangeTombstone atLeast(int start, long tstamp, int delTime)
+    {
+        return new RangeTombstone(Slice.make(Slice.Bound.inclusiveStartOf(bb(start)), Slice.Bound.TOP), new DeletionTime(tstamp, delTime));
+    }
+
+    private static RangeTombstone atMost(int end, long tstamp, int delTime)
+    {
+        return new RangeTombstone(Slice.make(Slice.Bound.BOTTOM, Slice.Bound.inclusiveEndOf(bb(end))), new DeletionTime(tstamp, delTime));
+    }
+
+    private static RangeTombstone lessThan(int end, long tstamp, int delTime)
+    {
+        return new RangeTombstone(Slice.make(Slice.Bound.BOTTOM, Slice.Bound.exclusiveEndOf(bb(end))), new DeletionTime(tstamp, delTime));
+    }
+
+    private static RangeTombstone greaterThan(int start, long tstamp, int delTime)
+    {
+        return new RangeTombstone(Slice.make(Slice.Bound.exclusiveStartOf(bb(start)), Slice.Bound.TOP), new DeletionTime(tstamp, delTime));
+    }
 }


[4/5] cassandra git commit: Allow range deletions in CQL

Posted by bl...@apache.org.
Allow range deletions in CQL

patch by Benjamin Lerer; reviewed by Joshua McKenzie for CASSANDRA-6237


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2e3727e3
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2e3727e3
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2e3727e3

Branch: refs/heads/trunk
Commit: 2e3727e3ff682dbab734aaccf641360bc62a8561
Parents: 8f249a6
Author: blerer <be...@datastax.com>
Authored: Fri Sep 4 21:10:29 2015 +0200
Committer: blerer <be...@datastax.com>
Committed: Fri Sep 4 21:10:29 2015 +0200

----------------------------------------------------------------------
 NEWS.txt                                        |   6 +-
 doc/cql3/CQL.textile                            |  13 +-
 .../cassandra/config/ColumnDefinition.java      |   6 +-
 .../cassandra/cql3/AbstractConditions.java      |  65 ++
 .../apache/cassandra/cql3/ColumnConditions.java | 167 ++++
 .../apache/cassandra/cql3/ColumnIdentifier.java |  56 +-
 .../org/apache/cassandra/cql3/Conditions.java   | 100 +++
 src/java/org/apache/cassandra/cql3/Cql.g        |   6 +-
 .../cassandra/cql3/IfExistsCondition.java       |  36 +
 .../cassandra/cql3/IfNotExistsCondition.java    |  36 +
 src/java/org/apache/cassandra/cql3/Json.java    |  36 +-
 .../org/apache/cassandra/cql3/Operations.java   | 135 ++++
 .../cassandra/cql3/SingleColumnRelation.java    |   1 +
 .../apache/cassandra/cql3/UpdateParameters.java |   9 +-
 .../restrictions/StatementRestrictions.java     | 246 ++++--
 .../cql3/statements/BatchStatement.java         |  91 +--
 .../cql3/statements/CQL3CasRequest.java         |  14 +-
 .../cql3/statements/DeleteStatement.java        |  99 +--
 .../cql3/statements/ModificationStatement.java  | 765 +++++++++----------
 .../cql3/statements/SelectStatement.java        |   9 +-
 .../cql3/statements/StatementType.java          | 138 ++++
 .../cql3/statements/UpdateStatement.java        | 198 +++--
 .../cql3/statements/UpdatesCollector.java       | 130 ++++
 src/java/org/apache/cassandra/db/CBuilder.java  |   9 +-
 .../org/apache/cassandra/db/RangeTombstone.java |   4 +-
 src/java/org/apache/cassandra/db/Slices.java    |   9 +
 .../cassandra/io/sstable/CQLSSTableWriter.java  |   7 +-
 .../cassandra/cql3/MaterializedViewTest.java    |  33 +
 .../cql3/validation/entities/UFAuthTest.java    |   8 +-
 .../entities/UFIdentificationTest.java          |  44 +-
 .../cql3/validation/operations/BatchTest.java   |  79 +-
 .../cql3/validation/operations/DeleteTest.java  | 681 ++++++++++++++++-
 .../cql3/validation/operations/InsertTest.java  | 233 ++++++
 .../operations/InsertUpdateIfConditionTest.java |  28 +-
 .../cql3/validation/operations/UpdateTest.java  | 447 +++++++++++
 .../cassandra/db/RangeTombstoneListTest.java    | 222 +++++-
 36 files changed, 3384 insertions(+), 782 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index c7976b9..af2f64c 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -18,6 +18,11 @@ using the provided 'sstableupgrade' tool.
 
 New features
 ------------
+   - Support for IN restrictions on any partition key component or clustering key
+     as well as support for EQ and IN multicolumn restrictions has been added to
+     UPDATE and DELETE statements.
+   - Support for single-column and multi-column slice restrictions (>, >=, <= and <)
+     has been added to DELETE statements.
    - nodetool rebuild_index accepts the index argument without
      the redundant table name
    - Materialized Views, which allow for server-side denormalization, is now
@@ -35,7 +40,6 @@ New features
      you do not run repair for a long time, you will keep all tombstones around which
      can cause other problems.
 
-
 Upgrading
 ---------
    - Max mutation size is now configurable via max_mutation_size_in_kb setting in

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/doc/cql3/CQL.textile
----------------------------------------------------------------------
diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile
index 74ed64e..0e04528 100644
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@ -863,8 +863,11 @@ bc(syntax)..
 <where-clause> ::= <relation> ( AND <relation> )*
 
 <relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
+             | '(' <identifier> (',' <identifier>)* ')' '=' <term-tuple>
+             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
              | <identifier> IN '?'
+             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
+             | '(' <identifier> (',' <identifier>)* ')' IN '?'
 
 <option> ::= TIMESTAMP <integer>
            | TTL <integer>
@@ -914,10 +917,14 @@ bc(syntax)..
 
 <where-clause> ::= <relation> ( AND <relation> )*
 
-<relation> ::= <identifier> '=' <term>
-             | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')'
+<relation> ::= <identifier> <op> <term>
+             | '(' <identifier> (',' <identifier>)* ')' <op> <term-tuple>
+             | <identifier> IN '(' ( <term> ( ',' <term>)* )? ')'
              | <identifier> IN '?'
+             | '(' <identifier> (',' <identifier>)* ')' IN '(' ( <term-tuple> ( ',' <term-tuple>)* )? ')'
+             | '(' <identifier> (',' <identifier>)* ')' IN '?'
 
+<op> ::= '=' | '<' | '>' | '<=' | '>='
 <condition> ::= <identifier> '=' <term>
               | <identifier> '[' <term> ']' '=' <term>
 p. 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/config/ColumnDefinition.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/ColumnDefinition.java b/src/java/org/apache/cassandra/config/ColumnDefinition.java
index 6afd3e7..82f2556 100644
--- a/src/java/org/apache/cassandra/config/ColumnDefinition.java
+++ b/src/java/org/apache/cassandra/config/ColumnDefinition.java
@@ -23,7 +23,7 @@ import java.util.*;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Objects;
-import com.google.common.collect.Lists;
+import com.google.common.collect.Collections2;
 
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.rows.*;
@@ -285,9 +285,9 @@ public class ColumnDefinition extends ColumnSpecification implements Comparable<
      * @param definitions the column definitions to convert.
      * @return the column identifiers corresponding to the specified definitions
      */
-    public static List<ColumnIdentifier> toIdentifiers(List<ColumnDefinition> definitions)
+    public static Collection<ColumnIdentifier> toIdentifiers(Collection<ColumnDefinition> definitions)
     {
-        return Lists.transform(definitions, new Function<ColumnDefinition, ColumnIdentifier>()
+        return Collections2.transform(definitions, new Function<ColumnDefinition, ColumnIdentifier>()
         {
             @Override
             public ColumnIdentifier apply(ColumnDefinition columnDef)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/AbstractConditions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/AbstractConditions.java b/src/java/org/apache/cassandra/cql3/AbstractConditions.java
new file mode 100644
index 0000000..71e3595
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/AbstractConditions.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import java.util.Collections;
+
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.cql3.functions.Function;
+
+/**
+ * Base class for <code>Conditions</code> classes.
+ *
+ */
+abstract class AbstractConditions implements Conditions
+{
+    public Iterable<Function> getFunctions()
+    {
+        return Collections.emptyList();
+    }
+
+    public Iterable<ColumnDefinition> getColumns()
+    {
+        return null;
+    }
+
+    public boolean isEmpty()
+    {
+        return false;
+    }
+
+    public boolean appliesToStaticColumns()
+    {
+        return false;
+    }
+
+    public boolean appliesToRegularColumns()
+    {
+        return false;
+    }
+
+    public boolean isIfExists()
+    {
+        return false;
+    }
+
+    public boolean isIfNotExists()
+    {
+        return false;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/ColumnConditions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/ColumnConditions.java b/src/java/org/apache/cassandra/cql3/ColumnConditions.java
new file mode 100644
index 0000000..5ac8119
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/ColumnConditions.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.statements.CQL3CasRequest;
+import org.apache.cassandra.db.Clustering;
+
+import static java.util.stream.StreamSupport.stream;
+
+/**
+ * A set of <code>ColumnCondition</code>s.
+ *
+ */
+public final class ColumnConditions extends AbstractConditions
+{
+    /**
+     * The conditions on regular columns.
+     */
+    private final List<ColumnCondition> columnConditions;
+
+    /**
+     * The conditions on static columns
+     */
+    private final List<ColumnCondition> staticConditions;
+
+    /**
+     * Creates a new <code>ColumnConditions</code> instance for the specified builder.
+     */
+    private ColumnConditions(Builder builder)
+    {
+        this.columnConditions = builder.columnConditions;
+        this.staticConditions = builder.staticConditions;
+    }
+
+    @Override
+    public boolean appliesToStaticColumns()
+    {
+        return !staticConditions.isEmpty();
+    }
+
+    @Override
+    public boolean appliesToRegularColumns()
+    {
+        return !columnConditions.isEmpty();
+    }
+
+    @Override
+    public Collection<ColumnDefinition> getColumns()
+    {
+        return Stream.concat(columnConditions.stream(), staticConditions.stream())
+                     .map(e -> e.column)
+                     .collect(Collectors.toList());
+    }
+
+    @Override
+    public boolean isEmpty()
+    {
+        return columnConditions.isEmpty() && staticConditions.isEmpty();
+    }
+
+    /**
+     * Adds the conditions to the specified CAS request.
+     *
+     * @param request the request
+     * @param clustering the clustering prefix
+     * @param options the query options
+     */
+    public void addConditionsTo(CQL3CasRequest request,
+                                Clustering clustering,
+                                QueryOptions options)
+    {
+        if (!columnConditions.isEmpty())
+            request.addConditions(clustering, columnConditions, options);
+        if (!staticConditions.isEmpty())
+            request.addConditions(Clustering.STATIC_CLUSTERING, staticConditions, options);
+    }
+
+    @Override
+    public Iterable<Function> getFunctions()
+    {
+        return Stream.concat(columnConditions.stream(), staticConditions.stream())
+                     .flatMap(e -> stream(e.getFunctions().spliterator(), false))
+                     .collect(Collectors.toList());
+    }
+
+    /**
+     * Creates a new <code>Builder</code> for <code>ColumnConditions</code>.
+     * @return a new <code>Builder</code> for <code>ColumnConditions</code>
+     */
+    public static Builder newBuilder()
+    {
+        return new Builder();
+    }
+
+    /**
+     * A <code>Builder</code> for <code>ColumnConditions</code>.
+     *
+     */
+    public static final class Builder
+    {
+        /**
+         * The conditions on regular columns.
+         */
+        private List<ColumnCondition> columnConditions = Collections.emptyList();
+
+        /**
+         * The conditions on static columns
+         */
+        private List<ColumnCondition> staticConditions = Collections.emptyList();
+
+        /**
+         * Adds the specified <code>ColumnCondition</code> to this set of conditions.
+         * @param condition the condition to add
+         */
+        public Builder add(ColumnCondition condition)
+        {
+            List<ColumnCondition> conds = null;
+            if (condition.column.isStatic())
+            {
+                if (staticConditions.isEmpty())
+                    staticConditions = new ArrayList<>();
+                conds = staticConditions;
+            }
+            else
+            {
+                if (columnConditions.isEmpty())
+                    columnConditions = new ArrayList<>();
+                conds = columnConditions;
+            }
+            conds.add(condition);
+            return this;
+        }
+
+        public ColumnConditions build()
+        {
+            return new ColumnConditions(this);
+        }
+
+        private Builder()
+        {
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java b/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
index 47e4384..6102bb9 100644
--- a/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
+++ b/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
@@ -194,12 +194,18 @@ public class ColumnIdentifier extends org.apache.cassandra.cql3.selection.Select
      * once the comparator is known with prepare(). This should only be used with identifiers that are actual
      * column names. See CASSANDRA-8178 for more background.
      */
-    public static class Raw implements Selectable.Raw
+    public static interface Raw extends Selectable.Raw
+    {
+
+        public ColumnIdentifier prepare(CFMetaData cfm);
+    }
+
+    public static class Literal implements Raw
     {
         private final String rawText;
         private final String text;
 
-        public Raw(String rawText, boolean keepCase)
+        public Literal(String rawText, boolean keepCase)
         {
             this.rawText = rawText;
             this.text =  keepCase ? rawText : rawText.toLowerCase(Locale.US);
@@ -239,9 +245,10 @@ public class ColumnIdentifier extends org.apache.cassandra.cql3.selection.Select
         @Override
         public final boolean equals(Object o)
         {
-            if(!(o instanceof ColumnIdentifier.Raw))
+            if(!(o instanceof Literal))
                 return false;
-            ColumnIdentifier.Raw that = (ColumnIdentifier.Raw)o;
+
+            Literal that = (Literal) o;
             return text.equals(that.text);
         }
 
@@ -251,4 +258,45 @@ public class ColumnIdentifier extends org.apache.cassandra.cql3.selection.Select
             return text;
         }
     }
+
+    public static class ColumnIdentifierValue implements Raw
+    {
+        private final ColumnIdentifier identifier;
+
+        public ColumnIdentifierValue(ColumnIdentifier identifier)
+        {
+            this.identifier = identifier;
+        }
+
+        public ColumnIdentifier prepare(CFMetaData cfm)
+        {
+            return identifier;
+        }
+
+        public boolean processesSelection()
+        {
+            return false;
+        }
+
+        @Override
+        public final int hashCode()
+        {
+            return identifier.hashCode();
+        }
+
+        @Override
+        public final boolean equals(Object o)
+        {
+            if(!(o instanceof ColumnIdentifierValue))
+                return false;
+            ColumnIdentifierValue that = (ColumnIdentifierValue) o;
+            return identifier.equals(that.identifier);
+        }
+
+        @Override
+        public String toString()
+        {
+            return identifier.toString();
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/Conditions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Conditions.java b/src/java/org/apache/cassandra/cql3/Conditions.java
new file mode 100644
index 0000000..85459c4
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/Conditions.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.cql3.functions.Function;
+import org.apache.cassandra.cql3.statements.CQL3CasRequest;
+import org.apache.cassandra.db.Clustering;
+
+/**
+ * Conditions that can be applied to a mutation statement.
+ *
+ */
+public interface Conditions
+{
+    /**
+     * An EMPTY condition
+     */
+    static final Conditions EMPTY_CONDITION = ColumnConditions.newBuilder().build();
+
+    /**
+     * IF EXISTS condition
+     */
+    static final Conditions IF_EXISTS_CONDITION = new IfExistsCondition();
+
+    /**
+     * IF NOT EXISTS condition
+     */
+    static final Conditions IF_NOT_EXISTS_CONDITION = new IfNotExistsCondition();
+
+    /**
+     * Returns the functions used by the conditions.
+     * @return the functions used by the conditions
+     */
+    Iterable<Function> getFunctions();
+
+    /**
+     * Returns the column definitions to which the conditions apply.
+     * @return the column definitions to which the conditions apply.
+     */
+    Iterable<ColumnDefinition> getColumns();
+
+    /**
+     * Checks if this <code>Conditions</code> is empty.
+     * @return <code>true</code> if this <code>Conditions</code> is empty, <code>false</code> otherwise.
+     */
+    boolean isEmpty();
+
+    /**
+     * Checks if this is an IF EXISTS condition.
+     * @return <code>true</code> if this is an IF EXISTS condition, <code>false</code> otherwise.
+     */
+    boolean isIfExists();
+
+    /**
+     * Checks if this is an IF NOT EXISTS condition.
+     * @return <code>true</code> if this is an IF NOT EXISTS condition, <code>false</code> otherwise.
+     */
+    boolean isIfNotExists();
+
+    /**
+     * Checks if some of the conditions apply to static columns.
+     *
+     * @return <code>true</code> if some of the conditions apply to static columns, <code>false</code> otherwise.
+     */
+    boolean appliesToStaticColumns();
+
+    /**
+     * Checks if some of the conditions apply to regular columns.
+     *
+     * @return <code>true</code> if some of the conditions apply to regular columns, <code>false</code> otherwise.
+     */
+    boolean appliesToRegularColumns();
+
+    /**
+     * Adds the conditions to the specified CAS request.
+     *
+     * @param request the request
+     * @param clustering the clustering prefix
+     * @param options the query options
+     */
+    public void addConditionsTo(CQL3CasRequest request,
+                                Clustering clustering,
+                                QueryOptions options);
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/Cql.g
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Cql.g b/src/java/org/apache/cassandra/cql3/Cql.g
index 2149f10..87bec4b 100644
--- a/src/java/org/apache/cassandra/cql3/Cql.g
+++ b/src/java/org/apache/cassandra/cql3/Cql.g
@@ -1154,9 +1154,9 @@ userPassword[RoleOptions opts]
 // identifiers because the underlying comparator is not necessarily text. See
 // CASSANDRA-8178 for details.
 cident returns [ColumnIdentifier.Raw id]
-    : t=IDENT              { $id = new ColumnIdentifier.Raw($t.text, false); }
-    | t=QUOTED_NAME        { $id = new ColumnIdentifier.Raw($t.text, true); }
-    | k=unreserved_keyword { $id = new ColumnIdentifier.Raw(k, false); }
+    : t=IDENT              { $id = new ColumnIdentifier.Literal($t.text, false); }
+    | t=QUOTED_NAME        { $id = new ColumnIdentifier.Literal($t.text, true); }
+    | k=unreserved_keyword { $id = new ColumnIdentifier.Literal(k, false); }
     ;
 
 // Column identifiers where the comparator is known to be text

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/IfExistsCondition.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/IfExistsCondition.java b/src/java/org/apache/cassandra/cql3/IfExistsCondition.java
new file mode 100644
index 0000000..a24d8c0
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/IfExistsCondition.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import org.apache.cassandra.cql3.statements.CQL3CasRequest;
+import org.apache.cassandra.db.Clustering;
+
+final class IfExistsCondition extends AbstractConditions
+{
+    @Override
+    public void addConditionsTo(CQL3CasRequest request, Clustering clustering, QueryOptions options)
+    {
+        request.addExist(clustering);
+    }
+
+    @Override
+    public boolean isIfExists()
+    {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/IfNotExistsCondition.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/IfNotExistsCondition.java b/src/java/org/apache/cassandra/cql3/IfNotExistsCondition.java
new file mode 100644
index 0000000..05cb864
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/IfNotExistsCondition.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import org.apache.cassandra.cql3.statements.CQL3CasRequest;
+import org.apache.cassandra.db.Clustering;
+
+final class IfNotExistsCondition extends AbstractConditions
+{
+    @Override
+    public void addConditionsTo(CQL3CasRequest request, Clustering clustering, QueryOptions options)
+    {
+        request.addNotExist(clustering);
+    }
+
+    @Override
+    public boolean isIfNotExists()
+    {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/Json.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Json.java b/src/java/org/apache/cassandra/cql3/Json.java
index e4bce29..35c69ed 100644
--- a/src/java/org/apache/cassandra/cql3/Json.java
+++ b/src/java/org/apache/cassandra/cql3/Json.java
@@ -71,7 +71,7 @@ public class Json
 
         public Prepared prepareAndCollectMarkers(CFMetaData metadata, Collection<ColumnDefinition> receivers, VariableSpecifications boundNames)
         {
-            return new PreparedLiteral(metadata.ksName, parseJson(text, receivers));
+            return new PreparedLiteral(parseJson(text, receivers));
         }
     }
 
@@ -91,7 +91,7 @@ public class Json
         public Prepared prepareAndCollectMarkers(CFMetaData metadata, Collection<ColumnDefinition> receivers, VariableSpecifications boundNames)
         {
             boundNames.add(bindIndex, makeReceiver(metadata));
-            return new PreparedMarker(metadata.ksName, bindIndex, receivers);
+            return new PreparedMarker(bindIndex, receivers);
         }
 
         private ColumnSpecification makeReceiver(CFMetaData metadata)
@@ -105,27 +105,7 @@ public class Json
      */
     public static abstract class Prepared
     {
-        private final String keyspace;
-
-        protected Prepared(String keyspace)
-        {
-            this.keyspace = keyspace;
-        }
-
-        protected abstract Term.Raw getRawTermForColumn(ColumnDefinition def);
-
-        public Term getPrimaryKeyValueForColumn(ColumnDefinition def)
-        {
-            // Note that we know we don't have to call collectMarkerSpecification since it has already been collected
-            return getRawTermForColumn(def).prepare(keyspace, def);
-        }
-
-        public Operation getSetOperationForColumn(ColumnDefinition def)
-        {
-            // Note that we know we don't have to call collectMarkerSpecification on the operation since we have
-            // already collected all we need.
-            return new Operation.SetValue(getRawTermForColumn(def)).prepare(keyspace, def);
-        }
+        public abstract Term.Raw getRawTermForColumn(ColumnDefinition def);
     }
 
     /**
@@ -135,13 +115,12 @@ public class Json
     {
         private final Map<ColumnIdentifier, Term> columnMap;
 
-        public PreparedLiteral(String keyspace, Map<ColumnIdentifier, Term> columnMap)
+        public PreparedLiteral(Map<ColumnIdentifier, Term> columnMap)
         {
-            super(keyspace);
             this.columnMap = columnMap;
         }
 
-        protected Term.Raw getRawTermForColumn(ColumnDefinition def)
+        public Term.Raw getRawTermForColumn(ColumnDefinition def)
         {
             Term value = columnMap.get(def.name);
             return value == null ? Constants.NULL_LITERAL : new ColumnValue(value);
@@ -158,14 +137,13 @@ public class Json
 
         private Map<ColumnIdentifier, Term> columnMap;
 
-        public PreparedMarker(String keyspace, int bindIndex, Collection<ColumnDefinition> columns)
+        public PreparedMarker(int bindIndex, Collection<ColumnDefinition> columns)
         {
-            super(keyspace);
             this.bindIndex = bindIndex;
             this.columns = columns;
         }
 
-        protected DelayedColumnValue getRawTermForColumn(ColumnDefinition def)
+        public DelayedColumnValue getRawTermForColumn(ColumnDefinition def)
         {
             return new DelayedColumnValue(this, def);
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/Operations.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Operations.java b/src/java/org/apache/cassandra/cql3/Operations.java
new file mode 100644
index 0000000..c4cade1
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/Operations.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.cassandra.cql3.functions.Function;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+
+/**
+ * A set of <code>Operation</code>s.
+ *
+ */
+public final class Operations implements Iterable<Operation>
+{
+    /**
+     * The operations on regular columns.
+     */
+    private final List<Operation> regularOperations = new ArrayList<>();
+
+    /**
+     * The operations on static columns.
+     */
+    private final List<Operation> staticOperations = new ArrayList<>();
+
+    /**
+     * Checks if some of the operations apply to static columns.
+     *
+     * @return <code>true</code> if some of the operations apply to static columns, <code>false</code> otherwise.
+     */
+    public boolean appliesToStaticColumns()
+    {
+        return !staticOperations.isEmpty();
+    }
+
+    /**
+     * Checks if some of the operations apply to regular columns.
+     *
+     * @return <code>true</code> if some of the operations apply to regular columns, <code>false</code> otherwise.
+     */
+    public boolean appliesToRegularColumns()
+    {
+        return !regularOperations.isEmpty();
+    }
+
+    /**
+     * Returns the operations on regular columns.
+     * @return the operations on regular columns
+     */
+    public List<Operation> regularOperations()
+    {
+        return regularOperations;
+    }
+
+    /**
+     * Returns the operations on static columns.
+     * @return the operations on static columns
+     */
+    public List<Operation> staticOperations()
+    {
+        return staticOperations;
+    }
+
+    /**
+     * Adds the specified <code>Operation</code> to this set of operations.
+     * @param operation the operation to add
+     */
+    public void add(Operation operation)
+    {
+        if (operation.column.isStatic())
+            staticOperations.add(operation);
+        else
+            regularOperations.add(operation);
+    }
+
+    /**
+     * Checks if one of the operations requires a read.
+     *
+     * @return <code>true</code> if one of the operations requires a read, <code>false</code> otherwise.
+     */
+    public boolean requiresRead()
+    {
+        // Lists SET operation incurs a read.
+        for (Operation operation : this)
+            if (operation.requiresRead())
+                return true;
+
+        return false;
+    }
+
+    /**
+     * Checks if this <code>Operations</code> is empty.
+     * @return <code>true</code> if this <code>Operations</code> is empty, <code>false</code> otherwise.
+     */
+    public boolean isEmpty()
+    {
+        return staticOperations.isEmpty() && regularOperations.isEmpty();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public Iterator<Operation> iterator()
+    {
+        return Iterators.concat(staticOperations.iterator(), regularOperations.iterator());
+    }
+
+    public Iterable<? extends Function> getFunctions()
+    {
+        List<Function> functions = new ArrayList<>();
+        for (Operation operation : this)
+            Iterables.addAll(functions, operation.getFunctions());
+        return functions;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java b/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
index c848b9e..84e6274 100644
--- a/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
+++ b/src/java/org/apache/cassandra/cql3/SingleColumnRelation.java
@@ -223,6 +223,7 @@ public final class SingleColumnRelation extends Relation
         }
 
         checkFalse(isContainsKey() && !(receiver.type instanceof MapType), "Cannot use CONTAINS KEY on non-map column %s", receiver.name);
+        checkFalse(isContains() && !(receiver.type.isCollection()), "Cannot use CONTAINS on non-collection column %s", receiver.name);
 
         if (mapKey != null)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/UpdateParameters.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/UpdateParameters.java b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
index cd81f84..dbcf803 100644
--- a/src/java/org/apache/cassandra/cql3/UpdateParameters.java
+++ b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
@@ -216,9 +216,14 @@ public class UpdateParameters
         return deletionTime;
     }
 
-    public RangeTombstone makeRangeTombstone(CBuilder cbuilder)
+    public RangeTombstone makeRangeTombstone(ClusteringComparator comparator, Clustering clustering)
     {
-        return new RangeTombstone(cbuilder.buildSlice(), deletionTime);
+        return makeRangeTombstone(Slice.make(comparator, clustering));
+    }
+
+    public RangeTombstone makeRangeTombstone(Slice slice)
+    {
+        return new RangeTombstone(slice, deletionTime);
     }
 
     public Row getPrefetchedRow(DecoratedKey key, Clustering clustering)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
index b0c81b8..3cf6bfb 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
@@ -25,13 +25,16 @@ import com.google.common.collect.Iterables;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.cql3.QueryOptions;
+import org.apache.cassandra.cql3.Relation;
+import org.apache.cassandra.cql3.VariableSpecifications;
 import org.apache.cassandra.cql3.functions.Function;
 import org.apache.cassandra.cql3.statements.Bound;
+import org.apache.cassandra.cql3.statements.StatementType;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.dht.*;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.index.SecondaryIndexManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.btree.BTreeSet;
@@ -49,6 +52,11 @@ public final class StatementRestrictions
     public static final String NO_INDEX_FOUND_MESSAGE =
         "No supported secondary index found for the non primary key columns restrictions";
     /**
+     * The type of statement
+     */
+    private final StatementType type;
+
+    /**
      * The Column Family meta data
      */
     public final CFMetaData cfm;
@@ -86,30 +94,33 @@ public final class StatementRestrictions
     /**
      * Creates a new empty <code>StatementRestrictions</code>.
      *
+     * @param type the type of statement
      * @param cfm the column family meta data
      * @return a new empty <code>StatementRestrictions</code>.
      */
-    public static StatementRestrictions empty(CFMetaData cfm)
+    public static StatementRestrictions empty(StatementType type, CFMetaData cfm)
     {
-        return new StatementRestrictions(cfm);
+        return new StatementRestrictions(type, cfm);
     }
 
-    private StatementRestrictions(CFMetaData cfm)
+    private StatementRestrictions(StatementType type, CFMetaData cfm)
     {
+        this.type = type;
         this.cfm = cfm;
         this.partitionKeyRestrictions = new PrimaryKeyRestrictionSet(cfm.getKeyValidatorAsClusteringComparator(), true);
         this.clusteringColumnsRestrictions = new PrimaryKeyRestrictionSet(cfm.comparator, false);
         this.nonPrimaryKeyRestrictions = new RestrictionSet();
     }
 
-    public StatementRestrictions(CFMetaData cfm,
+    public StatementRestrictions(StatementType type,
+                                 CFMetaData cfm,
                                  List<Relation> whereClause,
                                  VariableSpecifications boundNames,
                                  boolean selectsOnlyStaticColumns,
                                  boolean selectACollection,
-                                 boolean useFiltering) throws InvalidRequestException
+                                 boolean useFiltering)
     {
-        this(cfm);
+        this(type, cfm);
 
         /*
          * WHERE clause. For a given entity, rules are:
@@ -123,13 +134,19 @@ public final class StatementRestrictions
         for (Relation relation : whereClause)
             addRestriction(relation.toRestriction(cfm, boundNames));
 
-        ColumnFamilyStore cfs = Keyspace.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
-        SecondaryIndexManager secondaryIndexManager = cfs.indexManager;
+        boolean hasQueriableClusteringColumnIndex = false;
+        boolean hasQueriableIndex = false;
 
-        boolean hasQueriableClusteringColumnIndex = clusteringColumnsRestrictions.hasSupportingIndex(secondaryIndexManager);
-        boolean hasQueriableIndex = hasQueriableClusteringColumnIndex
-                || partitionKeyRestrictions.hasSupportingIndex(secondaryIndexManager)
-                || nonPrimaryKeyRestrictions.hasSupportingIndex(secondaryIndexManager);
+        if (type.allowUseOfSecondaryIndices())
+        {
+            ColumnFamilyStore cfs = Keyspace.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
+            SecondaryIndexManager secondaryIndexManager = cfs.indexManager;
+
+            hasQueriableClusteringColumnIndex = clusteringColumnsRestrictions.hasSupportingIndex(secondaryIndexManager);
+            hasQueriableIndex = hasQueriableClusteringColumnIndex
+                    || partitionKeyRestrictions.hasSupportingIndex(secondaryIndexManager)
+                    || nonPrimaryKeyRestrictions.hasSupportingIndex(secondaryIndexManager);
+        }
 
         // At this point, the select statement if fully constructed, but we still have a few things to validate
         processPartitionKeyRestrictions(hasQueriableIndex);
@@ -139,10 +156,26 @@ public final class StatementRestrictions
         if (usesSecondaryIndexing)
             indexRestrictions.add(partitionKeyRestrictions);
 
-        checkFalse(selectsOnlyStaticColumns && hasClusteringColumnsRestriction(),
-                   "Cannot restrict clustering columns when selecting only static columns");
+        if (selectsOnlyStaticColumns && hasClusteringColumnsRestriction())
+        {
+            // If the only updated/deleted columns are static, then we don't need clustering columns.
+            // And in fact, unless it is an INSERT, we reject if clustering columns are provided, as that
+            // suggests something unintended. For instance, given:
+            //   CREATE TABLE t (k int, v int, s int static, PRIMARY KEY (k, v))
+            // it can make sense to do:
+            //   INSERT INTO t(k, v, s) VALUES (0, 1, 2)
+            // but both
+            //   UPDATE t SET s = 3 WHERE k = 0 AND v = 1
+            //   DELETE v FROM t WHERE k = 0 AND v = 1
+            // sound like you don't really understand what you are doing.
+            if (type.isDelete() || type.isUpdate())
+                throw invalidRequest("Invalid restrictions on clustering columns since the %s statement modifies only static columns",
+                                     type);
+            if (type.isSelect())
+                throw invalidRequest("Cannot restrict clustering columns when selecting only static columns");
+        }
 
-        processClusteringColumnsRestrictions(hasQueriableIndex, selectACollection);
+        processClusteringColumnsRestrictions(hasQueriableIndex, selectsOnlyStaticColumns, selectACollection);
 
         // Covers indexes on the first clustering column (among others).
         if (isKeyRange && hasQueriableClusteringColumnIndex)
@@ -157,10 +190,18 @@ public final class StatementRestrictions
         // there is restrictions not covered by the PK.
         if (!nonPrimaryKeyRestrictions.isEmpty())
         {
+            if (!type.allowNonPrimaryKeyInWhereClause())
+            {
+                Collection<ColumnIdentifier> nonPrimaryKeyColumns =
+                        ColumnDefinition.toIdentifiers(nonPrimaryKeyRestrictions.getColumnDefs());
+
+                throw invalidRequest("Non PRIMARY KEY columns found in where clause: %s ",
+                                     Joiner.on(", ").join(nonPrimaryKeyColumns));
+            }
             if (hasQueriableIndex)
                 usesSecondaryIndexing = true;
             else if (!useFiltering)
-                throw new InvalidRequestException(NO_INDEX_FOUND_MESSAGE);
+                throw invalidRequest(NO_INDEX_FOUND_MESSAGE);
 
             indexRestrictions.add(nonPrimaryKeyRestrictions);
         }
@@ -169,7 +210,7 @@ public final class StatementRestrictions
             validateSecondaryIndexSelections(selectsOnlyStaticColumns);
     }
 
-    private void addRestriction(Restriction restriction) throws InvalidRequestException
+    private void addRestriction(Restriction restriction)
     {
         if (restriction.isMultiColumn())
             clusteringColumnsRestrictions = clusteringColumnsRestrictions.mergeWith(restriction);
@@ -186,7 +227,7 @@ public final class StatementRestrictions
                                 nonPrimaryKeyRestrictions.getFunctions());
     }
 
-    private void addSingleColumnRestriction(SingleColumnRestriction restriction) throws InvalidRequestException
+    private void addSingleColumnRestriction(SingleColumnRestriction restriction)
     {
         ColumnDefinition def = restriction.columnDef;
         if (def.isPartitionKey())
@@ -241,8 +282,19 @@ public final class StatementRestrictions
         return this.usesSecondaryIndexing;
     }
 
-    private void processPartitionKeyRestrictions(boolean hasQueriableIndex) throws InvalidRequestException
+    private void processPartitionKeyRestrictions(boolean hasQueriableIndex)
     {
+        if (!type.allowPartitionKeyRanges())
+        {
+            checkFalse(partitionKeyRestrictions.isOnToken(),
+                       "The token function cannot be used in WHERE clauses for %s statements", type);
+
+            if (hasUnrestrictedPartitionKeyComponents())
+                throw invalidRequest("Some partition key parts are missing: %s",
+                                     Joiner.on(", ").join(getPartitionKeyUnrestrictedComponents()));
+        }
+        else
+        {
         // If there is a queriable index, no special condition are required on the other restrictions.
         // But we still need to know 2 things:
         // - If we don't have a queriable index, is the query ok
@@ -252,17 +304,18 @@ public final class StatementRestrictions
         if (partitionKeyRestrictions.isOnToken())
             isKeyRange = true;
 
-        if (hasPartitionKeyUnrestrictedComponents())
-        {
-            if (!partitionKeyRestrictions.isEmpty())
+            if (hasUnrestrictedPartitionKeyComponents())
             {
-                if (!hasQueriableIndex)
-                    throw invalidRequest("Partition key parts: %s must be restricted as other parts are",
-                                         Joiner.on(", ").join(getPartitionKeyUnrestrictedComponents()));
-            }
+                if (!partitionKeyRestrictions.isEmpty())
+                {
+                    if (!hasQueriableIndex)
+                        throw invalidRequest("Partition key parts: %s must be restricted as other parts are",
+                                             Joiner.on(", ").join(getPartitionKeyUnrestrictedComponents()));
+                }
 
-            isKeyRange = true;
-            usesSecondaryIndexing = hasQueriableIndex;
+                isKeyRange = true;
+                usesSecondaryIndexing = hasQueriableIndex;
+            }
         }
     }
 
@@ -270,7 +323,7 @@ public final class StatementRestrictions
      * Checks if the partition key has some unrestricted components.
      * @return <code>true</code> if the partition key has some unrestricted components, <code>false</code> otherwise.
      */
-    private boolean hasPartitionKeyUnrestrictedComponents()
+    private boolean hasUnrestrictedPartitionKeyComponents()
     {
         return partitionKeyRestrictions.size() <  cfm.partitionKeyColumns().size();
     }
@@ -284,7 +337,7 @@ public final class StatementRestrictions
      * Returns the partition key components that are not restricted.
      * @return the partition key components that are not restricted.
      */
-    private List<ColumnIdentifier> getPartitionKeyUnrestrictedComponents()
+    private Collection<ColumnIdentifier> getPartitionKeyUnrestrictedComponents()
     {
         List<ColumnDefinition> list = new ArrayList<>(cfm.partitionKeyColumns());
         list.removeAll(partitionKeyRestrictions.getColumnDefs());
@@ -292,39 +345,65 @@ public final class StatementRestrictions
     }
 
     /**
+     * Checks if the restrictions on the partition key are token restrictions.
+     *
+     * @return <code>true</code> if the restrictions on the partition key are token restrictions,
+     * <code>false</code> otherwise.
+     */
+    public boolean isPartitionKeyRestrictionsOnToken()
+    {
+        return partitionKeyRestrictions.isOnToken();
+    }
+
+    /**
      * Processes the clustering column restrictions.
      *
      * @param hasQueriableIndex <code>true</code> if some of the queried data are indexed, <code>false</code> otherwise
+     * @param selectsOnlyStaticColumns <code>true</code> if the selected or modified columns are all statics,
+     * <code>false</code> otherwise.
      * @param selectACollection <code>true</code> if the query should return a collection column
-     * @throws InvalidRequestException if the request is invalid
      */
     private void processClusteringColumnsRestrictions(boolean hasQueriableIndex,
-                                                      boolean selectACollection) throws InvalidRequestException
+                                                      boolean selectsOnlyStaticColumns,
+                                                      boolean selectACollection)
     {
-        checkFalse(clusteringColumnsRestrictions.isIN() && selectACollection,
-                   "Cannot restrict clustering columns by IN relations when a collection is selected by the query");
-        checkFalse(clusteringColumnsRestrictions.isContains() && !hasQueriableIndex,
-                   "Cannot restrict clustering columns by a CONTAINS relation without a secondary index");
+        checkFalse(!type.allowClusteringColumnSlices() && clusteringColumnsRestrictions.isSlice(),
+                   "Slice restrictions are not supported on the clustering columns in %s statements", type);
 
-        if (hasClusteringColumnsRestriction())
+        if (!type.allowClusteringColumnSlices()
+               && (!cfm.isCompactTable() || (cfm.isCompactTable() && !hasClusteringColumnsRestriction())))
         {
-            List<ColumnDefinition> clusteringColumns = cfm.clusteringColumns();
-            List<ColumnDefinition> restrictedColumns = new LinkedList<>(clusteringColumnsRestrictions.getColumnDefs());
+            if (!selectsOnlyStaticColumns && hasUnrestrictedClusteringColumns())
+                throw invalidRequest("Some clustering keys are missing: %s",
+                                     Joiner.on(", ").join(getUnrestrictedClusteringColumns()));
+        }
+        else
+        {
+            checkFalse(clusteringColumnsRestrictions.isIN() && selectACollection,
+                       "Cannot restrict clustering columns by IN relations when a collection is selected by the query");
+            checkFalse(clusteringColumnsRestrictions.isContains() && !hasQueriableIndex,
+                       "Cannot restrict clustering columns by a CONTAINS relation without a secondary index");
 
-            for (int i = 0, m = restrictedColumns.size(); i < m; i++)
+            if (hasClusteringColumnsRestriction())
             {
-                ColumnDefinition clusteringColumn = clusteringColumns.get(i);
-                ColumnDefinition restrictedColumn = restrictedColumns.get(i);
+                List<ColumnDefinition> clusteringColumns = cfm.clusteringColumns();
+                List<ColumnDefinition> restrictedColumns = new LinkedList<>(clusteringColumnsRestrictions.getColumnDefs());
 
-                if (!clusteringColumn.equals(restrictedColumn))
+                for (int i = 0, m = restrictedColumns.size(); i < m; i++)
                 {
-                    checkTrue(hasQueriableIndex,
-                              "PRIMARY KEY column \"%s\" cannot be restricted as preceding column \"%s\" is not restricted",
-                              restrictedColumn.name,
-                              clusteringColumn.name);
-
-                    usesSecondaryIndexing = true; // handle gaps and non-keyrange cases.
-                    break;
+                    ColumnDefinition clusteringColumn = clusteringColumns.get(i);
+                    ColumnDefinition restrictedColumn = restrictedColumns.get(i);
+
+                    if (!clusteringColumn.equals(restrictedColumn))
+                    {
+                        checkTrue(hasQueriableIndex,
+                                  "PRIMARY KEY column \"%s\" cannot be restricted as preceding column \"%s\" is not restricted",
+                                  restrictedColumn.name,
+                                  clusteringColumn.name);
+
+                        usesSecondaryIndexing = true; // handle gaps and non-keyrange cases.
+                        break;
+                    }
                 }
             }
         }
@@ -333,7 +412,27 @@ public final class StatementRestrictions
             usesSecondaryIndexing = true;
     }
 
-    public RowFilter getRowFilter(SecondaryIndexManager indexManager, QueryOptions options) throws InvalidRequestException
+    /**
+     * Returns the clustering columns that are not restricted.
+     * @return the clustering columns that are not restricted.
+     */
+    private Collection<ColumnIdentifier> getUnrestrictedClusteringColumns()
+    {
+        List<ColumnDefinition> missingClusteringColumns = new ArrayList<>(cfm.clusteringColumns());
+        missingClusteringColumns.removeAll(new LinkedList<>(clusteringColumnsRestrictions.getColumnDefs()));
+        return ColumnDefinition.toIdentifiers(missingClusteringColumns);
+    }
+
+    /**
+     * Checks if some clustering columns are not restricted.
+     * @return <code>true</code> if some clustering columns are not restricted, <code>false</code> otherwise.
+     */
+    private boolean hasUnrestrictedClusteringColumns()
+    {
+        return cfm.clusteringColumns().size() != clusteringColumnsRestrictions.size();
+    }
+
+    public RowFilter getRowFilter(SecondaryIndexManager indexManager, QueryOptions options)
     {
         if (indexRestrictions.isEmpty())
             return RowFilter.NONE;
@@ -350,9 +449,8 @@ public final class StatementRestrictions
      *
      * @param options the query options
      * @return the partition keys for which the data is requested.
-     * @throws InvalidRequestException if the partition keys cannot be retrieved
      */
-    public Collection<ByteBuffer> getPartitionKeys(final QueryOptions options) throws InvalidRequestException
+    public List<ByteBuffer> getPartitionKeys(final QueryOptions options)
     {
         return partitionKeyRestrictions.values(options);
     }
@@ -363,13 +461,12 @@ public final class StatementRestrictions
      * @param b the boundary type
      * @param options the query options
      * @return the specified bound of the partition key
-     * @throws InvalidRequestException if the boundary cannot be retrieved
      */
-    private ByteBuffer getPartitionKeyBound(Bound b, QueryOptions options) throws InvalidRequestException
+    private ByteBuffer getPartitionKeyBound(Bound b, QueryOptions options)
     {
         // Deal with unrestricted partition key components (special-casing is required to deal with 2i queries on the
         // first component of a composite partition key).
-        if (hasPartitionKeyUnrestrictedComponents())
+        if (hasUnrestrictedPartitionKeyComponents())
             return ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
         // We deal with IN queries for keys in other places, so we know buildBound will return only one result
@@ -381,9 +478,8 @@ public final class StatementRestrictions
      *
      * @param options the query options
      * @return the partition key bounds
-     * @throws InvalidRequestException if the query is invalid
      */
-    public AbstractBounds<PartitionPosition> getPartitionKeyBounds(QueryOptions options) throws InvalidRequestException
+    public AbstractBounds<PartitionPosition> getPartitionKeyBounds(QueryOptions options)
     {
         IPartitioner p = cfm.partitioner;
 
@@ -396,7 +492,7 @@ public final class StatementRestrictions
     }
 
     private AbstractBounds<PartitionPosition> getPartitionKeyBounds(IPartitioner p,
-                                                              QueryOptions options) throws InvalidRequestException
+                                                                    QueryOptions options)
     {
         ByteBuffer startKeyBytes = getPartitionKeyBound(Bound.START, options);
         ByteBuffer finishKeyBytes = getPartitionKeyBound(Bound.END, options);
@@ -420,8 +516,7 @@ public final class StatementRestrictions
     }
 
     private AbstractBounds<PartitionPosition> getPartitionKeyBoundsForTokenRestrictions(IPartitioner p,
-                                                                                  QueryOptions options)
-                                                                                          throws InvalidRequestException
+                                                                                        QueryOptions options)
     {
         Token startToken = getTokenBound(Bound.START, options, p);
         Token endToken = getTokenBound(Bound.END, options, p);
@@ -450,7 +545,7 @@ public final class StatementRestrictions
         return new Range<>(start, end);
     }
 
-    private Token getTokenBound(Bound b, QueryOptions options, IPartitioner p) throws InvalidRequestException
+    private Token getTokenBound(Bound b, QueryOptions options, IPartitioner p)
     {
         if (!partitionKeyRestrictions.hasBound(b))
             return p.getMinimumToken();
@@ -476,9 +571,8 @@ public final class StatementRestrictions
      *
      * @param options the query options
      * @return the requested clustering columns
-     * @throws InvalidRequestException if the query is not valid
      */
-    public NavigableSet<Clustering> getClusteringColumns(QueryOptions options) throws InvalidRequestException
+    public NavigableSet<Clustering> getClusteringColumns(QueryOptions options)
     {
         // If this is a names command and the table is a static compact one, then as far as CQL is concerned we have
         // only a single row which internally correspond to the static parts. In which case we want to return an empty
@@ -495,9 +589,8 @@ public final class StatementRestrictions
      * @param b the bound type
      * @param options the query options
      * @return the bounds (start or end) of the clustering columns
-     * @throws InvalidRequestException if the request is not valid
      */
-    public NavigableSet<Slice.Bound> getClusteringColumnsBounds(Bound b, QueryOptions options) throws InvalidRequestException
+    public NavigableSet<Slice.Bound> getClusteringColumnsBounds(Bound b, QueryOptions options)
     {
         return clusteringColumnsRestrictions.boundsAsClustering(b, options);
     }
@@ -546,7 +639,7 @@ public final class StatementRestrictions
                         && nonPrimaryKeyRestrictions.hasMultipleContains());
     }
 
-    private void validateSecondaryIndexSelections(boolean selectsOnlyStaticColumns) throws InvalidRequestException
+    private void validateSecondaryIndexSelections(boolean selectsOnlyStaticColumns)
     {
         checkFalse(keyIsInRelation(),
                    "Select on indexed columns and with IN clause for the PRIMARY KEY are not supported");
@@ -556,4 +649,19 @@ public final class StatementRestrictions
         // so far, 2i means that you've restricted a non static column, so the query is somewhat non-sensical.
         checkFalse(selectsOnlyStaticColumns, "Queries using 2ndary indexes don't support selecting only static columns");
     }
+
+    /**
+     * Checks that all the primary key columns (partition key and clustering columns) are restricted by an equality
+     * relation ('=' or 'IN').
+     *
+     * @return <code>true</code> if all the primary key columns are restricted by an equality relation.
+     */
+    public boolean hasAllPKColumnsRestrictedByEqualities()
+    {
+        return !isPartitionKeyRestrictionsOnToken()
+                && !hasUnrestrictedPartitionKeyComponents()
+                && (partitionKeyRestrictions.isEQ() || partitionKeyRestrictions.isIN())
+                && !hasUnrestrictedClusteringColumns()
+                && (clusteringColumnsRestrictions.isEQ() || clusteringColumnsRestrictions.isIN());
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
index c8482b3..4a92ec1 100644
--- a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java
@@ -27,7 +27,6 @@ import com.google.common.collect.Iterables;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.helpers.MessageFormatter;
-
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.*;
@@ -44,6 +43,8 @@ import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+
 /**
  * A <code>BATCH</code> statement parsed from a CQL query.
  */
@@ -217,8 +218,7 @@ public class BatchStatement implements CQLStatement
     throws RequestExecutionException, RequestValidationException
     {
         Set<String> tablesWithZeroGcGs = null;
-
-        Map<String, Map<ByteBuffer, IMutation>> mutations = new HashMap<>();
+        UpdatesCollector collector = new UpdatesCollector(updatedColumns, updatedRows());
         for (int i = 0; i < statements.size(); i++)
         {
             ModificationStatement statement = statements.get(i);
@@ -230,7 +230,7 @@ public class BatchStatement implements CQLStatement
             }
             QueryOptions statementOptions = options.forStatement(i);
             long timestamp = attrs.getTimestamp(now, statementOptions);
-            addStatementMutations(statement, statementOptions, local, timestamp, mutations);
+            statement.addUpdates(collector, statementOptions, local, timestamp);
         }
 
         if (tablesWithZeroGcGs != null)
@@ -242,27 +242,7 @@ public class BatchStatement implements CQLStatement
                                             .getMessage());
         }
 
-        return unzipMutations(mutations);
-    }
-
-    private Collection<? extends IMutation> unzipMutations(Map<String, Map<ByteBuffer, IMutation>> mutations)
-    {
-
-        // The case where all statement where on the same keyspace is pretty common
-        if (mutations.size() == 1)
-            return mutations.values().iterator().next().values();
-
-
-        List<IMutation> ms = new ArrayList<>();
-        for (Map<ByteBuffer, IMutation> ksMap : mutations.values())
-            ms.addAll(ksMap.values());
-
-        return ms;
-    }
-
-    private PartitionColumns updatedColumns()
-    {
-        return updatedColumns;
+        return collector.toMutations();
     }
 
     private int updatedRows()
@@ -272,55 +252,6 @@ public class BatchStatement implements CQLStatement
         return statements.size();
     }
 
-    private void addStatementMutations(ModificationStatement statement,
-                                       QueryOptions options,
-                                       boolean local,
-                                       long now,
-                                       Map<String, Map<ByteBuffer, IMutation>> mutations)
-    throws RequestExecutionException, RequestValidationException
-    {
-        String ksName = statement.keyspace();
-        Map<ByteBuffer, IMutation> ksMap = mutations.get(ksName);
-        if (ksMap == null)
-        {
-            ksMap = new HashMap<>();
-            mutations.put(ksName, ksMap);
-        }
-
-        // The following does the same than statement.getMutations(), but we inline it here because
-        // we don't want to recreate mutations every time as this is particularly inefficient when applying
-        // multiple batch to the same partition (see #6737).
-        List<ByteBuffer> keys = statement.buildPartitionKeyNames(options);
-        CBuilder clustering = statement.createClustering(options);
-        UpdateParameters params = statement.makeUpdateParameters(keys, clustering, options, local, now);
-
-        for (ByteBuffer key : keys)
-        {
-            DecoratedKey dk = statement.cfm.decorateKey(key);
-            IMutation mutation = ksMap.get(dk.getKey());
-            Mutation mut;
-            if (mutation == null)
-            {
-                mut = new Mutation(ksName, dk);
-                mutation = statement.cfm.isCounter() ? new CounterMutation(mut, options.getConsistency()) : mut;
-                ksMap.put(dk.getKey(), mutation);
-            }
-            else
-            {
-                mut = statement.cfm.isCounter() ? ((CounterMutation) mutation).getMutation() : (Mutation) mutation;
-            }
-
-            PartitionUpdate upd = mut.get(statement.cfm);
-            if (upd == null)
-            {
-                upd = new PartitionUpdate(statement.cfm, dk, updatedColumns(), updatedRows());
-                mut.add(upd);
-            }
-
-            statement.addUpdateForKey(upd, clustering, params);
-        }
-    }
-
     /**
      * Checks batch size to ensure threshold is met. If not, a warning is logged.
      *
@@ -470,17 +401,23 @@ public class BatchStatement implements CQLStatement
                 throw new InvalidRequestException("Batch with conditions cannot span multiple partitions");
             }
 
-            CBuilder cbuilder = statement.createClustering(statementOptions);
+            SortedSet<Clustering> clusterings = statement.createClustering(statementOptions);
+
+            checkFalse(clusterings.size() > 1,
+                       "IN on the clustering key columns is not supported with conditional updates");
+
+            Clustering clustering = Iterables.getOnlyElement(clusterings);
+
             if (statement.hasConditions())
             {
-                statement.addConditions(cbuilder.build(), casRequest, statementOptions);
+                statement.addConditions(clustering, casRequest, statementOptions);
                 // As soon as we have a ifNotExists, we set columnsWithConditions to null so that everything is in the resultSet
                 if (statement.hasIfNotExistCondition() || statement.hasIfExistCondition())
                     columnsWithConditions = null;
                 else if (columnsWithConditions != null)
                     Iterables.addAll(columnsWithConditions, statement.getColumnsWithConditions());
             }
-            casRequest.addRowUpdate(cbuilder, statement, statementOptions, timestamp);
+            casRequest.addRowUpdate(clustering, statement, statementOptions, timestamp);
         }
 
         return Pair.create(casRequest, columnsWithConditions);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java b/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
index dc70bd2..1c3c795 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CQL3CasRequest.java
@@ -67,9 +67,9 @@ public class CQL3CasRequest implements CASRequest
         this.updatesStaticRow = updatesStaticRow;
     }
 
-    public void addRowUpdate(CBuilder cbuilder, ModificationStatement stmt, QueryOptions options, long timestamp)
+    public void addRowUpdate(Clustering clustering, ModificationStatement stmt, QueryOptions options, long timestamp)
     {
-        updates.add(new RowUpdate(cbuilder, stmt, options, timestamp));
+        updates.add(new RowUpdate(clustering, stmt, options, timestamp));
     }
 
     public void addNotExist(Clustering clustering) throws InvalidRequestException
@@ -129,7 +129,7 @@ public class CQL3CasRequest implements CASRequest
         return conditionColumns;
     }
 
-    public SinglePartitionReadCommand readCommand(int nowInSec)
+    public SinglePartitionReadCommand<?> readCommand(int nowInSec)
     {
         assert !conditions.isEmpty();
         Slices.Builder builder = new Slices.Builder(cfm.comparator, conditions.size());
@@ -184,14 +184,14 @@ public class CQL3CasRequest implements CASRequest
      */
     private class RowUpdate
     {
-        private final CBuilder cbuilder;
+        private final Clustering clustering;
         private final ModificationStatement stmt;
         private final QueryOptions options;
         private final long timestamp;
 
-        private RowUpdate(CBuilder cbuilder, ModificationStatement stmt, QueryOptions options, long timestamp)
+        private RowUpdate(Clustering clustering, ModificationStatement stmt, QueryOptions options, long timestamp)
         {
-            this.cbuilder = cbuilder;
+            this.clustering = clustering;
             this.stmt = stmt;
             this.options = options;
             this.timestamp = timestamp;
@@ -201,7 +201,7 @@ public class CQL3CasRequest implements CASRequest
         {
             Map<DecoratedKey, Partition> map = stmt.requiresRead() ? Collections.<DecoratedKey, Partition>singletonMap(key, current) : null;
             UpdateParameters params = new UpdateParameters(cfm, updates.columns(), options, timestamp, stmt.getTimeToLive(options), map, true);
-            stmt.addUpdateForKey(updates, cbuilder, params);
+            stmt.addUpdateForKey(updates, clustering, params);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
index a33696e..cd6ce77 100644
--- a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
@@ -17,37 +17,38 @@
  */
 package org.apache.cassandra.cql3.statements;
 
-import java.util.Iterator;
 import java.util.List;
 
-import com.google.common.collect.Iterators;
-
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.cql3.*;
-import org.apache.cassandra.cql3.restrictions.Restriction;
-import org.apache.cassandra.db.CBuilder;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.Slice;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+
 /**
  * A <code>DELETE</code> parsed from a CQL query statement.
  */
 public class DeleteStatement extends ModificationStatement
 {
-    private DeleteStatement(StatementType type, int boundTerms, CFMetaData cfm, Attributes attrs)
+    private DeleteStatement(int boundTerms,
+                            CFMetaData cfm,
+                            Operations operations,
+                            StatementRestrictions restrictions,
+                            Conditions conditions,
+                            Attributes attrs)
     {
-        super(type, boundTerms, cfm, attrs);
+        super(StatementType.DELETE, boundTerms, cfm, operations, restrictions, conditions, attrs);
     }
 
-    public boolean requireFullClusteringKey()
-    {
-        return false;
-    }
-
-    public void addUpdateForKey(PartitionUpdate update, CBuilder cbuilder, UpdateParameters params)
+    @Override
+    public void addUpdateForKey(PartitionUpdate update, Clustering clustering, UpdateParameters params)
     throws InvalidRequestException
     {
         List<Operation> regularDeletions = getRegularOperations();
@@ -56,32 +57,29 @@ public class DeleteStatement extends ModificationStatement
         if (regularDeletions.isEmpty() && staticDeletions.isEmpty())
         {
             // We're not deleting any specific columns so it's either a full partition deletion ....
-            if (cbuilder.count() == 0)
+            if (clustering.size() == 0)
             {
                 update.addPartitionDeletion(params.deletionTime());
             }
             // ... or a row deletion ...
-            else if (cbuilder.remainingCount() == 0)
+            else if (clustering.size() == cfm.clusteringColumns().size())
             {
-                params.newRow(cbuilder.build());
+                params.newRow(clustering);
                 params.addRowDeletion();
                 update.add(params.buildRow());
             }
             // ... or a range of rows deletion.
             else
             {
-                update.add(params.makeRangeTombstone(cbuilder));
+                update.add(params.makeRangeTombstone(cfm.comparator, clustering));
             }
         }
         else
         {
             if (!regularDeletions.isEmpty())
             {
-                // We can't delete specific (regular) columns if not all clustering columns have been specified.
-                if (cbuilder.remainingCount() > 0)
-                    throw new InvalidRequestException(String.format("Primary key column '%s' must be specified in order to delete column '%s'", getFirstEmptyKey().name, regularDeletions.get(0).column.name));
+                params.newRow(clustering);
 
-                params.newRow(cbuilder.build());
                 for (Operation op : regularDeletions)
                     op.execute(update.partitionKey(), params);
                 update.add(params.buildRow());
@@ -99,21 +97,16 @@ public class DeleteStatement extends ModificationStatement
         params.validateIndexedColumns(update);
     }
 
-    protected void validateWhereClauseForConditions() throws InvalidRequestException
+    @Override
+    public void addUpdateForKey(PartitionUpdate update, Slice slice, UpdateParameters params)
     {
-        Iterator<ColumnDefinition> iterator = Iterators.concat(cfm.partitionKeyColumns().iterator(), cfm.clusteringColumns().iterator());
-        while (iterator.hasNext())
-        {
-            ColumnDefinition def = iterator.next();
-            Restriction restriction = processedKeys.get(def.name);
-            if (restriction == null || !(restriction.isEQ() || restriction.isIN()))
-            {
-                throw new InvalidRequestException(
-                        String.format("DELETE statements must restrict all PRIMARY KEY columns with equality relations in order " +
-                                      "to use IF conditions, but column '%s' is not restricted", def.name));
-            }
-        }
+        List<Operation> regularDeletions = getRegularOperations();
+        List<Operation> staticDeletions = getStaticOperations();
+
+        checkTrue(regularDeletions.isEmpty() && staticDeletions.isEmpty(),
+                  "Range deletions are not supported for specific columns");
 
+        update.add(params.makeRangeTombstone(slice));
     }
 
     public static class Parsed extends ModificationStatement.Parsed
@@ -133,28 +126,46 @@ public class DeleteStatement extends ModificationStatement
             this.whereClause = whereClause;
         }
 
-        protected ModificationStatement prepareInternal(CFMetaData cfm, VariableSpecifications boundNames, Attributes attrs) throws InvalidRequestException
+
+        @Override
+        protected ModificationStatement prepareInternal(CFMetaData cfm,
+                                                        VariableSpecifications boundNames,
+                                                        Conditions conditions,
+                                                        Attributes attrs)
         {
-            DeleteStatement stmt = new DeleteStatement(ModificationStatement.StatementType.DELETE, boundNames.size(), cfm, attrs);
+            Operations operations = new Operations();
 
             for (Operation.RawDeletion deletion : deletions)
             {
-                ColumnIdentifier id = deletion.affectedColumn().prepare(cfm);
-                ColumnDefinition def = cfm.getColumnDefinition(id);
-                if (def == null)
-                    throw new InvalidRequestException(String.format("Unknown identifier %s", id));
+                ColumnDefinition def = getColumnDefinition(cfm, deletion.affectedColumn());
 
                 // For compact, we only have one value except the key, so the only form of DELETE that make sense is without a column
                 // list. However, we support having the value name for coherence with the static/sparse case
-                if (def.isPrimaryKeyColumn())
-                    throw new InvalidRequestException(String.format("Invalid identifier %s for deletion (should not be a PRIMARY KEY part)", def.name));
+                checkFalse(def.isPrimaryKeyColumn(), "Invalid identifier %s for deletion (should not be a PRIMARY KEY part)", def.name);
 
                 Operation op = deletion.prepare(cfm.ksName, def);
                 op.collectMarkerSpecification(boundNames);
-                stmt.addOperation(op);
+                operations.add(op);
             }
 
-            stmt.processWhereClause(whereClause, boundNames);
+            StatementRestrictions restrictions = newRestrictions(StatementType.DELETE,
+                                                                 cfm,
+                                                                 boundNames,
+                                                                 operations,
+                                                                 whereClause,
+                                                                 conditions);
+
+            DeleteStatement stmt = new DeleteStatement(boundNames.size(),
+                                                       cfm,
+                                                       operations,
+                                                       restrictions,
+                                                       conditions,
+                                                       attrs);
+
+            if (stmt.hasConditions())
+                checkTrue(restrictions.hasAllPKColumnsRestrictedByEqualities(),
+                          "DELETE statements must restrict all PRIMARY KEY columns with equality relations" +
+                          " in order to use IF conditions");
             return stmt;
         }
     }


[3/5] cassandra git commit: Allow range deletions in CQL

Posted by bl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
index 5fa1842..9ddf7b8 100644
--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
@@ -21,29 +21,30 @@ import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.collect.Iterables;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkNull;
 
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.MaterializedViewDefinition;
-import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.cql3.ColumnIdentifier.Raw;
 import org.apache.cassandra.cql3.functions.Function;
-import org.apache.cassandra.cql3.restrictions.Restriction;
-import org.apache.cassandra.cql3.restrictions.SingleColumnRestriction;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 import org.apache.cassandra.cql3.selection.Selection;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.marshal.BooleanType;
-import org.apache.cassandra.exceptions.*;
+import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.exceptions.RequestExecutionException;
+import org.apache.cassandra.exceptions.RequestValidationException;
+import org.apache.cassandra.exceptions.UnauthorizedException;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.service.StorageProxy;
-import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.paxos.Commit;
 import org.apache.cassandra.thrift.ThriftValidation;
 import org.apache.cassandra.transport.messages.ResultMessage;
@@ -51,11 +52,10 @@ import org.apache.cassandra.triggers.TriggerExecutor;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.UUIDGen;
-import org.apache.cassandra.utils.btree.BTreeSet;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNotNull;
-import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 
 /*
  * Abstract parent class of individual modifications, i.e. INSERT, UPDATE and DELETE.
@@ -66,75 +66,89 @@ public abstract class ModificationStatement implements CQLStatement
 
     private static final ColumnIdentifier CAS_RESULT_COLUMN = new ColumnIdentifier("[applied]", false);
 
-    public static enum StatementType { INSERT, UPDATE, DELETE }
-    public final StatementType type;
+    protected final StatementType type;
 
     private final int boundTerms;
     public final CFMetaData cfm;
-    public final Attributes attrs;
+    private final Attributes attrs;
 
-    protected final Map<ColumnIdentifier, Restriction> processedKeys = new HashMap<>();
-    private final List<Operation> regularOperations = new ArrayList<>();
-    private final List<Operation> staticOperations = new ArrayList<>();
+    private final StatementRestrictions restrictions;
 
-    // TODO: If we had a builder for this statement, we could have updatedColumns/conditionColumns final and only have
-    // updatedColumnsBuilder/conditionColumnsBuilder in the builder ...
-    private PartitionColumns updatedColumns;
-    private PartitionColumns.Builder updatedColumnsBuilder = PartitionColumns.builder();
-    private PartitionColumns conditionColumns;
-    private PartitionColumns.Builder conditionColumnsBuilder = PartitionColumns.builder();
+    private final Operations operations;
 
-    // Separating normal and static conditions makes things somewhat easier
-    private List<ColumnCondition> columnConditions;
-    private List<ColumnCondition> staticConditions;
-    private boolean ifNotExists;
-    private boolean ifExists;
+    private final PartitionColumns updatedColumns;
 
-    private boolean hasNoClusteringColumns = true;
+    private final Conditions conditions;
 
-    private boolean setsStaticColumns;
-    private boolean setsRegularColumns;
+    private final PartitionColumns conditionColumns;
 
-    private final com.google.common.base.Function<ColumnCondition, ColumnDefinition> getColumnForCondition =
-      new com.google.common.base.Function<ColumnCondition, ColumnDefinition>()
-    {
-        public ColumnDefinition apply(ColumnCondition cond)
-        {
-            return cond.column;
-        }
-    };
+    private final PartitionColumns requiresRead;
 
-    public ModificationStatement(StatementType type, int boundTerms, CFMetaData cfm, Attributes attrs)
+    public ModificationStatement(StatementType type,
+                                 int boundTerms,
+                                 CFMetaData cfm,
+                                 Operations operations,
+                                 StatementRestrictions restrictions,
+                                 Conditions conditions,
+                                 Attributes attrs)
     {
         this.type = type;
         this.boundTerms = boundTerms;
         this.cfm = cfm;
+        this.restrictions = restrictions;
+        this.operations = operations;
+        this.conditions = conditions;
         this.attrs = attrs;
-    }
 
-    public Iterable<Function> getFunctions()
-    {
-        Iterable<Function> functions = attrs.getFunctions();
+        if (!conditions.isEmpty())
+        {
+            checkFalse(cfm.isCounter(), "Conditional updates are not supported on counter tables");
+            checkFalse(attrs.isTimestampSet(), "Cannot provide custom timestamp for conditional updates");
+        }
 
-        for (Restriction restriction : processedKeys.values())
-            functions = Iterables.concat(functions, restriction.getFunctions());
+        PartitionColumns.Builder conditionColumnsBuilder = PartitionColumns.builder();
+        Iterable<ColumnDefinition> columns = conditions.getColumns();
+        if (columns != null)
+            conditionColumnsBuilder.addAll(columns);
 
-        for (Operation operation : allOperations())
-            functions = Iterables.concat(functions, operation.getFunctions());
+        PartitionColumns.Builder updatedColumnsBuilder = PartitionColumns.builder();
+        PartitionColumns.Builder requiresReadBuilder = PartitionColumns.builder();
+        for (Operation operation : operations)
+        {
+            updatedColumnsBuilder.add(operation.column);
+            // If the operation requires a read-before-write and we're doing a conditional read, we want to read
+            // the affected column as part of the read-for-conditions paxos phase (see #7499).
+            if (operation.requiresRead())
+            {
+                conditionColumnsBuilder.add(operation.column);
+                requiresReadBuilder.add(operation.column);
+            }
+        }
 
-        for (ColumnCondition condition : allConditions())
-            functions = Iterables.concat(functions, condition.getFunctions());
+        PartitionColumns modifiedColumns = updatedColumnsBuilder.build();
+        // Compact tables have no row marker. So if we don't actually update any particular column,
+        // this means that we're only updating the PK, which we allow if only those were declared in
+        // the definition. In that case however, we do want to write the compactValueColumn (since again
+        // we can't use a "row marker") so add it automatically.
+        if (cfm.isCompactTable() && modifiedColumns.isEmpty() && updatesRegularRows())
+            modifiedColumns = cfm.partitionColumns();
 
-        return functions;
+        this.updatedColumns = modifiedColumns;
+        this.conditionColumns = conditionColumnsBuilder.build();
+        this.requiresRead = requiresReadBuilder.build();
     }
 
-    public boolean hasNoClusteringColumns()
+    public Iterable<Function> getFunctions()
     {
-        return hasNoClusteringColumns;
+        return Iterables.concat(attrs.getFunctions(),
+                                restrictions.getFunctions(),
+                                operations.getFunctions(),
+                                conditions.getFunctions());
     }
 
-    public abstract boolean requireFullClusteringKey();
-    public abstract void addUpdateForKey(PartitionUpdate update, CBuilder clusteringBuilder, UpdateParameters params) throws InvalidRequestException;
+    public abstract void addUpdateForKey(PartitionUpdate update, Clustering clustering, UpdateParameters params);
+
+    public abstract void addUpdateForKey(PartitionUpdate update, Slice slice, UpdateParameters params);
 
     public int getBoundTerms()
     {
@@ -204,37 +218,10 @@ public abstract class ModificationStatement implements CQLStatement
 
     public void validate(ClientState state) throws InvalidRequestException
     {
-        if (hasConditions() && attrs.isTimestampSet())
-            throw new InvalidRequestException("Cannot provide custom timestamp for conditional updates");
-
-        if (isCounter() && attrs.isTimestampSet())
-            throw new InvalidRequestException("Cannot provide custom timestamp for counter updates");
-
-        if (isCounter() && attrs.isTimeToLiveSet())
-            throw new InvalidRequestException("Cannot provide custom TTL for counter updates");
-
-        if (isMaterializedView())
-            throw new InvalidRequestException("Cannot directly modify a materialized view");
-    }
-
-    public void addOperation(Operation op)
-    {
-        updatedColumnsBuilder.add(op.column);
-        // If the operation requires a read-before-write and we're doing a conditional read, we want to read
-        // the affected column as part of the read-for-conditions paxos phase (see #7499).
-        if (op.requiresRead())
-            conditionColumnsBuilder.add(op.column);
-
-        if (op.column.isStatic())
-        {
-            setsStaticColumns = true;
-            staticOperations.add(op);
-        }
-        else
-        {
-            setsRegularColumns = true;
-            regularOperations.add(op);
-        }
+        checkFalse(hasConditions() && attrs.isTimestampSet(), "Cannot provide custom timestamp for conditional updates");
+        checkFalse(isCounter() && attrs.isTimestampSet(), "Cannot provide custom timestamp for counter updates");
+        checkFalse(isCounter() && attrs.isTimeToLiveSet(), "Cannot provide custom TTL for counter updates");
+        checkFalse(isMaterializedView(), "Cannot directly modify a materialized view");
     }
 
     public PartitionColumns updatedColumns()
@@ -254,249 +241,77 @@ public abstract class ModificationStatement implements CQLStatement
         // columns is if we set some static columns, and in that case no clustering
         // columns should be given. So in practice, it's enough to check if we have
         // either the table has no clustering or if it has at least one of them set.
-        return cfm.clusteringColumns().isEmpty() || !hasNoClusteringColumns;
+        return cfm.clusteringColumns().isEmpty() || restrictions.hasClusteringColumnsRestriction();
     }
 
     public boolean updatesStaticRow()
     {
-        return !staticOperations.isEmpty();
-    }
-
-    private void finishPreparation()
-    {
-        updatedColumns = updatedColumnsBuilder.build();
-        // Compact tables have not row marker. So if we don't actually update any particular column,
-        // this means that we're only updating the PK, which we allow if only those were declared in
-        // the definition. In that case however, we do went to write the compactValueColumn (since again
-        // we can't use a "row marker") so add it automatically.
-        if (cfm.isCompactTable() && updatedColumns.isEmpty() && updatesRegularRows())
-            updatedColumns = cfm.partitionColumns();
-
-        conditionColumns = conditionColumnsBuilder.build();
+        return operations.appliesToStaticColumns();
     }
 
     public List<Operation> getRegularOperations()
     {
-        return regularOperations;
+        return operations.regularOperations();
     }
 
     public List<Operation> getStaticOperations()
     {
-        return staticOperations;
+        return operations.staticOperations();
     }
 
     public Iterable<Operation> allOperations()
     {
-        return Iterables.concat(staticOperations, regularOperations);
+        return operations;
     }
 
     public Iterable<ColumnDefinition> getColumnsWithConditions()
     {
-        if (ifNotExists || ifExists)
-            return null;
-
-        return Iterables.concat(columnConditions == null ? Collections.<ColumnDefinition>emptyList() : Iterables.transform(columnConditions, getColumnForCondition),
-                                staticConditions == null ? Collections.<ColumnDefinition>emptyList() : Iterables.transform(staticConditions, getColumnForCondition));
-    }
-
-    public Iterable<ColumnCondition> allConditions()
-    {
-        if (staticConditions == null)
-            return columnConditions == null ? Collections.<ColumnCondition>emptySet(): columnConditions;
-        if (columnConditions == null)
-            return staticConditions;
-        return Iterables.concat(staticConditions, columnConditions);
-    }
-
-    public void addCondition(ColumnCondition cond)
-    {
-        conditionColumnsBuilder.add(cond.column);
-
-        List<ColumnCondition> conds = null;
-        if (cond.column.isStatic())
-        {
-            setsStaticColumns = true;
-            if (staticConditions == null)
-                staticConditions = new ArrayList<ColumnCondition>();
-            conds = staticConditions;
-        }
-        else
-        {
-            setsRegularColumns = true;
-            if (columnConditions == null)
-                columnConditions = new ArrayList<ColumnCondition>();
-            conds = columnConditions;
-        }
-        conds.add(cond);
-    }
-
-    public void setIfNotExistCondition()
-    {
-        ifNotExists = true;
+         return conditions.getColumns();
     }
 
     public boolean hasIfNotExistCondition()
     {
-        return ifNotExists;
-    }
-
-    public void setIfExistCondition()
-    {
-        ifExists = true;
+        return conditions.isIfNotExists();
     }
 
     public boolean hasIfExistCondition()
     {
-        return ifExists;
-    }
-
-    private void addKeyValues(ColumnDefinition def, Restriction values) throws InvalidRequestException
-    {
-        if (def.kind == ColumnDefinition.Kind.CLUSTERING)
-            hasNoClusteringColumns = false;
-        if (processedKeys.put(def.name, values) != null)
-            throw new InvalidRequestException(String.format("Multiple definitions found for PRIMARY KEY part %s", def.name));
-    }
-
-    public void addKeyValue(ColumnDefinition def, Term value) throws InvalidRequestException
-    {
-        addKeyValues(def, new SingleColumnRestriction.EQRestriction(def, value));
-    }
-
-    public void processWhereClause(List<Relation> whereClause, VariableSpecifications names) throws InvalidRequestException
-    {
-        for (Relation relation : whereClause)
-        {
-            if (relation.isMultiColumn())
-            {
-                throw new InvalidRequestException(
-                        String.format("Multi-column relations cannot be used in WHERE clauses for UPDATE and DELETE statements: %s", relation));
-            }
-            SingleColumnRelation rel = (SingleColumnRelation) relation;
-
-            if (rel.onToken())
-                throw new InvalidRequestException(String.format("The token function cannot be used in WHERE clauses for UPDATE and DELETE statements: %s", relation));
-
-            ColumnIdentifier id = rel.getEntity().prepare(cfm);
-            ColumnDefinition def = cfm.getColumnDefinition(id);
-            if (def == null)
-                throw new InvalidRequestException(String.format("Unknown key identifier %s", id));
-
-            switch (def.kind)
-            {
-                case PARTITION_KEY:
-                case CLUSTERING:
-                    Restriction restriction;
-
-                    if (rel.isEQ() || (def.isPartitionKey() && rel.isIN()))
-                    {
-                        restriction = rel.toRestriction(cfm, names);
-                    }
-                    else
-                    {
-                        throw new InvalidRequestException(String.format("Invalid operator %s for PRIMARY KEY part %s", rel.operator(), def.name));
-                    }
-
-                    addKeyValues(def, restriction);
-                    break;
-                default:
-                    throw new InvalidRequestException(String.format("Non PRIMARY KEY %s found in where clause", def.name));
-            }
-        }
+        return conditions.isIfExists();
     }
 
     public List<ByteBuffer> buildPartitionKeyNames(QueryOptions options)
     throws InvalidRequestException
     {
-        MultiCBuilder keyBuilder = MultiCBuilder.create(cfm.getKeyValidatorAsClusteringComparator());
-        for (ColumnDefinition def : cfm.partitionKeyColumns())
-        {
-            Restriction r = checkNotNull(processedKeys.get(def.name), "Missing mandatory PRIMARY KEY part %s", def.name);
-            r.appendTo(keyBuilder, options);
-        }
-
-        NavigableSet<Clustering> clusterings = keyBuilder.build();
-        List<ByteBuffer> keys = new ArrayList<ByteBuffer>(clusterings.size());
-        for (Clustering clustering : clusterings)
-        {
-            ByteBuffer key = CFMetaData.serializePartitionKey(clustering);
-            ThriftValidation.validateKey(cfm, key);
-            keys.add(key);
-        }
-        return keys;
+        return restrictions.getPartitionKeys(options);
     }
 
-    public CBuilder createClustering(QueryOptions options)
+    public NavigableSet<Clustering> createClustering(QueryOptions options)
     throws InvalidRequestException
     {
-        // If the only updated/deleted columns are static, then we don't need clustering columns.
-        // And in fact, unless it is an INSERT, we reject if clustering colums are provided as that
-        // suggest something unintended. For instance, given:
-        //   CREATE TABLE t (k int, v int, s int static, PRIMARY KEY (k, v))
-        // it can make sense to do:
-        //   INSERT INTO t(k, v, s) VALUES (0, 1, 2)
-        // but both
-        //   UPDATE t SET s = 3 WHERE k = 0 AND v = 1
-        //   DELETE v FROM t WHERE k = 0 AND v = 1
-        // sounds like you don't really understand what your are doing.
-        if (setsStaticColumns && !setsRegularColumns)
-        {
-            // If we set no non-static columns, then it's fine not to have clustering columns
-            if (hasNoClusteringColumns)
-                return CBuilder.STATIC_BUILDER;
-
-            // If we do have clustering columns however, then either it's an INSERT and the query is valid
-            // but we still need to build a proper prefix, or it's not an INSERT, and then we want to reject
-            // (see above)
-            if (type != StatementType.INSERT)
-            {
-                for (ColumnDefinition def : cfm.clusteringColumns())
-                    if (processedKeys.get(def.name) != null)
-                        throw new InvalidRequestException(String.format("Invalid restriction on clustering column %s since the %s statement modifies only static columns", def.name, type));
-                // we should get there as it contradicts hasNoClusteringColumns == false
-                throw new AssertionError();
-            }
-        }
+        if (appliesOnlyToStaticColumns() && !restrictions.hasClusteringColumnsRestriction())
+            return FBUtilities.singleton(CBuilder.STATIC_BUILDER.build(), cfm.comparator);
 
-        return createClusteringInternal(options);
+        return restrictions.getClusteringColumns(options);
     }
 
-    private CBuilder createClusteringInternal(QueryOptions options)
-    throws InvalidRequestException
+    /**
+     * Checks that the modification only applies to static columns.
+     * @return <code>true</code> if the modification only applies to static columns, <code>false</code> otherwise.
+     */
+    private boolean appliesOnlyToStaticColumns()
     {
-        CBuilder builder = CBuilder.create(cfm.comparator);
-        MultiCBuilder multiBuilder = MultiCBuilder.wrap(builder);
-
-        ColumnDefinition firstEmptyKey = null;
-        for (ColumnDefinition def : cfm.clusteringColumns())
-        {
-            Restriction r = processedKeys.get(def.name);
-            if (r == null)
-            {
-                firstEmptyKey = def;
-                checkFalse(requireFullClusteringKey() && !cfm.isDense() && cfm.isCompound(),
-                           "Missing mandatory PRIMARY KEY part %s", def.name);
-            }
-            else if (firstEmptyKey != null)
-            {
-                throw invalidRequest("Missing PRIMARY KEY part %s since %s is set", firstEmptyKey.name, def.name);
-            }
-            else
-            {
-                r.appendTo(multiBuilder, options);
-            }
-        }
-        return builder;
+        return appliesOnlyToStaticColumns(operations, conditions);
     }
 
-    protected ColumnDefinition getFirstEmptyKey()
+    /**
+     * Checks that the specified operations and conditions only apply to static columns.
+     * @return <code>true</code> if the specified operations and conditions only apply to static columns,
+     * <code>false</code> otherwise.
+     */
+    public static boolean appliesOnlyToStaticColumns(Operations operation, Conditions conditions)
     {
-        for (ColumnDefinition def : cfm.clusteringColumns())
-        {
-            if (processedKeys.get(def.name) == null)
-                return def;
-        }
-        return null;
+        return !operation.appliesToRegularColumns() && !conditions.appliesToRegularColumns()
+                && (operation.appliesToStaticColumns() || conditions.appliesToStaticColumns());
     }
 
     public boolean requiresRead()
@@ -509,8 +324,11 @@ public abstract class ModificationStatement implements CQLStatement
         return false;
     }
 
-    protected Map<DecoratedKey, Partition> readRequiredLists(Collection<ByteBuffer> partitionKeys, CBuilder cbuilder, boolean local, ConsistencyLevel cl)
-    throws RequestExecutionException, RequestValidationException
+    private Map<DecoratedKey, Partition> readRequiredLists(Collection<ByteBuffer> partitionKeys,
+                                                           ClusteringIndexFilter filter,
+                                                           DataLimits limits,
+                                                           boolean local,
+                                                           ConsistencyLevel cl)
     {
         if (!requiresRead())
             return null;
@@ -524,27 +342,16 @@ public abstract class ModificationStatement implements CQLStatement
             throw new InvalidRequestException(String.format("Write operation require a read but consistency %s is not supported on reads", cl));
         }
 
-        // TODO: no point in recomputing that every time. Should move to preparation phase.
-        PartitionColumns.Builder builder = PartitionColumns.builder();
-        for (Operation op : allOperations())
-            if (op.requiresRead())
-                builder.add(op.column);
-
-        PartitionColumns toRead = builder.build();
-
-        NavigableSet<Clustering> clusterings = BTreeSet.of(cfm.comparator, cbuilder.build());
         List<SinglePartitionReadCommand<?>> commands = new ArrayList<>(partitionKeys.size());
         int nowInSec = FBUtilities.nowInSeconds();
         for (ByteBuffer key : partitionKeys)
-            commands.add(new SinglePartitionNamesCommand(cfm,
-                                                         nowInSec,
-                                                         ColumnFilter.selection(toRead),
-                                                         RowFilter.NONE,
-                                                         DataLimits.NONE,
-                                                         key,
-                                                         new ClusteringIndexNamesFilter(clusterings, false)));
-
-        Map<DecoratedKey, Partition> map = new HashMap<>();
+            commands.add(SinglePartitionReadCommand.create(cfm,
+                                                           nowInSec,
+                                                           ColumnFilter.selection(this.requiresRead),
+                                                           RowFilter.NONE,
+                                                           limits,
+                                                           cfm.decorateKey(key),
+                                                           filter));
 
         SinglePartitionReadCommand.Group group = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
 
@@ -555,18 +362,16 @@ public abstract class ModificationStatement implements CQLStatement
                 return asMaterializedMap(iter);
             }
         }
-        else
+
+        try (PartitionIterator iter = group.execute(cl, null))
         {
-            try (PartitionIterator iter = group.execute(cl, null))
-            {
-                return asMaterializedMap(iter);
-            }
+            return asMaterializedMap(iter);
         }
     }
 
     private Map<DecoratedKey, Partition> asMaterializedMap(PartitionIterator iterator)
     {
-        Map<DecoratedKey, Partition> map = new HashMap();
+        Map<DecoratedKey, Partition> map = new HashMap<>();
         while (iterator.hasNext())
         {
             try (RowIterator partition = iterator.next())
@@ -579,10 +384,7 @@ public abstract class ModificationStatement implements CQLStatement
 
     public boolean hasConditions()
     {
-        return ifNotExists
-            || ifExists
-            || (columnConditions != null && !columnConditions.isEmpty())
-            || (staticConditions != null && !staticConditions.isEmpty());
+        return !conditions.isEmpty();
     }
 
     public ResultMessage execute(QueryState queryState, QueryOptions options)
@@ -636,39 +438,31 @@ public abstract class ModificationStatement implements CQLStatement
     {
         List<ByteBuffer> keys = buildPartitionKeyNames(options);
         // We don't support IN for CAS operation so far
-        if (keys.size() > 1)
-            throw new InvalidRequestException("IN on the partition key is not supported with conditional updates");
+        checkFalse(keys.size() > 1,
+                   "IN on the partition key is not supported with conditional %s",
+                   type.isUpdate()? "updates" : "deletions");
 
         DecoratedKey key = cfm.decorateKey(keys.get(0));
         long now = options.getTimestamp(queryState);
-        CBuilder cbuilder = createClustering(options);
+        SortedSet<Clustering> clusterings = createClustering(options);
+
+        checkFalse(clusterings.size() > 1,
+                   "IN on the clustering key columns is not supported with conditional %s",
+                    type.isUpdate()? "updates" : "deletions");
+
+        Clustering clustering = Iterables.getOnlyElement(clusterings);
 
         CQL3CasRequest request = new CQL3CasRequest(cfm, key, false, conditionColumns(), updatesRegularRows(), updatesStaticRow());
-        addConditions(cbuilder.build(), request, options);
-        request.addRowUpdate(cbuilder, this, options, now);
+
+        addConditions(clustering, request, options);
+        request.addRowUpdate(clustering, this, options, now);
+
         return request;
     }
 
     public void addConditions(Clustering clustering, CQL3CasRequest request, QueryOptions options) throws InvalidRequestException
     {
-        if (ifNotExists)
-        {
-            // If we use ifNotExists, if the statement applies to any non static columns, then the condition is on the row of the non-static
-            // columns and the prefix should be the clustering. But if only static columns are set, then the ifNotExists apply to the existence
-            // of any static columns and we should use the prefix for the "static part" of the partition.
-            request.addNotExist(clustering);
-        }
-        else if (ifExists)
-        {
-            request.addExist(clustering);
-        }
-        else
-        {
-            if (columnConditions != null)
-                request.addConditions(clustering, columnConditions, options);
-            if (staticConditions != null)
-                request.addConditions(Clustering.STATIC_CLUSTERING, staticConditions, options);
-        }
+        conditions.addConditionsTo(request, clustering, options);
     }
 
     private ResultSet buildCasResultSet(RowIterator partition, QueryOptions options) throws InvalidRequestException
@@ -778,9 +572,8 @@ public abstract class ModificationStatement implements CQLStatement
     static RowIterator casInternal(CQL3CasRequest request, QueryState state)
     {
         UUID ballot = UUIDGen.getTimeUUIDFromMicros(state.getTimestamp());
-        CFMetaData metadata = Schema.instance.getCFMetaData(request.cfm.ksName, request.cfm.cfName);
 
-        SinglePartitionReadCommand readCommand = request.readCommand(FBUtilities.nowInSeconds());
+        SinglePartitionReadCommand<?> readCommand = request.readCommand(FBUtilities.nowInSeconds());
         FilteredPartition current;
         try (ReadOrderGroup orderGroup = readCommand.startOrderGroup(); PartitionIterator iter = readCommand.executeInternal(orderGroup))
         {
@@ -806,55 +599,141 @@ public abstract class ModificationStatement implements CQLStatement
      * @param now the current timestamp in microseconds to use if no timestamp is user provided.
      *
      * @return list of the mutations
-     * @throws InvalidRequestException on invalid requests
      */
     private Collection<? extends IMutation> getMutations(QueryOptions options, boolean local, long now)
-    throws RequestExecutionException, RequestValidationException
+    {
+        UpdatesCollector collector = new UpdatesCollector(updatedColumns, 1);
+        addUpdates(collector, options, local, now);
+        return collector.toMutations();
+    }
+
+    final void addUpdates(UpdatesCollector collector,
+                          QueryOptions options,
+                          boolean local,
+                          long now)
     {
         List<ByteBuffer> keys = buildPartitionKeyNames(options);
-        CBuilder clustering = createClustering(options);
 
-        UpdateParameters params = makeUpdateParameters(keys, clustering, options, local, now);
+        if (type.allowClusteringColumnSlices()
+                && restrictions.hasClusteringColumnsRestriction()
+                && restrictions.isColumnRange())
+        {
+            Slices slices = createSlice(options);
+
+            // If all the ranges were invalid we do not need to do anything.
+            if (slices.isEmpty())
+                return;
+
+            UpdateParameters params = makeUpdateParameters(keys,
+                                                           new ClusteringIndexSliceFilter(slices, false),
+                                                           options,
+                                                           DataLimits.NONE,
+                                                           local,
+                                                           now);
+            for (ByteBuffer key : keys)
+            {
+                ThriftValidation.validateKey(cfm, key);
+                DecoratedKey dk = cfm.decorateKey(key);
+
+                PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency());
 
-        Collection<IMutation> mutations = new ArrayList<IMutation>(keys.size());
-        for (ByteBuffer key: keys)
+                for (Slice slice : slices)
+                    addUpdateForKey(upd, slice, params);
+            }
+        }
+        else
         {
-            ThriftValidation.validateKey(cfm, key);
-            PartitionUpdate upd = new PartitionUpdate(cfm, key, updatedColumns(), 1);
-            addUpdateForKey(upd, clustering, params);
-            Mutation mut = new Mutation(upd);
+            NavigableSet<Clustering> clusterings = createClustering(options);
+
+            UpdateParameters params = makeUpdateParameters(keys, clusterings, options, local, now);
+
+            for (ByteBuffer key : keys)
+            {
+                ThriftValidation.validateKey(cfm, key);
+                DecoratedKey dk = cfm.decorateKey(key);
+
+                PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency());
 
-            mutations.add(isCounter() ? new CounterMutation(mut, options.getConsistency()) : mut);
+                if (clusterings.isEmpty())
+                {
+                    addUpdateForKey(upd, Clustering.EMPTY, params);
+                }
+                else
+                {
+                    for (Clustering clustering : clusterings)
+                        addUpdateForKey(upd, clustering, params);
+                }
+            }
         }
-        return mutations;
     }
 
-    public UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
-                                                 CBuilder clustering,
-                                                 QueryOptions options,
-                                                 boolean local,
-                                                 long now)
-    throws RequestExecutionException, RequestValidationException
+    private Slices createSlice(QueryOptions options)
+    {
+        SortedSet<Slice.Bound> startBounds = restrictions.getClusteringColumnsBounds(Bound.START, options);
+        SortedSet<Slice.Bound> endBounds = restrictions.getClusteringColumnsBounds(Bound.END, options);
+
+        return toSlices(startBounds, endBounds);
+    }
+
+    private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
+                                                  NavigableSet<Clustering> clusterings,
+                                                  QueryOptions options,
+                                                  boolean local,
+                                                  long now)
+    {
+        if (clusterings.contains(Clustering.STATIC_CLUSTERING))
+            return makeUpdateParameters(keys,
+                                        new ClusteringIndexSliceFilter(Slices.ALL, false),
+                                        options,
+                                        DataLimits.cqlLimits(1),
+                                        local,
+                                        now);
+
+        return makeUpdateParameters(keys,
+                                    new ClusteringIndexNamesFilter(clusterings, false),
+                                    options,
+                                    DataLimits.NONE,
+                                    local,
+                                    now);
+    }
+
+    private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
+                                                  ClusteringIndexFilter filter,
+                                                  QueryOptions options,
+                                                  DataLimits limits,
+                                                  boolean local,
+                                                  long now)
     {
         // Some lists operation requires reading
-        Map<DecoratedKey, Partition> lists = readRequiredLists(keys, clustering, local, options.getConsistency());
+        Map<DecoratedKey, Partition> lists = readRequiredLists(keys, filter, limits, local, options.getConsistency());
         return new UpdateParameters(cfm, updatedColumns(), options, getTimestamp(now, options), getTimeToLive(options), lists, true);
     }
 
-    /**
-     * If there are conditions on the statement, this is called after the where clause and conditions have been
-     * processed to check that they are compatible.
-     * @throws InvalidRequestException
-     */
-    protected void validateWhereClauseForConditions() throws InvalidRequestException
+    private Slices toSlices(SortedSet<Slice.Bound> startBounds, SortedSet<Slice.Bound> endBounds)
     {
-        //  no-op by default
+        assert startBounds.size() == endBounds.size();
+
+        Slices.Builder builder = new Slices.Builder(cfm.comparator);
+
+        Iterator<Slice.Bound> starts = startBounds.iterator();
+        Iterator<Slice.Bound> ends = endBounds.iterator();
+
+        while (starts.hasNext())
+        {
+            Slice slice = Slice.make(starts.next(), ends.next());
+            if (!slice.isEmpty(cfm.comparator))
+            {
+                builder.add(slice);
+            }
+        }
+
+        return builder.build();
     }
 
     public static abstract class Parsed extends CFStatement
     {
-        protected final Attributes.Raw attrs;
-        protected final List<Pair<ColumnIdentifier.Raw, ColumnCondition.Raw>> conditions;
+        private final Attributes.Raw attrs;
+        private final List<Pair<ColumnIdentifier.Raw, ColumnCondition.Raw>> conditions;
         private final boolean ifNotExists;
         private final boolean ifExists;
 
@@ -867,7 +746,7 @@ public abstract class ModificationStatement implements CQLStatement
             this.ifExists = ifExists;
         }
 
-        public ParsedStatement.Prepared prepare() throws InvalidRequestException
+        public ParsedStatement.Prepared prepare()
         {
             VariableSpecifications boundNames = getBoundVariables();
             ModificationStatement statement = prepare(boundNames);
@@ -875,68 +754,118 @@ public abstract class ModificationStatement implements CQLStatement
             return new ParsedStatement.Prepared(statement, boundNames, boundNames.getPartitionKeyBindIndexes(cfm));
         }
 
-        public ModificationStatement prepare(VariableSpecifications boundNames) throws InvalidRequestException
+        public ModificationStatement prepare(VariableSpecifications boundNames)
         {
             CFMetaData metadata = ThriftValidation.validateColumnFamily(keyspace(), columnFamily());
 
             Attributes preparedAttributes = attrs.prepare(keyspace(), columnFamily());
             preparedAttributes.collectMarkerSpecification(boundNames);
 
-            ModificationStatement stmt = prepareInternal(metadata, boundNames, preparedAttributes);
+            Conditions preparedConditions = prepareConditions(metadata, boundNames);
 
-            if (ifNotExists || ifExists || !conditions.isEmpty())
+            return prepareInternal(metadata,
+                                   boundNames,
+                                   preparedConditions,
+                                   preparedAttributes);
+        }
+
+        /**
+         * Returns the conditions of the statement (IF EXISTS, IF NOT EXISTS, or column conditions).
+         *
+         * @param metadata the column family meta data
+         * @param boundNames the bound names
+         * @return the statement conditions.
+         */
+        private Conditions prepareConditions(CFMetaData metadata, VariableSpecifications boundNames)
+        {
+            // To have both 'IF EXISTS'/'IF NOT EXISTS' and some other conditions doesn't make sense.
+            // So far this is enforced by the parser, but let's assert it for sanity if ever the parser changes.
+            if (ifExists)
             {
-                if (stmt.isCounter())
-                    throw new InvalidRequestException("Conditional updates are not supported on counter tables");
+                assert conditions.isEmpty();
+                assert !ifNotExists;
+                return Conditions.IF_EXISTS_CONDITION;
+            }
 
-                if (attrs.timestamp != null)
-                    throw new InvalidRequestException("Cannot provide custom timestamp for conditional updates");
+            if (ifNotExists)
+            {
+                assert conditions.isEmpty();
+                assert !ifExists;
+                return Conditions.IF_NOT_EXISTS_CONDITION;
+            }
 
-                if (ifNotExists)
-                {
-                    // To have both 'IF NOT EXISTS' and some other conditions doesn't make sense.
-                    // So far this is enforced by the parser, but let's assert it for sanity if ever the parse changes.
-                    assert conditions.isEmpty();
-                    assert !ifExists;
-                    stmt.setIfNotExistCondition();
-                }
-                else if (ifExists)
-                {
-                    assert conditions.isEmpty();
-                    assert !ifNotExists;
-                    stmt.setIfExistCondition();
-                }
-                else
-                {
-                    for (Pair<ColumnIdentifier.Raw, ColumnCondition.Raw> entry : conditions)
-                    {
-                        ColumnIdentifier id = entry.left.prepare(metadata);
-                        ColumnDefinition def = metadata.getColumnDefinition(id);
-                        if (def == null)
-                            throw new InvalidRequestException(String.format("Unknown identifier %s", id));
-
-                        ColumnCondition condition = entry.right.prepare(keyspace(), def);
-                        condition.collectMarkerSpecification(boundNames);
-
-                        switch (def.kind)
-                        {
-                            case PARTITION_KEY:
-                            case CLUSTERING:
-                                throw new InvalidRequestException(String.format("PRIMARY KEY column '%s' cannot have IF conditions", id));
-                            default:
-                                stmt.addCondition(condition);
-                                break;
-                        }
-                    }
-                }
+            if (conditions.isEmpty())
+                return Conditions.EMPTY_CONDITION;
 
-                stmt.validateWhereClauseForConditions();
+            return prepareColumnConditions(metadata, boundNames);
+        }
+
+        /**
+         * Returns the column conditions.
+         *
+         * @param metadata the column family meta data
+         * @param boundNames the bound names
+         * @return the column conditions.
+         */
+        private ColumnConditions prepareColumnConditions(CFMetaData metadata, VariableSpecifications boundNames)
+        {
+            checkNull(attrs.timestamp, "Cannot provide custom timestamp for conditional updates");
+
+            ColumnConditions.Builder builder = ColumnConditions.newBuilder();
+
+            for (Pair<ColumnIdentifier.Raw, ColumnCondition.Raw> entry : conditions)
+            {
+                ColumnIdentifier id = entry.left.prepare(metadata);
+                ColumnDefinition def = metadata.getColumnDefinition(id);
+                checkNotNull(metadata.getColumnDefinition(id), "Unknown identifier %s in IF conditions", id);
+
+                ColumnCondition condition = entry.right.prepare(keyspace(), def);
+                condition.collectMarkerSpecification(boundNames);
+
+                checkFalse(def.isPrimaryKeyColumn(), "PRIMARY KEY column '%s' cannot have IF conditions", id);
+                builder.add(condition);
             }
+            return builder.build();
+        }
 
-            stmt.finishPreparation();
-            return stmt;
+        protected abstract ModificationStatement prepareInternal(CFMetaData cfm,
+                                                                 VariableSpecifications boundNames,
+                                                                 Conditions conditions,
+                                                                 Attributes attrs);
+
+        /**
+         * Creates the restrictions.
+         *
+         * @param type the statement type
+         * @param cfm the column family meta data
+         * @param boundNames the bound names
+         * @param operations the column operations
+         * @param relations the where relations
+         * @param conditions the conditions
+         * @return the restrictions
+         */
+        protected static StatementRestrictions newRestrictions(StatementType type,
+                                                               CFMetaData cfm,
+                                                               VariableSpecifications boundNames,
+                                                               Operations operations,
+                                                               List<Relation> relations,
+                                                               Conditions conditions)
+        {
+            boolean applyOnlyToStaticColumns = appliesOnlyToStaticColumns(operations, conditions);
+            return new StatementRestrictions(type, cfm, relations, boundNames, applyOnlyToStaticColumns, false, false);
         }
 
-        protected abstract ModificationStatement prepareInternal(CFMetaData cfm, VariableSpecifications boundNames, Attributes attrs) throws InvalidRequestException;
+        /**
+         * Retrieves the <code>ColumnDefinition</code> corresponding to the specified raw <code>ColumnIdentifier</code>.
+         *
+         * @param cfm the column family meta data
+         * @param rawId the raw <code>ColumnIdentifier</code>
+         * @return the <code>ColumnDefinition</code> corresponding to the specified raw <code>ColumnIdentifier</code>
+         */
+        protected static ColumnDefinition getColumnDefinition(CFMetaData cfm, Raw rawId)
+        {
+            ColumnIdentifier id = rawId.prepare(cfm);
+            return checkNotNull(cfm.getColumnDefinition(id), "Unknown identifier %s", id);
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 5d5dfea..2aac6ab 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -23,9 +23,10 @@ import java.util.*;
 import com.google.common.base.Objects;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
-import org.slf4j.Logger;
+
 import org.slf4j.LoggerFactory;
 
+import org.slf4j.Logger;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
@@ -57,7 +58,6 @@ import org.apache.cassandra.thrift.ThriftValidation;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkNotNull;
 import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
@@ -156,7 +156,7 @@ public class SelectStatement implements CQLStatement
                                    0,
                                    defaultParameters,
                                    selection,
-                                   StatementRestrictions.empty(cfm),
+                                   StatementRestrictions.empty(StatementType.SELECT, cfm),
                                    false,
                                    null,
                                    null);
@@ -790,7 +790,8 @@ public class SelectStatement implements CQLStatement
         {
             try
             {
-                return new StatementRestrictions(cfm,
+                return new StatementRestrictions(StatementType.SELECT,
+                                                 cfm,
                                                  whereClause,
                                                  boundNames,
                                                  selection.containsOnlyStaticColumns(),

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/StatementType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/StatementType.java b/src/java/org/apache/cassandra/cql3/statements/StatementType.java
new file mode 100644
index 0000000..d399931
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/statements/StatementType.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.statements;
+
+public enum StatementType
+{
+    INSERT
+    {
+        @Override
+        public boolean allowClusteringColumnSlices()
+        {
+            return false;
+        }
+    },
+    UPDATE
+    {
+
+        @Override
+        public boolean allowClusteringColumnSlices()
+        {
+            return false;
+        }
+    },
+    DELETE
+    {
+    },
+    SELECT
+    {
+        @Override
+        public boolean allowPartitionKeyRanges()
+        {
+            return true;
+        }
+
+        @Override
+        public boolean allowNonPrimaryKeyInWhereClause()
+        {
+            return true;
+        }
+
+        @Override
+        public boolean allowUseOfSecondaryIndices()
+        {
+            return true;
+        }
+    };
+
+    /**
+     * Checks if this type is an insert.
+     * @return <code>true</code> if this type is an insert, <code>false</code> otherwise.
+     */
+    public boolean isInsert()
+    {
+        return this == INSERT;
+    }
+
+    /**
+     * Checks if this type is an update.
+     * @return <code>true</code> if this type is an update, <code>false</code> otherwise.
+     */
+    public boolean isUpdate()
+    {
+        return this == UPDATE;
+    }
+
+    /**
+     * Checks if this type is a delete.
+     * @return <code>true</code> if this type is a delete, <code>false</code> otherwise.
+     */
+    public boolean isDelete()
+    {
+        return this == DELETE;
+    }
+
+    /**
+     * Checks if this type is a select.
+     * @return <code>true</code> if this type is a select, <code>false</code> otherwise.
+     */
+    public boolean isSelect()
+    {
+        return this == SELECT;
+    }
+
+    /**
+     * Checks if this statement allows the where clause to contain missing partition key components or token relations.
+     * @return <code>true</code> if this statement allows the where clause to contain missing partition key components
+     * or token relations, <code>false</code> otherwise.
+     */
+    public boolean allowPartitionKeyRanges()
+    {
+        return false;
+    }
+
+    /**
+     * Checks if this type of statement allows the where clause to contain clustering column slices.
+     * @return <code>true</code> if this type of statement allows the where clause to contain clustering column slices,
+     * <code>false</code> otherwise.
+     */
+    public boolean allowClusteringColumnSlices()
+    {
+        return true;
+    }
+
+    /**
+     * Checks if this type of statement allows non-primary key columns in the where clause.
+     * @return <code>true</code> if this type of statement allows non-primary key columns in the where clause,
+     * <code>false</code> otherwise.
+     */
+    public boolean allowNonPrimaryKeyInWhereClause()
+    {
+        return false;
+    }
+
+    /**
+     * Checks if this type of statement allows the use of secondary indices.
+     * @return <code>true</code> if this type of statement allows the use of secondary indices,
+     * <code>false</code> otherwise.
+     */
+    public boolean allowUseOfSecondaryIndices()
+    {
+        return false;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
index e19deaa..8fa16e1 100644
--- a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.cql3.statements;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -24,14 +25,18 @@ import java.util.List;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.cql3.*;
-import org.apache.cassandra.db.CBuilder;
+import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.CompactTables;
+import org.apache.cassandra.db.Slice;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
 
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkContainsNoDuplicates;
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkFalse;
+import static org.apache.cassandra.cql3.statements.RequestValidations.checkTrue;
+
 /**
  * An <code>UPDATE</code> statement parsed from a CQL query statement.
  *
@@ -40,9 +45,15 @@ public class UpdateStatement extends ModificationStatement
 {
     private static final Constants.Value EMPTY = new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER);
 
-    private UpdateStatement(StatementType type, int boundTerms, CFMetaData cfm, Attributes attrs)
+    private UpdateStatement(StatementType type,
+                            int boundTerms,
+                            CFMetaData cfm,
+                            Operations operations,
+                            StatementRestrictions restrictions,
+                            Conditions conditions,
+                            Attributes attrs)
     {
-        super(type, boundTerms, cfm, attrs);
+        super(type, boundTerms, cfm, operations, restrictions, conditions, attrs);
     }
 
     public boolean requireFullClusteringKey()
@@ -50,22 +61,22 @@ public class UpdateStatement extends ModificationStatement
         return true;
     }
 
-    public void addUpdateForKey(PartitionUpdate update, CBuilder cbuilder, UpdateParameters params)
-    throws InvalidRequestException
+    @Override
+    public void addUpdateForKey(PartitionUpdate update, Clustering clustering, UpdateParameters params)
     {
         if (updatesRegularRows())
         {
-            params.newRow(cbuilder.build());
+            params.newRow(clustering);
 
             // We update the row timestamp (ex-row marker) only on INSERT (#6782)
             // Further, COMPACT tables semantic differs from "CQL3" ones in that a row exists only if it has
             // a non-null column, so we don't want to set the row timestamp for them.
-            if (type == StatementType.INSERT && cfm.isCQLTable())
+            if (type.isInsert() && cfm.isCQLTable())
                 params.addPrimaryKeyLivenessInfo();
 
             List<Operation> updates = getRegularOperations();
 
-            // For compact tablw, when we translate it to thrift, we don't have a row marker. So we don't accept an insert/update
+            // For compact table, when we translate it to thrift, we don't have a row marker. So we don't accept an insert/update
             // that only sets the PK unless the is no declared non-PK columns (in the latter we just set the value empty).
 
             // For a dense layout, when we translate it to thrift, we don't have a row marker. So we don't accept an insert/update
@@ -73,10 +84,11 @@ public class UpdateStatement extends ModificationStatement
             // value is of type "EmptyType").
             if (cfm.isCompactTable() && updates.isEmpty())
             {
-                if (CompactTables.hasEmptyCompactValue(cfm))
-                    updates = Collections.<Operation>singletonList(new Constants.Setter(cfm.compactValueColumn(), EMPTY));
-                else
-                    throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfm.compactValueColumn().name));
+                checkTrue(CompactTables.hasEmptyCompactValue(cfm),
+                          "Column %s is mandatory for this COMPACT STORAGE table",
+                          cfm.compactValueColumn().name);
+
+                updates = Collections.<Operation>singletonList(new Constants.Setter(cfm.compactValueColumn(), EMPTY));
             }
 
             for (Operation op : updates)
@@ -96,6 +108,12 @@ public class UpdateStatement extends ModificationStatement
         params.validateIndexedColumns(update);
     }
 
+    @Override
+    public void addUpdateForKey(PartitionUpdate update, Slice slice, UpdateParameters params)
+    {
+        throw new UnsupportedOperationException();
+    }
+
     public static class ParsedInsert extends ModificationStatement.Parsed
     {
         private final List<ColumnIdentifier.Raw> columnNames;
@@ -121,52 +139,63 @@ public class UpdateStatement extends ModificationStatement
             this.columnValues = columnValues;
         }
 
-        protected ModificationStatement prepareInternal(CFMetaData cfm, VariableSpecifications boundNames, Attributes attrs) throws InvalidRequestException
+        @Override
+        protected ModificationStatement prepareInternal(CFMetaData cfm,
+                                                        VariableSpecifications boundNames,
+                                                        Conditions conditions,
+                                                        Attributes attrs)
         {
-            UpdateStatement stmt = new UpdateStatement(ModificationStatement.StatementType.INSERT, boundNames.size(), cfm, attrs);
 
             // Created from an INSERT
-            if (stmt.isCounter())
-                throw new InvalidRequestException("INSERT statements are not allowed on counter tables, use UPDATE instead");
+            checkFalse(cfm.isCounter(), "INSERT statements are not allowed on counter tables, use UPDATE instead");
 
-            if (columnNames == null)
-                throw new InvalidRequestException("Column names for INSERT must be provided when using VALUES");
-            if (columnNames.isEmpty())
-                throw new InvalidRequestException("No columns provided to INSERT");
-            if (columnNames.size() != columnValues.size())
-                throw new InvalidRequestException("Unmatched column names/values");
+            checkFalse(columnNames == null, "Column names for INSERT must be provided when using VALUES");
+            checkFalse(columnNames.isEmpty(), "No columns provided to INSERT");
+            checkFalse(columnNames.size() != columnValues.size(), "Unmatched column names/values");
+            checkContainsNoDuplicates(columnNames, "The column names contains duplicates");
+
+            List<Relation> relations = new ArrayList<>();
+            Operations operations = new Operations();
+            boolean hasClusteringColumnsSet = false;
 
-            String ks = keyspace();
             for (int i = 0; i < columnNames.size(); i++)
             {
-                ColumnIdentifier id = columnNames.get(i).prepare(cfm);
-                ColumnDefinition def = cfm.getColumnDefinition(id);
-                if (def == null)
-                    throw new InvalidRequestException(String.format("Unknown identifier %s", id));
+                ColumnDefinition def = getColumnDefinition(cfm, columnNames.get(i));
 
-                for (int j = 0; j < i; j++)
-                {
-                    ColumnIdentifier otherId = columnNames.get(j).prepare(cfm);
-                    if (id.equals(otherId))
-                        throw new InvalidRequestException(String.format("Multiple definitions found for column %s", id));
-                }
+                if (def.isClusteringColumn())
+                    hasClusteringColumnsSet = true;
 
                 Term.Raw value = columnValues.get(i);
+
                 if (def.isPrimaryKeyColumn())
                 {
-                    Term t = value.prepare(ks, def);
-                    t.collectMarkerSpecification(boundNames);
-                    stmt.addKeyValue(def, t);
+                    relations.add(new SingleColumnRelation(columnNames.get(i), Operator.EQ, value));
                 }
                 else
                 {
-                    Operation operation = new Operation.SetValue(value).prepare(ks, def);
+                    Operation operation = new Operation.SetValue(value).prepare(keyspace(), def);
                     operation.collectMarkerSpecification(boundNames);
-                    stmt.addOperation(operation);
+                    operations.add(operation);
                 }
             }
 
-            return stmt;
+            boolean applyOnlyToStaticColumns = appliesOnlyToStaticColumns(operations, conditions) && !hasClusteringColumnsSet;
+
+            StatementRestrictions restrictions = new StatementRestrictions(StatementType.INSERT,
+                                                                           cfm,
+                                                                           relations,
+                                                                           boundNames,
+                                                                           applyOnlyToStaticColumns,
+                                                                           false,
+                                                                           false);
+
+            return new UpdateStatement(StatementType.INSERT,
+                                       boundNames.size(),
+                                       cfm,
+                                       operations,
+                                       restrictions,
+                                       conditions,
+                                       attrs);
         }
     }
 
@@ -183,24 +212,58 @@ public class UpdateStatement extends ModificationStatement
             this.jsonValue = jsonValue;
         }
 
-        protected ModificationStatement prepareInternal(CFMetaData cfm, VariableSpecifications boundNames, Attributes attrs) throws InvalidRequestException
+        @Override
+        protected ModificationStatement prepareInternal(CFMetaData cfm,
+                                                        VariableSpecifications boundNames,
+                                                        Conditions conditions,
+                                                        Attributes attrs)
         {
-            UpdateStatement stmt = new UpdateStatement(ModificationStatement.StatementType.INSERT, boundNames.size(), cfm, attrs);
-            if (stmt.isCounter())
-                throw new InvalidRequestException("INSERT statements are not allowed on counter tables, use UPDATE instead");
+            checkFalse(cfm.isCounter(), "INSERT statements are not allowed on counter tables, use UPDATE instead");
 
             Collection<ColumnDefinition> defs = cfm.allColumns();
             Json.Prepared prepared = jsonValue.prepareAndCollectMarkers(cfm, defs, boundNames);
 
+            List<Relation> relations = new ArrayList<>();
+            Operations operations = new Operations();
+            boolean hasClusteringColumnsSet = false;
+
             for (ColumnDefinition def : defs)
             {
+                if (def.isClusteringColumn())
+                    hasClusteringColumnsSet = true;
+
+                Term.Raw raw = prepared.getRawTermForColumn(def);
                 if (def.isPrimaryKeyColumn())
-                    stmt.addKeyValue(def, prepared.getPrimaryKeyValueForColumn(def));
+                {
+                    relations.add(new SingleColumnRelation(new ColumnIdentifier.ColumnIdentifierValue(def.name),
+                                                           Operator.EQ,
+                                                           raw));
+                }
                 else
-                    stmt.addOperation(prepared.getSetOperationForColumn(def));
+                {
+                    Operation operation = new Operation.SetValue(raw).prepare(keyspace(), def);
+                    operation.collectMarkerSpecification(boundNames);
+                    operations.add(operation);
+                }
             }
 
-            return stmt;
+            boolean applyOnlyToStaticColumns = appliesOnlyToStaticColumns(operations, conditions) && !hasClusteringColumnsSet;
+
+            StatementRestrictions restrictions = new StatementRestrictions(StatementType.INSERT,
+                                                                           cfm,
+                                                                           relations,
+                                                                           boundNames,
+                                                                           applyOnlyToStaticColumns,
+                                                                           false,
+                                                                           false);
+
+            return new UpdateStatement(StatementType.INSERT,
+                                       boundNames.size(),
+                                       cfm,
+                                       operations,
+                                       restrictions,
+                                       conditions,
+                                       attrs);
         }
     }
 
@@ -232,32 +295,39 @@ public class UpdateStatement extends ModificationStatement
             this.whereClause = whereClause;
         }
 
-        protected ModificationStatement prepareInternal(CFMetaData cfm, VariableSpecifications boundNames, Attributes attrs) throws InvalidRequestException
+        @Override
+        protected ModificationStatement prepareInternal(CFMetaData cfm,
+                                                        VariableSpecifications boundNames,
+                                                        Conditions conditions,
+                                                        Attributes attrs)
         {
-            UpdateStatement stmt = new UpdateStatement(ModificationStatement.StatementType.UPDATE, boundNames.size(), cfm, attrs);
+            Operations operations = new Operations();
 
             for (Pair<ColumnIdentifier.Raw, Operation.RawUpdate> entry : updates)
             {
-                ColumnDefinition def = cfm.getColumnDefinition(entry.left.prepare(cfm));
-                if (def == null)
-                    throw new InvalidRequestException(String.format("Unknown identifier %s", entry.left));
+                ColumnDefinition def = getColumnDefinition(cfm, entry.left);
+
+                checkFalse(def.isPrimaryKeyColumn(), "PRIMARY KEY part %s found in SET part", def.name);
 
                 Operation operation = entry.right.prepare(keyspace(), def);
                 operation.collectMarkerSpecification(boundNames);
-
-                switch (def.kind)
-                {
-                    case PARTITION_KEY:
-                    case CLUSTERING:
-                        throw new InvalidRequestException(String.format("PRIMARY KEY part %s found in SET part", entry.left));
-                    default:
-                        stmt.addOperation(operation);
-                        break;
-                }
+                operations.add(operation);
             }
 
-            stmt.processWhereClause(whereClause, boundNames);
-            return stmt;
+            StatementRestrictions restrictions = newRestrictions(StatementType.UPDATE,
+                                                                 cfm,
+                                                                 boundNames,
+                                                                 operations,
+                                                                 whereClause,
+                                                                 conditions);
+
+            return new UpdateStatement(StatementType.UPDATE,
+                                       boundNames.size(),
+                                       cfm,
+                                       operations,
+                                       restrictions,
+                                       conditions,
+                                       attrs);
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/cql3/statements/UpdatesCollector.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdatesCollector.java b/src/java/org/apache/cassandra/cql3/statements/UpdatesCollector.java
new file mode 100644
index 0000000..f291000
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/statements/UpdatesCollector.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.statements;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import org.apache.cassandra.db.CounterMutation;
+
+import org.apache.cassandra.db.Mutation;
+import org.apache.cassandra.db.partitions.PartitionUpdate;
+import org.apache.cassandra.db.PartitionColumns;
+import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.*;
+
+/**
+ * Utility class to collect updates.
+ *
+ * <p>In a batch statement we don't want to recreate mutations every time as this is particularly inefficient when
+ * applying multiple batches to the same partition (see #6737). </p>
+ *
+ */
+final class UpdatesCollector
+{
+    /**
+     * The columns that will be updated.
+     */
+    private final PartitionColumns updatedColumns;
+
+    /**
+     * The estimated number of updated row.
+     */
+    private final int updatedRows;
+
+    /**
+     * The mutations per keyspace.
+     */
+    private final Map<String, Map<ByteBuffer, IMutation>> mutations = new HashMap<>();
+
+    public UpdatesCollector(PartitionColumns updatedColumns, int updatedRows)
+    {
+        super();
+        this.updatedColumns = updatedColumns;
+        this.updatedRows = updatedRows;
+    }
+
+    /**
+     * Gets the <code>PartitionUpdate</code> for the specified column family and key. If the update does not
+     * exist it will be created.
+     *
+     * @param cfm the column family meta data
+     * @param dk the partition key
+     * @param consistency the consistency level
+     * @return the <code>PartitionUpdate</code> for the specified column family and key
+     */
+    public PartitionUpdate getPartitionUpdate(CFMetaData cfm, DecoratedKey dk, ConsistencyLevel consistency)
+    {
+        Mutation mut = getMutation(cfm, dk, consistency);
+        PartitionUpdate upd = mut.get(cfm);
+        if (upd == null)
+        {
+            upd = new PartitionUpdate(cfm, dk, updatedColumns, updatedRows);
+            mut.add(upd);
+        }
+        return upd;
+    }
+
+    private Mutation getMutation(CFMetaData cfm, DecoratedKey dk, ConsistencyLevel consistency)
+    {
+        String ksName = cfm.ksName;
+        IMutation mutation = keyspaceMap(ksName).get(dk.getKey());
+        if (mutation == null)
+        {
+            Mutation mut = new Mutation(ksName, dk);
+            mutation = cfm.isCounter() ? new CounterMutation(mut, consistency) : mut;
+            keyspaceMap(ksName).put(dk.getKey(), mutation);
+            return mut;
+        }
+        return cfm.isCounter() ? ((CounterMutation) mutation).getMutation() : (Mutation) mutation;
+    }
+
+    /**
+     * Returns a collection containing all the mutations.
+     * @return a collection containing all the mutations.
+     */
+    public Collection<IMutation> toMutations()
+    {
+        // The case where all statements are on the same keyspace is pretty common
+        if (mutations.size() == 1)
+            return mutations.values().iterator().next().values();
+
+        List<IMutation> ms = new ArrayList<>();
+        for (Map<ByteBuffer, IMutation> ksMap : mutations.values())
+            ms.addAll(ksMap.values());
+
+        return ms;
+    }
+
+    /**
+     * Returns the key-mutation mappings for the specified keyspace.
+     *
+     * @param ksName the keyspace name
+     * @return the key-mutation mappings for the specified keyspace.
+     */
+    private Map<ByteBuffer, IMutation> keyspaceMap(String ksName)
+    {
+        Map<ByteBuffer, IMutation> ksMap = mutations.get(ksName);
+        if (ksMap == null)
+        {
+            ksMap = new HashMap<>();
+            mutations.put(ksName, ksMap);
+        }
+        return ksMap;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/db/CBuilder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CBuilder.java b/src/java/org/apache/cassandra/db/CBuilder.java
index fe130dc..94feb93 100644
--- a/src/java/org/apache/cassandra/db/CBuilder.java
+++ b/src/java/org/apache/cassandra/db/CBuilder.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db;
 
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -193,17 +192,17 @@ public abstract class CBuilder
 
         public Clustering buildWith(ByteBuffer value)
         {
-            assert size+1 == type.size();
+            assert size+1 <= type.size();
 
-            ByteBuffer[] newValues = Arrays.copyOf(values, size+1);
+            ByteBuffer[] newValues = Arrays.copyOf(values, type.size());
             newValues[size] = value;
             return new Clustering(newValues);
         }
 
         public Clustering buildWith(List<ByteBuffer> newValues)
         {
-            assert size + newValues.size() == type.size();
-            ByteBuffer[] buffers = Arrays.copyOf(values, size + newValues.size());
+            assert size + newValues.size() <= type.size();
+            ByteBuffer[] buffers = Arrays.copyOf(values, type.size());
             int newSize = size;
             for (ByteBuffer value : newValues)
                 buffers[newSize++] = value;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/db/RangeTombstone.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RangeTombstone.java b/src/java/org/apache/cassandra/db/RangeTombstone.java
index 8865b0f..54199ab 100644
--- a/src/java/org/apache/cassandra/db/RangeTombstone.java
+++ b/src/java/org/apache/cassandra/db/RangeTombstone.java
@@ -19,7 +19,8 @@ package org.apache.cassandra.db;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.List;
+import java.util.Objects;
 
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.rows.RangeTombstoneMarker;
@@ -27,6 +28,7 @@ import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.memory.AbstractAllocator;
 
+
 /**
  * A range tombstone is a tombstone that covers a slice/range of rows.
  * <p>

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/db/Slices.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Slices.java b/src/java/org/apache/cassandra/db/Slices.java
index bde9d96..db34c86 100644
--- a/src/java/org/apache/cassandra/db/Slices.java
+++ b/src/java/org/apache/cassandra/db/Slices.java
@@ -154,6 +154,15 @@ public abstract class Slices implements Iterable<Slice>
     public abstract String toCQLString(CFMetaData metadata);
 
     /**
+     * Checks if this <code>Slices</code> is empty.
+     * @return <code>true</code> if this <code>Slices</code> is empty, <code>false</code> otherwise.
+     */
+    public final boolean isEmpty()
+    {
+        return size() == 0;
+    }
+
+    /**
      * In simple object that allows to test the inclusion of rows in those slices assuming those rows
      * are passed (to {@link #includes}) in clustering order (or reverse clustering ordered, depending
      * of the argument passed to {@link #inOrderTester}).

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
index 7ae5651..4a2af66 100644
--- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
@@ -206,7 +206,7 @@ public class CQLSSTableWriter implements Closeable
 
         QueryOptions options = QueryOptions.forInternalCalls(null, values);
         List<ByteBuffer> keys = insert.buildPartitionKeyNames(options);
-        CBuilder clustering = insert.createClustering(options);
+        SortedSet<Clustering> clusterings = insert.createClustering(options);
 
         long now = System.currentTimeMillis() * 1000;
         // Note that we asks indexes to not validate values (the last 'false' arg below) because that triggers a 'Keyspace.open'
@@ -222,7 +222,10 @@ public class CQLSSTableWriter implements Closeable
         try
         {
             for (ByteBuffer key : keys)
-                insert.addUpdateForKey(writer.getUpdateFor(key), clustering, params);
+            {
+                for (Clustering clustering : clusterings)
+                    insert.addUpdateForKey(writer.getUpdateFor(key), clustering, params);
+            }
             return this;
         }
         catch (SSTableSimpleUnsortedWriter.SyncException e)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/MaterializedViewTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/MaterializedViewTest.java b/test/unit/org/apache/cassandra/cql3/MaterializedViewTest.java
index 6442a11..75379fb 100644
--- a/test/unit/org/apache/cassandra/cql3/MaterializedViewTest.java
+++ b/test/unit/org/apache/cassandra/cql3/MaterializedViewTest.java
@@ -448,6 +448,39 @@ public class MaterializedViewTest extends CQLTester
     }
 
     @Test
+    public void testRangeTombstone3() throws Throwable
+    {
+        createTable("CREATE TABLE %s (" +
+                    "k int, " +
+                    "asciival ascii, " +
+                    "bigintval bigint, " +
+                    "textval1 text, " +
+                    "PRIMARY KEY((k, asciival), bigintval)" +
+                    ")");
+
+        execute("USE " + keyspace());
+        executeNet(protocolVersion, "USE " + keyspace());
+
+        createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE textval1 IS NOT NULL AND k IS NOT NULL AND asciival IS NOT NULL AND bigintval IS NOT NULL PRIMARY KEY ((textval1, k), asciival, bigintval)");
+
+        for (int i = 0; i < 100; i++)
+            updateMV("INSERT into %s (k,asciival,bigintval,textval1)VALUES(?,?,?,?)", 0, "foo", (long) i % 2, "bar" + i);
+
+        Assert.assertEquals(1, execute("select * from %s where k = 0 and asciival = 'foo' and bigintval = 0").size());
+        Assert.assertEquals(1, execute("select * from %s where k = 0 and asciival = 'foo' and bigintval = 1").size());
+
+
+        Assert.assertEquals(2, execute("select * from %s").size());
+        Assert.assertEquals(2, execute("select * from mv").size());
+
+        //Write a RT and verify the data is removed from index
+        updateMV("DELETE FROM %s WHERE k = ? AND asciival = ? and bigintval >= ?", 0, "foo", 0L);
+
+        Assert.assertEquals(0, execute("select * from %s").size());
+        Assert.assertEquals(0, execute("select * from mv").size());
+    }
+
+    @Test
     public void testCompoundPartitionKey() throws Throwable
     {
         createTable("CREATE TABLE %s (" +

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/entities/UFAuthTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFAuthTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFAuthTest.java
index 25fe227..6993bec 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UFAuthTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFAuthTest.java
@@ -25,8 +25,6 @@ import com.google.common.collect.ImmutableSet;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.auth.*;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -49,8 +47,6 @@ import static org.junit.Assert.fail;
 
 public class UFAuthTest extends CQLTester
 {
-    private static final Logger logger = LoggerFactory.getLogger(UFAuthTest.class);
-
     String roleName = "test_role";
     AuthenticatedUser user;
     RoleResource role;
@@ -319,14 +315,14 @@ public class UFAuthTest extends CQLTester
     public void systemFunctionsRequireNoExplicitPrivileges() throws Throwable
     {
         // with terminal arguments, so evaluated at prepare time
-        String cql = String.format("UPDATE %s SET v2 = 0 WHERE k = blobasint(intasblob(0))",
+        String cql = String.format("UPDATE %s SET v2 = 0 WHERE k = blobasint(intasblob(0)) and v1 = 0",
                                    KEYSPACE + "." + currentTable());
         getStatement(cql).checkAccess(clientState);
 
         // with non-terminal arguments, so evaluated at execution
         String functionName = createSimpleFunction();
         grantExecuteOnFunction(functionName);
-        cql = String.format("UPDATE %s SET v2 = 0 WHERE k = blobasint(intasblob(%s))",
+        cql = String.format("UPDATE %s SET v2 = 0 WHERE k = blobasint(intasblob(%s)) and v1 = 0",
                             KEYSPACE + "." + currentTable(),
                             functionCall(functionName));
         getStatement(cql).checkAccess(clientState);


[2/5] cassandra git commit: Allow range deletions in CQL

Posted by bl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/entities/UFIdentificationTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UFIdentificationTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UFIdentificationTest.java
index 8ab8a23..b2288e4 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UFIdentificationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UFIdentificationTest.java
@@ -97,15 +97,15 @@ public class UFIdentificationTest extends CQLTester
     @Test
     public void testSimpleModificationStatement() throws Throwable
     {
-        assertFunctions(cql("INSERT INTO %s (key, t_sc) VALUES (0, %s)", functionCall(tFunc, "'foo'")), tFunc);
-        assertFunctions(cql("INSERT INTO %s (key, i_cc) VALUES (0, %s)", functionCall(iFunc, "1")), iFunc);
-        assertFunctions(cql("INSERT INTO %s (key, t_cc) VALUES (0, %s)", functionCall(tFunc, "'foo'")), tFunc);
-        assertFunctions(cql("INSERT INTO %s (key, i_val) VALUES (0, %s)", functionCall(iFunc, "1")), iFunc);
-        assertFunctions(cql("INSERT INTO %s (key, l_val) VALUES (0, %s)", functionCall(lFunc, "[1]")), lFunc);
-        assertFunctions(cql("INSERT INTO %s (key, s_val) VALUES (0, %s)", functionCall(sFunc, "{1}")), sFunc);
-        assertFunctions(cql("INSERT INTO %s (key, m_val) VALUES (0, %s)", functionCall(mFunc, "{1:1}")), mFunc);
-        assertFunctions(cql("INSERT INTO %s (key, udt_val) VALUES (0,%s)", functionCall(udtFunc, "{i : 1, t : 'foo'}")), udtFunc);
-        assertFunctions(cql("INSERT INTO %s (key, u_val) VALUES (0, %s)", functionCall(uFunc, "now()")), uFunc, "system.now");
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, t_sc) VALUES (0, 0, 'A', %s)", functionCall(tFunc, "'foo'")), tFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc) VALUES (0, %s, 'A')", functionCall(iFunc, "1")), iFunc);
+        assertFunctions(cql("INSERT INTO %s (key, t_cc, i_cc) VALUES (0, %s, 1)", functionCall(tFunc, "'foo'")), tFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, i_val) VALUES (0, 0, 'A', %s)", functionCall(iFunc, "1")), iFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, l_val) VALUES (0, 0, 'A', %s)", functionCall(lFunc, "[1]")), lFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, s_val) VALUES (0, 0, 'A', %s)", functionCall(sFunc, "{1}")), sFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, m_val) VALUES (0, 0, 'A', %s)", functionCall(mFunc, "{1:1}")), mFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, udt_val) VALUES (0, 0, 'A', %s)", functionCall(udtFunc, "{i : 1, t : 'foo'}")), udtFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, u_val) VALUES (0, 0, 'A', %s)", functionCall(uFunc, "now()")), uFunc, "system.now");
     }
 
     @Test
@@ -113,48 +113,48 @@ public class UFIdentificationTest extends CQLTester
     {
         String iFunc2 = createEchoFunction("int");
         String mapValue = String.format("{%s:%s}", functionCall(iFunc, "1"), functionCall(iFunc2, "1"));
-        assertFunctions(cql("INSERT INTO %s (key, m_val) VALUES (0, %s)", mapValue), iFunc, iFunc2);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, m_val) VALUES (0, 0, 'A', %s)", mapValue), iFunc, iFunc2);
 
         String listValue = String.format("[%s]", functionCall(iFunc, "1"));
-        assertFunctions(cql("INSERT INTO %s (key, l_val) VALUES (0, %s)", listValue), iFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, l_val) VALUES (0, 0, 'A',  %s)", listValue), iFunc);
 
         String setValue = String.format("{%s}", functionCall(iFunc, "1"));
-        assertFunctions(cql("INSERT INTO %s (key, s_val) VALUES (0, %s)", setValue), iFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, s_val) VALUES (0, 0, 'A', %s)", setValue), iFunc);
     }
 
     @Test
     public void testNonTerminalUDTLiterals() throws Throwable
     {
         String udtValue = String.format("{ i: %s, t : %s } ", functionCall(iFunc, "1"), functionCall(tFunc, "'foo'"));
-        assertFunctions(cql("INSERT INTO %s (key, udt_val) VALUES (0, %s)", udtValue), iFunc, tFunc);
+        assertFunctions(cql("INSERT INTO %s (key, i_cc, t_cc, udt_val) VALUES (0, 0, 'A', %s)", udtValue), iFunc, tFunc);
     }
 
     @Test
     public void testModificationStatementWithConditions() throws Throwable
     {
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF t_sc=%s", functionCall(tFunc, "'foo'")), tFunc);
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF i_val=%s", functionCall(iFunc, "1")), iFunc);
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF l_val=%s", functionCall(lFunc, "[1]")), lFunc);
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF s_val=%s", functionCall(sFunc, "{1}")), sFunc);
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF m_val=%s", functionCall(mFunc, "{1:1}")), mFunc);
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF t_sc=%s", functionCall(tFunc, "'foo'")), tFunc);
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF i_val=%s", functionCall(iFunc, "1")), iFunc);
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF l_val=%s", functionCall(lFunc, "[1]")), lFunc);
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF s_val=%s", functionCall(sFunc, "{1}")), sFunc);
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF m_val=%s", functionCall(mFunc, "{1:1}")), mFunc);
 
 
         String iFunc2 = createEchoFunction("int");
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF i_val IN (%s, %S)",
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF i_val IN (%s, %S)",
                             functionCall(iFunc, "1"),
                             functionCall(iFunc2, "2")),
                         iFunc, iFunc2);
 
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF u_val=%s",
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF u_val=%s",
                             functionCall(uFunc, "now()")),
                         uFunc, "system.now");
 
         // conditions on collection elements
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF l_val[%s] = %s",
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF l_val[%s] = %s",
                             functionCall(iFunc, "1"),
                             functionCall(iFunc2, "1")),
                         iFunc, iFunc2);
-        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 IF m_val[%s] = %s",
+        assertFunctions(cql("UPDATE %s SET i_val=0 WHERE key=0 AND i_cc = 0 AND t_cc = 'A' IF m_val[%s] = %s",
                             functionCall(iFunc, "1"),
                             functionCall(iFunc2, "1")),
                         iFunc, iFunc2);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/operations/BatchTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/BatchTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/BatchTest.java
index 1447845..c0d1df5 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/BatchTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/BatchTest.java
@@ -18,8 +18,12 @@
 
 package org.apache.cassandra.cql3.validation.operations;
 
+import java.util.Arrays;
+
 import org.junit.Test;
 
+import static org.apache.commons.lang3.StringUtils.isEmpty;
+
 import org.apache.cassandra.cql3.CQLTester;
 
 public class BatchTest extends CQLTester
@@ -91,16 +95,81 @@ public class BatchTest extends CQLTester
         createTable("CREATE TABLE %s (k int PRIMARY KEY, s text, i int)");
 
         // test batch and update
-        String qualifiedTable = keyspace() + "." + currentTable();
         execute("BEGIN BATCH " +
-                "INSERT INTO %s (k, s, i) VALUES (100, 'batchtext', 7); " +
-                "INSERT INTO " + qualifiedTable + " (k, s, i) VALUES (111, 'batchtext', 7); " +
-                "UPDATE " + qualifiedTable + " SET s=?, i=? WHERE k = 100; " +
-                "UPDATE " + qualifiedTable + " SET s=?, i=? WHERE k=111; " +
+                "INSERT INTO %1$s (k, s, i) VALUES (100, 'batchtext', 7); " +
+                "INSERT INTO %1$s (k, s, i) VALUES (111, 'batchtext', 7); " +
+                "UPDATE %1$s SET s=?, i=? WHERE k = 100; " +
+                "UPDATE %1$s SET s=?, i=? WHERE k=111; " +
                 "APPLY BATCH;", null, unset(), unset(), null);
         assertRows(execute("SELECT k, s, i FROM %s where k in (100,111)"),
                    row(100, null, 7),
                    row(111, "batchtext", null)
         );
     }
+
+    @Test
+    public void testBatchRangeDelete() throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                "clustering int," +
+                "value int," +
+                " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
+
+        int value = 0;
+        for (int partitionKey = 0; partitionKey < 4; partitionKey++)
+            for (int clustering1 = 0; clustering1 < 5; clustering1++)
+                execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (?, ?, ?)",
+                        partitionKey, clustering1, value++);
+
+        execute("BEGIN BATCH " +
+                "DELETE FROM %1$s WHERE partitionKey = 1;" +
+                "DELETE FROM %1$s WHERE partitionKey = 0 AND  clustering >= 4;" +
+                "DELETE FROM %1$s WHERE partitionKey = 0 AND clustering <= 0;" +
+                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering >= 0 AND clustering <= 3;" +
+                "DELETE FROM %1$s WHERE partitionKey = 2 AND clustering <= 3 AND clustering >= 4;" +
+                "DELETE FROM %1$s WHERE partitionKey = 3 AND (clustering) >= (3) AND (clustering) <= (6);" +
+                "APPLY BATCH;");
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 1, 1),
+                   row(0, 2, 2),
+                   row(0, 3, 3),
+                   row(2, 4, 14),
+                   row(3, 0, 15),
+                   row(3, 1, 16),
+                   row(3, 2, 17));
+    }
+
+    @Test
+    public void testBatchUpdate() throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                "clustering_1 int," +
+                "value int," +
+                " PRIMARY KEY (partitionKey, clustering_1))");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 2, 2)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 3, 3)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 4, 4)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 5, 5)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 6, 6)");
+
+        execute("BEGIN BATCH " +
+                "UPDATE %1$s SET value = 7 WHERE partitionKey = 0 AND clustering_1 = 1" +
+                "UPDATE %1$s SET value = 8 WHERE partitionKey = 0 AND (clustering_1) = (2)" +
+                "UPDATE %1$s SET value = 10 WHERE partitionKey = 0 AND clustering_1 IN (3, 4)" +
+                "UPDATE %1$s SET value = 20 WHERE partitionKey = 0 AND (clustering_1) IN ((5), (6))" +
+                "APPLY BATCH;");
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, 0),
+                   row(0, 1, 7),
+                   row(0, 2, 8),
+                   row(0, 3, 10),
+                   row(0, 4, 10),
+                   row(0, 5, 20),
+                   row(0, 6, 20));
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
index 476ec83..5d9ef8f 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DeleteTest.java
@@ -26,7 +26,7 @@ import java.util.List;
 import org.junit.Test;
 
 import org.apache.cassandra.cql3.CQLTester;
-
+import static org.apache.commons.lang3.StringUtils.isEmpty;
 import static org.junit.Assert.assertEquals;
 
 public class DeleteTest extends CQLTester
@@ -326,4 +326,683 @@ public class DeleteTest extends CQLTester
 
         assertEmpty(execute("select * from %s  where a=1 and b=1"));
     }
+    @Test
+    public void testDeleteWithNoClusteringColumns() throws Throwable
+    {
+        testDeleteWithNoClusteringColumns(false);
+        testDeleteWithNoClusteringColumns(true);
+    }
+
+    private void testDeleteWithNoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] {"", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int PRIMARY KEY," +
+                                      "value int)" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
+            execute("INSERT INTO %s (partitionKey, value) VALUES (1, 1)");
+            execute("INSERT INTO %s (partitionKey, value) VALUES (2, 2)");
+            execute("INSERT INTO %s (partitionKey, value) VALUES (3, 3)");
+            flush(forceFlush);
+
+            execute("DELETE value FROM %s WHERE partitionKey = ?", 0);
+            flush(forceFlush);
+
+            if (isEmpty(compactOption))
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                           row(0, null));
+            }
+            else
+            {
+                assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ?", 0));
+            }
+
+            execute("DELETE FROM %s WHERE partitionKey IN (?, ?)", 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s"),
+                       row(2, 2),
+                       row(3, 3));
+
+            // test invalid queries
+
+            // token function
+            assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                                 "DELETE FROM %s WHERE token(partitionKey) = token(?)", 0);
+
+            // multiple time same primary key element in WHERE clause
+            assertInvalidMessage("partitionkey cannot be restricted by more than one relation if it includes an Equal",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND partitionKey = ?", 0, 1);
+
+            // unknown identifiers
+            assertInvalidMessage("Unknown identifier unknown",
+                                 "DELETE unknown FROM %s WHERE partitionKey = ?", 0);
+
+            assertInvalidMessage("Undefined name partitionkey1 in where clause ('partitionkey1 = ?')",
+                                 "DELETE FROM %s WHERE partitionKey1 = ?", 0);
+
+            // Invalid operator in the where clause
+            assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                                 "DELETE FROM %s WHERE partitionKey > ? ", 0);
+
+            assertInvalidMessage("Cannot use CONTAINS on non-collection column partitionkey",
+                                 "DELETE FROM %s WHERE partitionKey CONTAINS ?", 0);
+
+            // Non primary key in the where clause
+            assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND value = ?", 0, 1);
+        }
+    }
+
+    @Test
+    public void testDeleteWithOneClusteringColumns() throws Throwable
+    {
+        testDeleteWithOneClusteringColumns(false);
+        testDeleteWithOneClusteringColumns(true);
+    }
+
+    private void testDeleteWithOneClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] {"", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering))" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 2, 2)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 3, 3)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 4, 4)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 5, 5)");
+            execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (1, 0, 6)");
+            flush(forceFlush);
+
+            execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
+            flush(forceFlush);
+            if (isEmpty(compactOption))
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1),
+                           row(0, 1, null));
+            }
+            else
+            {
+                assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
+            }
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) = (?)", 0, 1);
+            flush(forceFlush);
+            assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1));
+
+            execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering = ?", 0, 1, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                       row(0, 2, 2),
+                       row(0, 3, 3),
+                       row(0, 4, 4),
+                       row(0, 5, 5));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) IN ((?), (?))", 0, 4, 5);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                       row(0, 2, 2),
+                       row(0, 3, 3));
+
+            // test invalid queries
+
+            // missing primary key element
+            assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                                 "DELETE FROM %s WHERE clustering = ?", 1);
+
+            // token function
+            assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                                 "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering = ? ", 0, 1);
+
+            // multiple time same primary key element in WHERE clause
+            assertInvalidMessage("clustering cannot be restricted by more than one relation if it includes an Equal",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND clustering = ?", 0, 1, 1);
+
+            // unknown identifiers
+            assertInvalidMessage("Unknown identifier value1",
+                                 "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering = ?", 0, 1);
+
+            assertInvalidMessage("Undefined name partitionkey1 in where clause ('partitionkey1 = ?')",
+                                 "DELETE FROM %s WHERE partitionKey1 = ? AND clustering = ?", 0, 1);
+
+            assertInvalidMessage("Undefined name clustering_3 in where clause ('clustering_3 = ?')",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering_3 = ?", 0, 1);
+
+            // Invalid operator in the where clause
+            assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                                 "DELETE FROM %s WHERE partitionKey > ? AND clustering = ?", 0, 1);
+
+            assertInvalidMessage("Cannot use CONTAINS on non-collection column partitionkey",
+                                 "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering = ?", 0, 1);
+
+            // Non primary key in the where clause
+            String errorMsg = isEmpty(compactOption) ? "Non PRIMARY KEY columns found in where clause: value"
+                                                     : "Predicates on the non-primary-key column (value) of a COMPACT table are not yet supported";
+
+            assertInvalidMessage(errorMsg,
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering = ? AND value = ?", 0, 1, 3);
+        }
+    }
+
+    @Test
+    public void testDeleteWithTwoClusteringColumns() throws Throwable
+    {
+        testDeleteWithTwoClusteringColumns(false);
+        testDeleteWithTwoClusteringColumns(true);
+    }
+
+    /**
+     * Exercises DELETE statements on a table with two clustering columns, for both regular and
+     * COMPACT STORAGE tables, then validates the error messages produced by invalid DELETEs.
+     *
+     * @param forceFlush when {@code true}, data is flushed to sstables after every mutation so the
+     *                   scenarios are verified against sstable data as well as memtable data
+     */
+    private void testDeleteWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
+
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 2, 2)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 3, 3)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 1, 4)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 1, 2, 5)");
+            execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 6)");
+            flush(forceFlush);
+
+            // Deleting a single regular column: a regular table keeps the row (value becomes null),
+            // a COMPACT table has no row left once its only regular column is removed.
+            execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+            flush(forceFlush);
+
+            if (isEmpty(compactOption))
+            {
+                assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                                   0, 1, 1),
+                           row(0, 1, 1, null));
+            }
+            else
+            {
+                assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                                   0, 1, 1));
+            }
+
+            // Whole-row delete using a multi-column (tuple) equality on the clustering columns.
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) = (?, ?)", 0, 1, 1);
+            flush(forceFlush);
+            assertEmpty(execute("SELECT value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                                0, 1, 1));
+
+            // Delete the same clustering from several partitions with IN on the partition key.
+            execute("DELETE FROM %s WHERE partitionKey IN (?, ?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 0, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                       row(0, 0, 1, 1),
+                       row(0, 0, 2, 2),
+                       row(0, 0, 3, 3),
+                       row(0, 1, 2, 5));
+
+            Object[][] rows;
+            if (isEmpty(compactOption))
+            {
+                rows = new Object[][]{row(0, 0, 1, 1),
+                                      row(0, 0, 2, null),
+                                      row(0, 0, 3, null),
+                                      row(0, 1, 2, 5)};
+            }
+            else
+            {
+                rows = new Object[][]{row(0, 0, 1, 1), row(0, 1, 2, 5)};
+            }
+
+            // Column deletion with IN on the last clustering column.
+            execute("DELETE value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 IN (?, ?)", 0, 0, 2, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
+
+            if (isEmpty(compactOption))
+            {
+                rows = new Object[][]{row(0, 0, 1, 1),
+                                      row(0, 0, 3, null)};
+            }
+            else
+            {
+                rows = new Object[][]{row(0, 0, 1, 1)};
+            }
+
+            // Multi-column IN on both clustering columns.
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) IN ((?, ?), (?, ?))", 0, 0, 2, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1), rows);
+
+            // Mix of a multi-column IN on the first clustering column and an EQ on the second.
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?)) AND clustering_2 = ?", 0, 0, 2, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                       row(0, 0, 1, 1));
+
+            // test invalid queries
+
+            // missing primary key element
+            assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                                 "DELETE FROM %s WHERE clustering_1 = ? AND clustering_2 = ?", 1, 1);
+
+            assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering_2 = ?", 0, 1);
+
+            // token function
+            assertInvalidMessage("The token function cannot be used in WHERE clauses for DELETE statements",
+                                 "DELETE FROM %s WHERE token(partitionKey) = token(?) AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+            // multiple time same primary key element in WHERE clause
+            assertInvalidMessage("clustering_1 cannot be restricted by more than one relation if it includes an Equal",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND clustering_1 = ?", 0, 1, 1, 1);
+
+            // unknown identifiers
+            assertInvalidMessage("Unknown identifier value1",
+                                 "DELETE value1 FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+            assertInvalidMessage("Undefined name partitionkey1 in where clause ('partitionkey1 = ?')",
+                                 "DELETE FROM %s WHERE partitionKey1 = ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+            assertInvalidMessage("Undefined name clustering_3 in where clause ('clustering_3 = ?')",
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_3 = ?", 0, 1, 1);
+
+            // Invalid operator in the where clause
+            assertInvalidMessage("Only EQ and IN relation are supported on the partition key (unless you use the token() function)",
+                                 "DELETE FROM %s WHERE partitionKey > ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+            assertInvalidMessage("Cannot use CONTAINS on non-collection column partitionkey",
+                                 "DELETE FROM %s WHERE partitionKey CONTAINS ? AND clustering_1 = ? AND clustering_2 = ?", 0, 1, 1);
+
+            // Non primary key in the where clause
+            String errorMsg = isEmpty(compactOption) ? "Non PRIMARY KEY columns found in where clause: value"
+                                                     : "Predicates on the non-primary-key column (value) of a COMPACT table are not yet supported";
+
+            assertInvalidMessage(errorMsg,
+                                 "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ? AND value = ?", 0, 1, 1, 3);
+        }
+    }
+
+    /** Runs the one-clustering-column range-delete scenarios both from memtables and after flushing to sstables. */
+    @Test
+    public void testDeleteWithRangeAndOneClusteringColumn() throws Throwable
+    {
+        for (boolean forceFlush : new boolean[] { false, true })
+            testDeleteWithRangeAndOneClusteringColumn(forceFlush);
+    }
+
+    /**
+     * Exercises range (slice) deletions on a table with a single clustering column, for both
+     * regular and COMPACT STORAGE tables: partition deletes, single-column slices with every
+     * inequality operator, multi-column (tuple) slices, and the rejection of range deletions
+     * that target specific columns.
+     *
+     * @param forceFlush when {@code true}, data is flushed to sstables after every mutation so the
+     *                   scenarios are verified against sstable data as well as memtable data
+     */
+    private void testDeleteWithRangeAndOneClusteringColumn(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                                          "clustering int," +
+                                          "value int," +
+                                          " PRIMARY KEY (partitionKey, clustering))" + compactOption);
+
+            // value encodes the insertion order: partitionKey * 5 + clustering
+            int value = 0;
+            for (int partitionKey = 0; partitionKey < 5; partitionKey++)
+                for (int clustering1 = 0; clustering1 < 5; clustering1++)
+                        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (?, ?, ?)",
+                                partitionKey, clustering1, value++);
+
+            flush(forceFlush);
+
+            // test delete partition
+            execute("DELETE FROM %s WHERE partitionKey = ?", 1);
+            flush(forceFlush);
+            assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ?", 1));
+
+            // test slices on the first clustering column
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering >= ?", 0, 4);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 0, 0),
+                       row(0, 1, 1),
+                       row(0, 2, 2),
+                       row(0, 3, 3));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering > ?", 0, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 0, 0),
+                       row(0, 1, 1),
+                       row(0, 2, 2));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering <= ?", 0, 0);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 1, 1),
+                       row(0, 2, 2));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering < ?", 0, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 2, 2));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering >= ? AND clustering < ?", 2, 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 13),
+                       row(2, 4, 14));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering > ? AND clustering <= ?", 2, 3, 5);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 13));
+
+            // clustering < 3 AND clustering > 5 is an empty range, so nothing gets deleted
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering < ? AND clustering > ?", 2, 3, 5);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 13));
+
+            // test multi-column slices
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) > (?)", 3, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 3),
+                       row(3, 0, 15),
+                       row(3, 1, 16),
+                       row(3, 2, 17));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) < (?)", 3, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 3),
+                       row(3, 1, 16),
+                       row(3, 2, 17));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering) >= (?) AND (clustering) <= (?)", 3, 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 3),
+                       row(3, 2, 17));
+
+            // Test invalid queries
+            assertInvalidMessage("Range deletions are not supported for specific columns",
+                                 "DELETE value FROM %s WHERE partitionKey = ? AND clustering >= ?", 2, 1);
+        }
+    }
+
+    /** Runs the two-clustering-column range-delete scenarios both from memtables and after flushing to sstables. */
+    @Test
+    public void testDeleteWithRangeAndTwoClusteringColumns() throws Throwable
+    {
+        for (boolean forceFlush : new boolean[] { false, true })
+            testDeleteWithRangeAndTwoClusteringColumns(forceFlush);
+    }
+
+    private void testDeleteWithRangeAndTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        for (String compactOption : new String[] { "", " WITH COMPACT STORAGE" })
+        {
+            createTable("CREATE TABLE %s (partitionKey int," +
+                    "clustering_1 int," +
+                    "clustering_2 int," +
+                    "value int," +
+                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))" + compactOption);
+
+            int value = 0;
+            for (int partitionKey = 0; partitionKey < 5; partitionKey++)
+                for (int clustering1 = 0; clustering1 < 5; clustering1++)
+                    for (int clustering2 = 0; clustering2 < 5; clustering2++) {
+                        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (?, ?, ?, ?)",
+                                partitionKey, clustering1, clustering2, value++);}
+            flush(forceFlush);
+
+            // test unspecified second clustering column
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ?", 0, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 < ?", 0, 2),
+                       row(0, 0, 0, 0),
+                       row(0, 0, 1, 1),
+                       row(0, 0, 2, 2),
+                       row(0, 0, 3, 3),
+                       row(0, 0, 4, 4));
+
+            // test delete partition
+            execute("DELETE FROM %s WHERE partitionKey = ?", 1);
+            flush(forceFlush);
+            assertEmpty(execute("SELECT * FROM %s WHERE partitionKey = ?", 1));
+
+            // test slices on the second clustering column
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 < ?", 0, 0, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 < ?", 0, 2),
+                       row(0, 0, 2, 2),
+                       row(0, 0, 3, 3),
+                       row(0, 0, 4, 4));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 <= ?", 0, 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 < ?", 0, 2),
+                       row(0, 0, 4, 4));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 = ? AND clustering_2 > ? ", 0, 2, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND  clustering_1 = ?", 0, 2),
+                       row(0, 2, 0, 10),
+                       row(0, 2, 1, 11),
+                       row(0, 2, 2, 12));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 = ? AND clustering_2 >= ? ", 0, 2, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND  clustering_1 = ?", 0, 2),
+                       row(0, 2, 0, 10));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 = ? AND clustering_2 > ? AND clustering_2 < ? ",
+                    0, 3, 1, 4);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND  clustering_1 = ?", 0, 3),
+                       row(0, 3, 0, 15),
+                       row(0, 3, 1, 16),
+                       row(0, 3, 4, 19));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 = ? AND clustering_2 > ? AND clustering_2 < ? ",
+                    0, 3, 4, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND  clustering_1 = ?", 0, 3),
+                       row(0, 3, 0, 15),
+                       row(0, 3, 1, 16),
+                       row(0, 3, 4, 19));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 = ? AND clustering_2 >= ? AND clustering_2 <= ? ",
+                    0, 3, 1, 4);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND  clustering_1 = ?", 0, 3),
+                       row(0, 3, 0, 15));
+
+            // test slices on the first clustering column
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 >= ?", 0, 4);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 0, 4, 4),
+                       row(0, 2, 0, 10),
+                       row(0, 3, 0, 15));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND  clustering_1 > ?", 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 0, 4, 4),
+                       row(0, 2, 0, 10),
+                       row(0, 3, 0, 15));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 < ?", 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 0),
+                       row(0, 3, 0, 15));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 >= ? AND clustering_1 < ?", 2, 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 0, 65),
+                       row(2, 3, 1, 66),
+                       row(2, 3, 2, 67),
+                       row(2, 3, 3, 68),
+                       row(2, 3, 4, 69),
+                       row(2, 4, 0, 70),
+                       row(2, 4, 1, 71),
+                       row(2, 4, 2, 72),
+                       row(2, 4, 3, 73),
+                       row(2, 4, 4, 74));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 > ? AND clustering_1 <= ?", 2, 3, 5);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 0, 65),
+                       row(2, 3, 1, 66),
+                       row(2, 3, 2, 67),
+                       row(2, 3, 3, 68),
+                       row(2, 3, 4, 69));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 < ? AND clustering_1 > ?", 2, 3, 5);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 0, 65),
+                       row(2, 3, 1, 66),
+                       row(2, 3, 2, 67),
+                       row(2, 3, 3, 68),
+                       row(2, 3, 4, 69));
+
+            // test multi-column slices
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) > (?, ?)", 2, 3, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 0, 65),
+                       row(2, 3, 1, 66),
+                       row(2, 3, 2, 67),
+                       row(2, 3, 3, 68));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) < (?, ?)", 2, 3, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 1, 66),
+                       row(2, 3, 2, 67),
+                       row(2, 3, 3, 68));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) >= (?, ?) AND (clustering_1) <= (?)", 2, 3, 2, 4);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ?", 2),
+                       row(2, 3, 1, 66));
+
+            // Test with a mix of single column and multi-column restrictions
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND (clustering_2) < (?)", 3, 0, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 = ?", 3, 0),
+                       row(3, 0, 3, 78),
+                       row(3, 0, 4, 79));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?) AND (clustering_2) >= (?)", 3, 0, 1, 3);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 3, 0, 1),
+                       row(3, 1, 0, 80),
+                       row(3, 1, 1, 81),
+                       row(3, 1, 2, 82));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1) IN ((?), (?)) AND clustering_2 < ?", 3, 0, 1, 1);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 3, 0, 1),
+                       row(3, 1, 1, 81),
+                       row(3, 1, 2, 82));
+
+            execute("DELETE FROM %s WHERE partitionKey = ? AND (clustering_1) = (?) AND clustering_2 >= ?", 3, 1, 2);
+            flush(forceFlush);
+            assertRows(execute("SELECT * FROM %s WHERE partitionKey = ? AND clustering_1 IN (?, ?)", 3, 0, 1),
+                       row(3, 1, 1, 81));
+
+            // Test invalid queries
+            assertInvalidMessage("Range deletions are not supported for specific columns",
+                                 "DELETE value FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) >= (?, ?)", 2, 3, 1);
+        }
+    }
+
+    /** Runs the static-column DELETE scenarios both from memtables and after flushing to sstables. */
+    @Test
+    public void testDeleteWithAStaticColumn() throws Throwable
+    {
+        for (boolean forceFlush : new boolean[] { false, true })
+            testDeleteWithAStaticColumn(forceFlush);
+    }
+
+    /**
+     * Checks DELETE behaviour in the presence of a static column: deleting the static column at
+     * partition level, deleting static and regular columns in one statement, and the rejection of
+     * clustering restrictions when only static columns are modified.
+     *
+     * @param forceFlush when {@code true}, data is flushed to sstables after every mutation so the
+     *                   scenarios are verified against sstable data as well as memtable data
+     */
+    private void testDeleteWithAStaticColumn(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      "staticValue text static," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value, staticValue) VALUES (0, 0, 0, 0, 'A')");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value, staticValue) VALUES (1, 0, 0, 6, 'B')");
+        flush(forceFlush);
+
+        // Deleting the static column restricts only the partition key, no clustering columns.
+        execute("DELETE staticValue FROM %s WHERE partitionKey = ?", 0);
+        flush(forceFlush);
+        // row(new Object[1]) is a one-column row containing null: partition 0 lost its static value.
+        assertRows(execute("SELECT DISTINCT staticValue FROM %s WHERE partitionKey IN (?, ?)", 0, 1),
+                   row(new Object[1]), row("B"));
+
+        // Static and regular columns can be deleted together when the full primary key is given.
+        execute("DELETE staticValue, value FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                1, 0, 0);
+        flush(forceFlush);
+        assertRows(execute("SELECT * FROM %s"),
+                   row(1, 0, 0, null, null),
+                   row(0, 0, 0, null, 0),
+                   row(0, 0, 1, null, 1));
+
+        // Restricting clustering columns is rejected when only static columns are being deleted.
+        assertInvalidMessage("Invalid restrictions on clustering columns since the DELETE statement modifies only static columns",
+                             "DELETE staticValue FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND clustering_2 = ?",
+                             0, 0, 1);
+
+        assertInvalidMessage("Invalid restrictions on clustering columns since the DELETE statement modifies only static columns",
+                             "DELETE staticValue FROM %s WHERE partitionKey = ? AND (clustering_1, clustering_2) >= (?, ?)",
+                             0, 0, 1);
+    }
+
+    /** Runs the secondary-index DELETE scenarios both from memtables and after flushing to sstables. */
+    @Test
+    public void testDeleteWithSecondaryIndices() throws Throwable
+    {
+        for (boolean forceFlush : new boolean[] { false, true })
+            testDeleteWithSecondaryIndices(forceFlush);
+    }
+
+    /**
+     * Verifies that DELETE statements are still validated against the primary key even when
+     * secondary indexes exist: restrictions on indexed non-primary-key columns (EQ or CONTAINS)
+     * and partition-key-less deletes must all be rejected.
+     *
+     * @param forceFlush when {@code true}, data is flushed to sstables after the inserts so the
+     *                   validation is exercised against sstable data as well as memtable data
+     */
+    private void testDeleteWithSecondaryIndices(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                "clustering_1 int," +
+                "value int," +
+                "values set<int>," +
+                " PRIMARY KEY (partitionKey, clustering_1))");
+
+        // Indexes on a regular column, a clustering column and a collection column: none of them
+        // makes the corresponding restriction legal in a DELETE.
+        createIndex("CREATE INDEX ON %s (value)");
+        createIndex("CREATE INDEX ON %s (clustering_1)");
+        createIndex("CREATE INDEX ON %s (values)");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 0, 0, {0})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 1, 1, {0, 1})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 2, 2, {0, 1, 2})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (0, 3, 3, {0, 1, 2, 3})");
+        execute("INSERT INTO %s (partitionKey, clustering_1, value, values) VALUES (1, 0, 4, {0, 1, 2, 3, 4})");
+
+        flush(forceFlush);
+
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND value = ?", 3, 3, 3);
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: values",
+                             "DELETE FROM %s WHERE partitionKey = ? AND clustering_1 = ? AND values CONTAINS ?", 3, 3, 3);
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: value",
+                             "DELETE FROM %s WHERE partitionKey = ? AND value = ?", 3, 3);
+        assertInvalidMessage("Non PRIMARY KEY columns found in where clause: values",
+                             "DELETE FROM %s WHERE partitionKey = ? AND values CONTAINS ?", 3, 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "DELETE FROM %s WHERE clustering_1 = ?", 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "DELETE FROM %s WHERE value = ?", 3);
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "DELETE FROM %s WHERE values CONTAINS ?", 3);
+    }
+
+    /**
+     * Flushes the memtables to sstables when {@code forceFlush} is {@code true}; a no-op otherwise.
+     * Lets every scenario run once against memtable data and once against sstable data.
+     */
+    private void flush(boolean forceFlush)
+    {
+        if (forceFlush)
+            flush();
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
index 6e9d212..3c49989 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
@@ -56,4 +56,237 @@ public class InsertTest extends CQLTester
                    row(new Object[]{ null })
         );
     }
+
+    @Test
+    public void testInsert() throws Throwable
+    {
+        // Run the scenario twice: once against memtable-only data and once after a
+        // forced flush, so the sstable read path is exercised as well.
+        testInsert(false);
+        testInsert(true);
+    }
+
+    /**
+     * Checks INSERT handling on a table with one partition key and one clustering
+     * column: valid inserts (including a row with a null regular column), then
+     * rejection of missing primary key parts, duplicated column names and
+     * unknown identifiers.
+     *
+     * @param forceFlush whether to flush after the valid inserts so the
+     *                   assertions also cover sstable data
+     */
+    private void testInsert(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering))");
+
+        execute("INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+        flush(forceFlush);
+
+        // Row (0, 0) was inserted without a value, so it must read back as null.
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, null),
+                   row(0, 1, 1));
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering, value) VALUES (0, 1)");
+        assertInvalidMessage("Some clustering keys are missing: clustering",
+                             "INSERT INTO %s (partitionKey, value) VALUES (0, 2)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
+
+        // unknown identifiers
+        assertInvalidMessage("Unknown identifier clusteringx",
+                             "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
+
+        assertInvalidMessage("Unknown identifier valuex",
+                             "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
+    }
+
+    @Test
+    public void testInsertWithCompactFormat() throws Throwable
+    {
+        // Exercise both the memtable path (no flush) and the sstable path (flush).
+        testInsertWithCompactFormat(false);
+        testInsertWithCompactFormat(true);
+    }
+
+    /**
+     * Same validation checks as {@code testInsert}, but on a COMPACT STORAGE
+     * table, where additionally the single regular column is mandatory and a
+     * missing clustering key is rejected.
+     *
+     * @param forceFlush whether to flush after the valid inserts so the
+     *                   assertions also cover sstable data
+     */
+    private void testInsertWithCompactFormat(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+        flush(forceFlush);
+
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, 0),
+                   row(0, 1, 1));
+
+        // Invalid Null values for the clustering key or the regular column
+        assertInvalidMessage("Some clustering keys are missing: clustering",
+                             "INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
+        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                             "INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering, value) VALUES (0, 1)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
+
+        // unknown identifiers
+        assertInvalidMessage("Unknown identifier clusteringx",
+                             "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
+
+        assertInvalidMessage("Unknown identifier valuex",
+                             "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
+    }
+
+    @Test
+    public void testInsertWithTwoClusteringColumns() throws Throwable
+    {
+        // Exercise both the memtable path (no flush) and the sstable path (flush).
+        testInsertWithTwoClusteringColumns(false);
+        testInsertWithTwoClusteringColumns(true);
+    }
+
+    /**
+     * Checks INSERT validation on a table with two clustering columns: valid
+     * inserts, a missing partition key or first clustering column, duplicated
+     * column names and unknown identifiers.
+     *
+     * @param forceFlush whether to flush after the valid inserts so the
+     *                   assertions also cover sstable data
+     */
+    private void testInsertWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        flush(forceFlush);
+
+        // The first row was inserted without a value and must read back as null.
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, 0, null),
+                   row(0, 0, 1, 1));
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
+        assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
+
+        // unknown identifiers
+        assertInvalidMessage("Unknown identifier clustering_1x",
+                             "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
+
+        assertInvalidMessage("Unknown identifier valuex",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
+    }
+
+    @Test
+    public void testInsertWithCompactStorageAndTwoClusteringColumns() throws Throwable
+    {
+        // Exercise both the memtable path (no flush) and the sstable path (flush).
+        testInsertWithCompactStorageAndTwoClusteringColumns(false);
+        testInsertWithCompactStorageAndTwoClusteringColumns(true);
+    }
+
+    /**
+     * Checks INSERT validation on a COMPACT STORAGE table with two clustering
+     * columns. Notably, omitting the trailing clustering column is accepted
+     * (yielding a row with a null clustering_2), while restricting clustering_2
+     * without clustering_1 and omitting the mandatory value column are rejected.
+     *
+     * @param forceFlush whether to flush after the valid inserts so the
+     *                   assertions also cover sstable data
+     */
+    private void testInsertWithCompactStorageAndTwoClusteringColumns(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2)) WITH COMPACT STORAGE");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+        flush(forceFlush);
+
+        // First insert omitted clustering_2, so its row carries a null there.
+        assertRows(execute("SELECT * FROM %s"),
+                   row(0, 0, null, 0),
+                   row(0, 0, 0, 0),
+                   row(0, 0, 1, 1));
+
+        // Invalid Null values for the clustering key or the regular column
+        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 0)");
+        assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
+        assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                             "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
+
+        // multiple time the same value
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
+
+        // multiple time same primary key element in WHERE clause
+        assertInvalidMessage("The column names contains duplicates",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
+
+        // unknown identifiers
+        assertInvalidMessage("Unknown identifier clustering_1x",
+                             "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
+
+        assertInvalidMessage("Unknown identifier valuex",
+                             "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
+    }
+
+    @Test
+    public void testInsertWithAStaticColumn() throws Throwable
+    {
+        // Exercise both the memtable path (no flush) and the sstable path (flush).
+        testInsertWithAStaticColumn(false);
+        testInsertWithAStaticColumn(true);
+    }
+
+    /**
+     * Checks INSERT behavior with a static column: inserting only the partition
+     * key and the static value is legal, the static value is shared by rows added
+     * later in the same partition, and missing primary key parts are rejected.
+     *
+     * @param forceFlush whether to flush after each batch of inserts so the
+     *                   assertions also cover sstable data
+     */
+    private void testInsertWithAStaticColumn(boolean forceFlush) throws Throwable
+    {
+        createTable("CREATE TABLE %s (partitionKey int," +
+                                      "clustering_1 int," +
+                                      "clustering_2 int," +
+                                      "value int," +
+                                      "staticValue text static," +
+                                      " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, staticValue) VALUES (0, 0, 0, 'A')");
+        execute("INSERT INTO %s (partitionKey, staticValue) VALUES (1, 'B')");
+        flush(forceFlush);
+
+        // Partition 1 has only a static value, so its clustering/regular columns are null.
+        assertRows(execute("SELECT * FROM %s"),
+                   row(1, null, null, "B", null),
+                   row(0, 0, 0, "A", null));
+
+        execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 0)");
+        flush(forceFlush);
+        // The new row in partition 1 picks up the previously written static value 'B'.
+        assertRows(execute("SELECT * FROM %s"),
+                   row(1, 0, 0, "B", 0),
+                   row(0, 0, 0, "A", null));
+
+        // Missing primary key columns
+        assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                             "INSERT INTO %s (clustering_1, clustering_2, staticValue) VALUES (0, 0, 'A')");
+        assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                             "INSERT INTO %s (partitionKey, clustering_2, staticValue) VALUES (0, 0, 'A')");
+    }
+
+    // Flushes the memtable only when the caller asked for it, letting each test
+    // scenario run against both the memtable and the sstable read paths.
+    private void flush(boolean forceFlush)
+    {
+        if (forceFlush)
+            flush();
+    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/2e3727e3/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
index a289df9..9cde6d7 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertUpdateIfConditionTest.java
@@ -112,6 +112,10 @@ public class InsertUpdateIfConditionTest extends CQLTester
 
         // Should apply
         assertRows(execute("DELETE FROM %s WHERE k = 0 IF v1 IN (null)"), row(true));
+
+        createTable(" CREATE TABLE %s (k int, c int, v1 text, PRIMARY KEY(k, c))");
+        assertInvalidMessage("IN on the clustering key columns is not supported with conditional updates",
+                             "UPDATE %s SET v1 = 'A' WHERE k = 0 AND c IN (1, 2) IF EXISTS");
     }
 
     /**
@@ -184,11 +188,25 @@ public class InsertUpdateIfConditionTest extends CQLTester
         assertRows(execute("DELETE FROM %s WHERE k='k' AND i=0 IF EXISTS"), row(false));
 
         // CASSANDRA-6430
-        assertInvalid("DELETE FROM %s WHERE k = 'k' IF EXISTS");
-        assertInvalid("DELETE FROM %s WHERE k = 'k' IF v = 'foo'");
-        assertInvalid("DELETE FROM %s WHERE i = 0 IF EXISTS");
-        assertInvalid("DELETE FROM %s WHERE k = 0 AND i > 0 IF EXISTS");
-        assertInvalid("DELETE FROM %s WHERE k = 0 AND i > 0 IF v = 'foo'");
+        assertInvalidMessage("DELETE statements must restrict all PRIMARY KEY columns with equality relations in order to use IF conditions",
+                             "DELETE FROM %s WHERE k = 'k' IF EXISTS");
+        assertInvalidMessage("DELETE statements must restrict all PRIMARY KEY columns with equality relations in order to use IF conditions",
+                             "DELETE FROM %s WHERE k = 'k' IF v = 'foo'");
+        assertInvalidMessage("Some partition key parts are missing: k",
+                             "DELETE FROM %s WHERE i = 0 IF EXISTS");
+
+        assertInvalidMessage("Invalid INTEGER constant (0) for \"k\" of type text",
+                             "DELETE FROM %s WHERE k = 0 AND i > 0 IF EXISTS");
+        assertInvalidMessage("Invalid INTEGER constant (0) for \"k\" of type text",
+                             "DELETE FROM %s WHERE k = 0 AND i > 0 IF v = 'foo'");
+        assertInvalidMessage("DELETE statements must restrict all PRIMARY KEY columns with equality relations in order to use IF conditions",
+                             "DELETE FROM %s WHERE k = 'k' AND i > 0 IF EXISTS");
+        assertInvalidMessage("DELETE statements must restrict all PRIMARY KEY columns with equality relations in order to use IF conditions",
+                             "DELETE FROM %s WHERE k = 'k' AND i > 0 IF v = 'foo'");
+        assertInvalidMessage("IN on the clustering key columns is not supported with conditional deletions",
+                             "DELETE FROM %s WHERE k = 'k' AND i IN (0, 1) IF v = 'foo'");
+        assertInvalidMessage("IN on the clustering key columns is not supported with conditional deletions",
+                             "DELETE FROM %s WHERE k = 'k' AND i IN (0, 1) IF EXISTS");
     }
 
     /**


[5/5] cassandra git commit: Merge branch 'cassandra-3.0' into trunk

Posted by bl...@apache.org.
Merge branch 'cassandra-3.0' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/86faf8cb
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/86faf8cb
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/86faf8cb

Branch: refs/heads/trunk
Commit: 86faf8cb3ca2d193c7a5b9c2cfa2f098ff46070a
Parents: 10fa329 2e3727e
Author: blerer <be...@datastax.com>
Authored: Fri Sep 4 21:13:18 2015 +0200
Committer: blerer <be...@datastax.com>
Committed: Fri Sep 4 21:13:42 2015 +0200

----------------------------------------------------------------------
 NEWS.txt                                        |   6 +-
 doc/cql3/CQL.textile                            |  13 +-
 .../cassandra/config/ColumnDefinition.java      |   6 +-
 .../cassandra/cql3/AbstractConditions.java      |  65 ++
 .../apache/cassandra/cql3/ColumnConditions.java | 167 ++++
 .../apache/cassandra/cql3/ColumnIdentifier.java |  56 +-
 .../org/apache/cassandra/cql3/Conditions.java   | 100 +++
 src/java/org/apache/cassandra/cql3/Cql.g        |   6 +-
 .../cassandra/cql3/IfExistsCondition.java       |  36 +
 .../cassandra/cql3/IfNotExistsCondition.java    |  36 +
 src/java/org/apache/cassandra/cql3/Json.java    |  36 +-
 .../org/apache/cassandra/cql3/Operations.java   | 135 ++++
 .../cassandra/cql3/SingleColumnRelation.java    |   1 +
 .../apache/cassandra/cql3/UpdateParameters.java |   9 +-
 .../restrictions/StatementRestrictions.java     | 246 ++++--
 .../cql3/statements/BatchStatement.java         |  91 +--
 .../cql3/statements/CQL3CasRequest.java         |  14 +-
 .../cql3/statements/DeleteStatement.java        |  99 +--
 .../cql3/statements/ModificationStatement.java  | 765 +++++++++----------
 .../cql3/statements/SelectStatement.java        |   9 +-
 .../cql3/statements/StatementType.java          | 138 ++++
 .../cql3/statements/UpdateStatement.java        | 198 +++--
 .../cql3/statements/UpdatesCollector.java       | 130 ++++
 src/java/org/apache/cassandra/db/CBuilder.java  |   9 +-
 .../org/apache/cassandra/db/RangeTombstone.java |   4 +-
 src/java/org/apache/cassandra/db/Slices.java    |   9 +
 .../cassandra/io/sstable/CQLSSTableWriter.java  |   7 +-
 .../cassandra/cql3/MaterializedViewTest.java    |  33 +
 .../cql3/validation/entities/UFAuthTest.java    |   8 +-
 .../entities/UFIdentificationTest.java          |  44 +-
 .../cql3/validation/operations/BatchTest.java   |  79 +-
 .../cql3/validation/operations/DeleteTest.java  | 681 ++++++++++++++++-
 .../cql3/validation/operations/InsertTest.java  | 233 ++++++
 .../operations/InsertUpdateIfConditionTest.java |  28 +-
 .../cql3/validation/operations/UpdateTest.java  | 447 +++++++++++
 .../cassandra/db/RangeTombstoneListTest.java    | 222 +++++-
 36 files changed, 3384 insertions(+), 782 deletions(-)
----------------------------------------------------------------------