Posted to commits@cassandra.apache.org by sl...@apache.org on 2014/05/29 11:37:08 UTC

[1/3] Tuple types

Repository: cassandra
Updated Branches:
  refs/heads/trunk 4b9fde93d -> 56c2f0ab1
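
For orientation before the diff: the change below rewrites MultiColumnRelationTest on top of the CQLTester helpers (createTable, execute, assertRows, row, tuple, assertInvalid). The following is a minimal sketch, not part of the commit, showing that pattern for a multi-column (tuple) relation; the class and method names are hypothetical and only illustrate the helpers as they are used in the hunks that follow.

package org.apache.cassandra.cql3;

import org.junit.Test;

// Hypothetical example test in the CQLTester style used by the diff below.
public class TupleRelationSketchTest extends CQLTester
{
    @Test
    public void testTupleMarkerSketch() throws Throwable
    {
        // CQLTester substitutes %s with the per-test table name
        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b, c))");
        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 2);

        // A multi-column relation can take one marker per clustering column...
        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= (?, ?)", 0, 1, 2),
            row(0, 1, 2)
        );

        // ...or bind the whole tuple to a single marker via tuple()
        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= ?", 0, tuple(1, 2)),
            row(0, 1, 2)
        );
    }
}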


http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/test/unit/org/apache/cassandra/cql3/MultiColumnRelationTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/MultiColumnRelationTest.java b/test/unit/org/apache/cassandra/cql3/MultiColumnRelationTest.java
index 35646be..e37759d 100644
--- a/test/unit/org/apache/cassandra/cql3/MultiColumnRelationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/MultiColumnRelationTest.java
@@ -31,1084 +31,476 @@ import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.MD5Digest;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
-import static org.apache.cassandra.cql3.QueryProcessor.process;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
 import static com.google.common.collect.Lists.newArrayList;
-import static org.junit.Assert.fail;
 
-public class MultiColumnRelationTest
+public class MultiColumnRelationTest extends CQLTester
 {
-    private static final Logger logger = LoggerFactory.getLogger(MultiColumnRelationTest.class);
-    static ClientState clientState;
-    static String keyspace = "multi_column_relation_test";
-
-    @BeforeClass
-    public static void setUpClass() throws Throwable
-    {
-        SchemaLoader.loadSchema();
-        executeSchemaChange("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
-        executeSchemaChange("CREATE TABLE IF NOT EXISTS %s.single_partition (a int PRIMARY KEY, b int)");
-        executeSchemaChange("CREATE TABLE IF NOT EXISTS %s.compound_partition (a int, b int, c int, PRIMARY KEY ((a, b)))");
-        executeSchemaChange("CREATE TABLE IF NOT EXISTS %s.single_clustering (a int, b int, c int, PRIMARY KEY (a, b))");
-        executeSchemaChange("CREATE TABLE IF NOT EXISTS %s.multiple_clustering (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d))");
-        executeSchemaChange("CREATE TABLE IF NOT EXISTS %s.multiple_clustering_reversed (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d)) WITH CLUSTERING ORDER BY (b DESC, c ASC, d DESC)");
-        clientState = ClientState.forInternalCalls();
-    }
-
-    @AfterClass
-    public static void stopGossiper()
-    {
-        Gossiper.instance.stop();
-    }
-
-    private static void executeSchemaChange(String query) throws Throwable
-    {
-        try
-        {
-            process(String.format(query, keyspace), ConsistencyLevel.ONE);
-        } catch (RuntimeException exc)
-        {
-            throw exc.getCause();
-        }
-    }
-
-    private static UntypedResultSet execute(String query) throws Throwable
-    {
-        try
-        {
-            return executeOnceInternal(String.format(query, keyspace));
-        } catch (RuntimeException exc)
-        {
-            if (exc.getCause() != null)
-                throw exc.getCause();
-            throw exc;
-        }
-    }
-
-    private MD5Digest prepare(String query) throws RequestValidationException
-    {
-        ResultMessage.Prepared prepared = QueryProcessor.prepare(String.format(query, keyspace), clientState, false);
-        return prepared.statementId;
-    }
-
-    private UntypedResultSet executePrepared(MD5Digest statementId, QueryOptions options) throws RequestValidationException, RequestExecutionException
-    {
-        ParsedStatement.Prepared prepared = QueryProcessor.instance.getPrepared(statementId);
-        ResultMessage message = prepared.statement.executeInternal(QueryState.forInternalCalls(), options);
-
-        if (message instanceof ResultMessage.Rows)
-            return UntypedResultSet.create(((ResultMessage.Rows)message).result);
-        else
-            return null;
-    }
-
-    @Test(expected=SyntaxException.class)
-    public void testEmptyIdentifierTuple() throws Throwable
-    {
-        execute("SELECT * FROM %s.single_clustering WHERE () = (1, 2)");
-    }
-
-    @Test(expected=SyntaxException.class)
-    public void testEmptyValueTuple() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE (b, c) > ()");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testDifferentTupleLengths() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE (b, c) > (1, 2, 3)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testNullInTuple() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE (b, c) > (1, null)");
-    }
-
     @Test
-    public void testEmptyIN() throws Throwable
-    {
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ()");
-        assertTrue(results.isEmpty());
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testNullInINValues() throws Throwable
-    {
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((1, 2, null))");
-        assertTrue(results.isEmpty());
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPartitionKeyInequality() throws Throwable
-    {
-        execute("SELECT * FROM %s.single_partition WHERE (a) > (1)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPartitionKeyEquality() throws Throwable
-    {
-        execute("SELECT * FROM %s.single_partition WHERE (a) = (0)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testRestrictNonPrimaryKey() throws Throwable
+    public void testSingleClusteringInvalidQueries() throws Throwable
     {
-        execute("SELECT * FROM %s.single_partition WHERE (b) = (0)");
-    }
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testMixEqualityAndInequality() throws Throwable
-    {
-        execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = (0) AND (b) > (0)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testMixMultipleInequalitiesOnSameBound() throws Throwable
-    {
-        execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (0) AND (b) > (1)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testClusteringColumnsOutOfOrderInInequality() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (d, c, b) > (0, 0, 0)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testSkipClusteringColumnInEquality() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (c, d) = (0, 0)");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testSkipClusteringColumnInInequality() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (c, d) > (0, 0)");
-    }
-
-    @Test
-    public void testSingleClusteringColumnEquality() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 2, 0)");
-        UntypedResultSet results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = (1)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = (3)");
-        assertEquals(0, results.size());
+        assertInvalid("SELECT * FROM %s WHERE () = (?, ?)", 1, 2);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b) = (?) AND (b) > (?)", 0, 0);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b) > (?) AND (b) > (?)", 0, 1);
+        assertInvalid("SELECT * FROM %s WHERE (a, b) = (?, ?)", 0, 0);
     }
 
     @Test
-    public void testMultipleClusteringColumnEquality() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 1)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 2, 0, 0)");
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) = (1)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(2, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) = (1, 1)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 1, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) = (1, 1, 1)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 1, 1);
-        execute("DELETE FROM %s.multiple_clustering WHERE a=0 AND b=2 and c=0 and d=0");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPartitionAndClusteringColumnEquality() throws Throwable
+    public void testMultiClusteringInvalidQueries() throws Throwable
     {
-        execute("SELECT * FROM %s.single_clustering WHERE (a, b) = (0, 0)");
-    }
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d))");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testClusteringColumnsOutOfOrderInEquality() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (d, c, b) = (3, 2, 1)");
-    }
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c) > ()");
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c) > (?, ?, ?)", 1, 2, 3);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c) > (?, ?)", 1, null);
 
-    @Test(expected=InvalidRequestException.class)
-    public void testBadType() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) = (1, 2, 'foobar')");
-    }
+        // Wrong order of columns
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (d, c, b) = (?, ?, ?)", 0, 0, 0);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (d, c, b) > (?, ?, ?)", 0, 0, 0);
 
-    @Test(expected=SyntaxException.class)
-    public void testSingleColumnTupleRelation() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND b = (1, 2, 3)");
-    }
+        // Wrong number of values
+        assertInvalid("SELECT * FROM %s WHERE a=0 AND (b, c, d) IN ((?, ?))", 0, 1);
+        assertInvalid("SELECT * FROM %s WHERE a=0 AND (b, c, d) IN ((?, ?, ?, ?, ?))", 0, 1, 2, 3, 4);
 
-    @Test
-    public void testMixSingleAndTupleInequalities() throws Throwable
-    {
-        String[] queries = new String[]{
-            "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 0) AND b < 1",
-            "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 0) AND c < 1",
-            "SELECT * FROM %s.multiple_clustering WHERE a=0 AND b > 1 AND (b, c, d) < (1, 1, 0)",
-            "SELECT * FROM %s.multiple_clustering WHERE a=0 AND c > 1 AND (b, c, d) < (1, 1, 0)",
-        };
-
-        for (String query : queries)
-        {
-            try
-            {
-                execute(query);
-                fail(String.format("Expected query \"%s\" to throw an InvalidRequestException", query));
-            }
-            catch (InvalidRequestException e) {}
-        }
-    }
-
-    @Test
-    public void testSingleClusteringColumnInequality() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 2, 0)");
-
-        UntypedResultSet results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (0)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) >= (1)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) < (2)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) <= (1)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (0) AND (b) < (2)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0);
-    }
-
-    @Test
-    public void testMultipleClusteringColumnInequality() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 1)");
-
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > (0)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(2, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) >= (0)");
-        assertEquals(6, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-        checkRow(3, results, 0, 1, 0, 0);
-        checkRow(4, results, 0, 1, 1, 0);
-        checkRow(5, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) > (1, 0)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 1, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) >= (1, 0)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(2, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (1, 1, 0)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) >= (1, 1, 0)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 1, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) < (1)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) <= (1)");
-        assertEquals(6, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-        checkRow(3, results, 0, 1, 0, 0);
-        checkRow(4, results, 0, 1, 1, 0);
-        checkRow(5, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) < (0, 1)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) <= (0, 1)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) < (0, 1, 1)");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) <= (0, 1, 1)");
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 0) AND (b) < (1)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 1) AND (b, c) < (1, 1)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 1) AND (b, c, d) < (1, 1, 0)");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        // reversed
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > (0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) >= (0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(6, results.size());
-        checkRow(5, results, 0, 0, 0, 0);
-        checkRow(4, results, 0, 0, 1, 0);
-        checkRow(3, results, 0, 0, 1, 1);
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) > (1, 0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(2, results.size());
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) >= (1, 0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (1, 1, 0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) >= (1, 1, 0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(2, results.size());
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) < (1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) <= (1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(6, results.size());
-        checkRow(5, results, 0, 0, 0, 0);
-        checkRow(4, results, 0, 0, 1, 0);
-        checkRow(3, results, 0, 0, 1, 1);
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) < (0, 1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) <= (0, 1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) < (0, 1, 1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(2, results.size());
-        checkRow(1, results, 0, 0, 0, 0);
-        checkRow(0, results, 0, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) <= (0, 1, 1) ORDER BY b DESC, c DESC, d DESC");
-        checkRow(2, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 0) AND (b) < (1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 1) AND (b, c) < (1, 1) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (0, 1, 1) AND (b, c, d) < (1, 1, 0) ORDER BY b DESC, c DESC, d DESC");
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-    }
-
-    @Test
-    public void testMultipleClusteringColumnInequalityReversedComponents() throws Throwable
-    {
-        // b and d are reversed in the clustering order
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 1, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 1, 1, 1)");
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 1, 1, 0)");
-
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 0, 1, 1)");
-        execute("INSERT INTO %s.multiple_clustering_reversed (a, b, c, d) VALUES (0, 0, 1, 0)");
-
-
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering_reversed WHERE a=0 AND (b) > (0)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-        checkRow(2, results, 0, 1, 1, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering_reversed WHERE a=0 AND (b) >= (0)");
-        assertEquals(6, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-        checkRow(2, results, 0, 1, 1, 0);
-        checkRow(3, results, 0, 0, 0, 0);
-        checkRow(4, results, 0, 0, 1, 1);
-        checkRow(5, results, 0, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering_reversed WHERE a=0 AND (b) < (1)");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-        checkRow(2, results, 0, 0, 1, 0);
-
-        results = execute("SELECT * FROM %s.multiple_clustering_reversed WHERE a=0 AND (b) <= (1)");
-        assertEquals(6, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-        checkRow(2, results, 0, 1, 1, 0);
-        checkRow(3, results, 0, 0, 0, 0);
-        checkRow(4, results, 0, 0, 1, 1);
-        checkRow(5, results, 0, 0, 1, 0);
-
-        // preserve pre-6875 behavior (even though the query result is technically incorrect)
-        results = execute("SELECT * FROM %s.multiple_clustering_reversed WHERE a=0 AND (b, c) > (1, 0)");
-        assertEquals(0, results.size());
-    }
-
-    @Test
-    public void testLiteralIn() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
+        // Missing first clustering column
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (c, d) = (?, ?)", 0, 0);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (c, d) > (?, ?)", 0, 0);
 
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((0, 1, 0), (0, 1, 1))");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
+        // Nulls
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c, d) IN ((?, ?, ?))", 1, 2, null);
 
-        // same query, but reversed order for the IN values
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((0, 1, 1), (0, 1, 0))");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b, c) IN ((0, 1))");
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b) IN ((0))");
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-    }
+        // Wrong type for 'd'
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c, d) = (?, ?, ?)", 1, 2, "foobar");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testLiteralInWithShortTuple() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((0, 1))");
-    }
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND b = (?, ?, ?)", 1, 2, 3);
 
-    @Test(expected=InvalidRequestException.class)
-    public void testLiteralInWithLongTuple() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((0, 1, 2, 3, 4))");
-    }
+        // Mix single and tuple inequalities
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c, d) > (?, ?, ?) AND b < ?", 0, 1, 0, 1);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c, d) > (?, ?, ?) AND c < ?", 0, 1, 0, 1);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND b > ? AND (b, c, d) < (?, ?, ?)", 1, 1, 1, 0);
+        assertInvalid("SELECT * FROM %s WHERE a = 0 AND c > ? AND (b, c, d) < (?, ?, ?)", 1, 1, 1, 0);
 
-    @Test(expected=InvalidRequestException.class)
-    public void testLiteralInWithPartitionKey() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE (a, b, c, d) IN ((0, 1, 2, 3))");
+        assertInvalid("SELECT * FROM %s WHERE (a, b, c, d) IN ((?, ?, ?, ?))", 0, 1, 2, 3);
+        assertInvalid("SELECT * FROM %s WHERE (c, d) IN ((?, ?))", 0, 1);
     }
 
-    @Test(expected=InvalidRequestException.class)
-    public void testLiteralInSkipsClusteringColumn() throws Throwable
-    {
-        execute("SELECT * FROM %s.multiple_clustering WHERE (c, d) IN ((0, 1))");
-    }
     @Test
-    public void testPartitionAndClusteringInClauses() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (1, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (1, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (1, 0, 1, 1)");
-
-        UntypedResultSet results = execute("SELECT * FROM %s.multiple_clustering WHERE a IN (0, 1) AND (b, c, d) IN ((0, 1, 0), (0, 1, 1))");
-        assertEquals(4, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-        checkRow(2, results, 1, 0, 1, 0);
-        checkRow(3, results, 1, 0, 1, 1);
-
-        // same query, but reversed order for the IN values
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a IN (1, 0) AND (b, c, d) IN ((0, 1, 1), (0, 1, 0))");
-        assertEquals(4, results.size());
-        checkRow(0, results, 1, 0, 1, 0);
-        checkRow(1, results, 1, 0, 1, 1);
-        checkRow(2, results, 0, 0, 1, 0);
-        checkRow(3, results, 0, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a IN (0, 1) and (b, c) IN ((0, 1))");
-        assertEquals(4, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-        checkRow(2, results, 1, 0, 1, 0);
-        checkRow(3, results, 1, 0, 1, 1);
-
-        results = execute("SELECT * FROM %s.multiple_clustering WHERE a IN (0, 1) and (b) IN ((0))");
-        assertEquals(6, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-        checkRow(3, results, 1, 0, 0, 0);
-        checkRow(4, results, 1, 0, 1, 0);
-        checkRow(5, results, 1, 0, 1, 1);
-    }
-
-    // prepare statement tests
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPreparePartitionAndClusteringColumnEquality() throws Throwable
+    public void testSinglePartitionInvalidQueries() throws Throwable
     {
-        prepare("SELECT * FROM %s.single_clustering WHERE (a, b) = (?, ?)");
-    }
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b int)");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareDifferentTupleLengths() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE (b, c) > (?, ?, ?)");
+        assertInvalid("SELECT * FROM %s WHERE (a) > (?)", 0);
+        assertInvalid("SELECT * FROM %s WHERE (a) = (?)", 0);
+        assertInvalid("SELECT * FROM %s WHERE (b) = (?)", 0);
     }
 
     @Test
-    public void testPrepareEmptyIN() throws Throwable
+    public void testSingleClustering() throws Throwable
     {
-        MD5Digest id = prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ()");
-        UntypedResultSet results = executePrepared(id, makeIntOptions());
-        assertTrue(results.isEmpty());
-    }
+        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPreparePartitionKeyInequality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.single_partition WHERE (a) > (?)");
-    }
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 1, 0);
+        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", 0, 2, 0);
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPreparePartitionKeyEquality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.single_partition WHERE (a) = (?)");
-    }
+        // Equalities
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareRestrictNonPrimaryKey() throws Throwable
-    {
-        prepare("SELECT * FROM %s.single_partition WHERE (b) = (?)");
-    }
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = (?)", 0, 1),
+            row(0, 1, 0)
+        );
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareMixEqualityAndInequality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = (?) AND (b) > (?)");
-    }
+        // Same but check the whole tuple can be prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = ?", 0, tuple(1)),
+            row(0, 1, 0)
+        );
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareMixMultipleInequalitiesOnSameBound() throws Throwable
-    {
-        prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (?) AND (b) > (?)");
-    }
+        assertEmpty(execute("SELECT * FROM %s WHERE a = ? AND (b) = (?)", 0, 3));
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareClusteringColumnsOutOfOrderInInequality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (d, c, b) > (?, ?, ?)");
-    }
+        // Inequalities
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareSkipClusteringColumnInEquality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (c, d) = (?, ?)");
-    }
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?)", 0, 0),
+            row(0, 1, 0),
+            row(0, 2, 0)
+        );
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareSkipClusteringColumnInInequality() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (c, d) > (?, ?)");
-    }
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?)", 0, 1),
+            row(0, 1, 0),
+            row(0, 2, 0)
+        );
 
-    @Test
-    public void testPreparedClusteringColumnEquality() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        MD5Digest id = prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = (?)");
-        UntypedResultSet results = executePrepared(id, makeIntOptions(0));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 0);
-    }
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?)", 0, 2),
+            row(0, 0, 0),
+            row(0, 1, 0)
+        );
 
-    @Test
-    public void testPreparedClusteringColumnEqualitySingleMarker() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        MD5Digest id = prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) = ?");
-        UntypedResultSet results = executePrepared(id, options(tuple(0)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 0);
-    }
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?)", 0, 1),
+            row(0, 0, 0),
+            row(0, 1, 0)
+        );
 
-    @Test
-    public void testPreparedSingleClusteringColumnInequality() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 2, 0)");
-
-        MD5Digest id = prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (?)");
-        UntypedResultSet results = executePrepared(id, makeIntOptions(0));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) >= (?)"), makeIntOptions(1));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) < (?)"), makeIntOptions(2));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) <= (?)"), makeIntOptions(1));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > (?) AND (b) < (?)"), makeIntOptions(0, 2));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0);
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?) AND (b) < (?)", 0, 0, 2),
+            row(0, 1, 0)
+        );
     }
 
     @Test
-    public void testPreparedSingleClusteringColumnInequalitySingleMarker() throws Throwable
-    {
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 0, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 1, 0)");
-        execute("INSERT INTO %s.single_clustering (a, b, c) VALUES (0, 2, 0)");
-
-        MD5Digest id = prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > ?");
-        UntypedResultSet results = executePrepared(id, options(tuple(0)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) >= ?"), options(tuple(1)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 0);
-        checkRow(1, results, 0, 2, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) < ?"), options(tuple(2)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) <= ?"), options(tuple(1)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 0);
-        checkRow(1, results, 0, 1, 0);
-
-
-        results = executePrepared(prepare("SELECT * FROM %s.single_clustering WHERE a=0 AND (b) > ? AND (b) < ?"),
-                options(tuple(0), tuple(2)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0);
-    }
-
-    @Test
-    public void testPrepareMultipleClusteringColumnInequality() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 1)");
-
-        UntypedResultSet results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > (?)"), makeIntOptions(0));
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(2, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) > (?, ?)"), makeIntOptions(1, 0));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 1, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare
-                ("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (?, ?, ?)"), makeIntOptions(1, 1, 0));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (?, ?, ?) AND (b) < (?)"),
-                makeIntOptions(0, 1, 0, 1));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare
-                ("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?)"),
-                makeIntOptions(0, 1, 1, 1, 1));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (?, ?, ?) AND (b, c, d) < (?, ?, ?)"),
-                makeIntOptions(0, 1, 1, 1, 1, 0));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        // reversed
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > (?) ORDER BY b DESC, c DESC, d DESC"),
-                makeIntOptions(0));
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?) ORDER BY b DESC, c DESC, d DESC"),
-                makeIntOptions(0, 1, 1, 1, 1));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-    }
-
-    @Test
-    public void testPrepareMultipleClusteringColumnInequalitySingleMarker() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 1, 1, 1)");
-
-        UntypedResultSet results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > ?"), options(tuple(0)));
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(2, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c) > ?"), options(tuple(1, 0)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 1, 1, 0);
-        checkRow(1, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare
-                ("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > ?"), options(tuple(1, 1, 0)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > ? AND (b) < ?"),
-                options(tuple(0, 1, 0), tuple(1)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare
-                ("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > ? AND (b, c) < ?"),
-                options(tuple(0, 1, 1), tuple(1, 1)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > ? AND (b, c, d) < ?"),
-                options(tuple(0, 1, 1), tuple(1, 1, 0)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
+    public void testMultipleClustering() throws Throwable
+    {
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d))");
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 1);
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 1);
+
+        // Empty query
+        assertEmpty(execute("SELECT * FROM %s WHERE a = 0 AND (b, c, d) IN ()"));
+
+        // Equalities
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = (?)", 0, 1),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        // Same with whole tuple prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = ?", 0, tuple(1)),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) = (?, ?)", 0, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        // Same with whole tuple prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) = ?", 0, tuple(1, 1)),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) = (?, ?, ?)", 0, 1, 1, 1),
+            row(0, 1, 1, 1)
+        );
+
+        // Same with whole tuple prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) = ?", 0, tuple(1, 1, 1)),
+            row(0, 1, 1, 1)
+        );
+
+        // Inequalities
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?)", 0, 0),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?)", 0, 0),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) > (?, ?)", 0, 1, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= (?, ?)", 0, 1, 0),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?)", 0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) >= (?, ?, ?)", 0, 1, 1, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?)", 0, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?)", 0, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) < (?, ?)", 0, 0, 1),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) <= (?, ?)", 0, 0, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) < (?, ?, ?)", 0, 0, 1, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) <= (?, ?, ?)", 0, 0, 1, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b) < (?)", 0, 0, 1, 0, 1),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?)", 0, 0, 1, 1, 1, 1),
+            row(0, 1, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c, d) < (?, ?, ?)", 0, 0, 1, 1, 1, 1, 0),
+            row(0, 1, 0, 0)
+        );
+
+        // Same with whole tuple prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > ? AND (b, c, d) < ?", 0, tuple(0, 1, 1), tuple(1, 1, 0)),
+            row(0, 1, 0, 0)
+        );
 
         // reversed
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b) > ? ORDER BY b DESC, c DESC, d DESC"),
-                options(tuple(0)));
-        assertEquals(3, results.size());
-        checkRow(2, results, 0, 1, 0, 0);
-        checkRow(1, results, 0, 1, 1, 0);
-        checkRow(0, results, 0, 1, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) > ? AND (b, c) < ? ORDER BY b DESC, c DESC, d DESC"),
-                options(tuple(0, 1, 1), tuple(1, 1)));
-        assertEquals(1, results.size());
-        checkRow(0, results, 0, 1, 0, 0);
-    }
-
-    @Test
-    public void testPrepareLiteralIn() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        UntypedResultSet results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((?, ?, ?), (?, ?, ?))"),
-                makeIntOptions(0, 1, 0, 0, 1, 1));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?) ORDER BY b DESC, c DESC, d DESC", 0, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 1, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?) ORDER BY b DESC, c DESC, d DESC", 0, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 1, 0, 0),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) > (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 1, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 1, 0),
+            row(0, 1, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) >= (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 1, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?) ORDER BY b DESC, c DESC, d DESC", 0, 1),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?) ORDER BY b DESC, c DESC, d DESC", 0, 1),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 1, 0, 0),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) < (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) <= (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) < (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) <= (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b) < (?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 0, 1),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1, 1, 1),
+            row(0, 1, 0, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c, d) < (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1, 1, 1, 0),
+            row(0, 1, 0, 0)
+        );
+
+        // IN
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN ((?, ?, ?), (?, ?, ?))", 0, 0, 1, 0, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        // same query but with whole tuple prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN (?, ?)", 0, tuple(0, 1, 0), tuple(0, 1, 1)),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        // same query but with whole IN list prepared
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN ?", 0, list(tuple(0, 1, 0), tuple(0, 1, 1))),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
 
         // same query, but reversed order for the IN values
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((?, ?, ?), (?, ?, ?))"),
-                makeIntOptions(0, 1, 1, 0, 1, 0));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b, c) IN ((?, ?))"),
-                makeIntOptions(0, 1));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b) IN ((?))"),
-                makeIntOptions(0));
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-    }
-
-    @Test
-    public void testPrepareInOneMarkerPerTuple() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        UntypedResultSet results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN (?, ?)"),
-                options(tuple(0, 1, 0), tuple(0, 1, 1)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN (?, ?)", 0, tuple(0, 1, 1), tuple(0, 1, 0)),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? and (b, c) IN ((?, ?))", 0, 0, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? and (b) IN ((?))", 0, 0),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        // IN on both partition key and clustering key
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 1, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 1, 1);
+
+        assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) AND (b, c, d) IN (?, ?)", 0, 1, tuple(0, 1, 0), tuple(0, 1, 1)),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(1, 0, 1, 0),
+            row(1, 0, 1, 1)
+        );
+
+        // same but with whole IN lists prepared
+        assertRows(execute("SELECT * FROM %s WHERE a IN ? AND (b, c, d) IN ?", list(0, 1), list(tuple(0, 1, 0), tuple(0, 1, 1))),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(1, 0, 1, 0),
+            row(1, 0, 1, 1)
+        );
 
         // same query, but reversed order for the IN values
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN (?, ?)"),
-                options(tuple(0, 1, 1), tuple(0, 1, 0)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b, c) IN (?)"),
-                options(tuple(0, 1)));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b) IN (?)"),
-                options(tuple(0)));
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
+        assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) AND (b, c, d) IN (?, ?)", 1, 0, tuple(0, 1, 1), tuple(0, 1, 0)),
+            row(1, 0, 1, 0),
+            row(1, 0, 1, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) and (b, c) IN ((?, ?))", 0, 1, 0, 1),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(1, 0, 1, 0),
+            row(1, 0, 1, 1)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) and (b) IN ((?))", 0, 1, 0),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 0),
+            row(0, 0, 1, 1),
+            row(1, 0, 0, 0),
+            row(1, 0, 1, 0),
+            row(1, 0, 1, 1)
+        );
     }
 
     @Test
-    public void testPrepareInOneMarker() throws Throwable
-    {
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 0, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 0)");
-        execute("INSERT INTO %s.multiple_clustering (a, b, c, d) VALUES (0, 0, 1, 1)");
-
-        UntypedResultSet results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ?"),
-                options(list(tuple(0, 1, 0), tuple(0, 1, 1))));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        // same query, but reversed order for the IN values
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ?"),
-                options(list(tuple(0, 1, 1), tuple(0, 1, 0))));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare(
-                "SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ?"),
-                options(list()));
-        assertTrue(results.isEmpty());
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b, c) IN ?"),
-                options(list(tuple(0, 1))));
-        assertEquals(2, results.size());
-        checkRow(0, results, 0, 0, 1, 0);
-        checkRow(1, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b) IN ?"),
-                options(list(tuple(0))));
-        assertEquals(3, results.size());
-        checkRow(0, results, 0, 0, 0, 0);
-        checkRow(1, results, 0, 0, 1, 0);
-        checkRow(2, results, 0, 0, 1, 1);
-
-        results = executePrepared(prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 and (b) IN ?"),
-                options(list()));
-        assertTrue(results.isEmpty());
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareLiteralInWithShortTuple() throws Throwable
+    public void testMultipleClusteringReversedComponents() throws Throwable
     {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((?, ?))");
-    }
+        createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d)) WITH CLUSTERING ORDER BY (b DESC, c ASC, d DESC)");
 
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareLiteralInWithLongTuple() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE a=0 AND (b, c, d) IN ((?, ?, ?, ?, ?))");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareLiteralInWithPartitionKey() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE (a, b, c, d) IN ((?, ?, ?, ?))");
-    }
-
-    @Test(expected=InvalidRequestException.class)
-    public void testPrepareLiteralInSkipsClusteringColumn() throws Throwable
-    {
-        prepare("SELECT * FROM %s.multiple_clustering WHERE (c, d) IN ((?, ?))");
-    }
-
-    private static QueryOptions makeIntOptions(Integer... values)
-    {
-        List<ByteBuffer> buffers = new ArrayList<>(values.length);
-        for (int value : values)
-            buffers.add(ByteBufferUtil.bytes(value));
-        return QueryOptions.forInternalCalls(buffers);
-    }
-
-    private static ByteBuffer tuple(Integer... values)
-    {
-        List<AbstractType<?>> types = new ArrayList<>(values.length);
-        ByteBuffer[] buffers = new ByteBuffer[values.length];
-        for (int i = 0; i < values.length; i++)
-        {
-            types.add(Int32Type.instance);
-            buffers[i] = ByteBufferUtil.bytes(values[i]);
-        }
-
-        TupleType type = new TupleType(types);
-        return type.buildValue(buffers);
-    }
-
-    private static ByteBuffer list(ByteBuffer... values)
-    {
-        return CollectionSerializer.pack(Arrays.asList(values), values.length, 3);
-    }
-
-    private static QueryOptions options(ByteBuffer... buffers)
-    {
-        return QueryOptions.forInternalCalls(Arrays.asList(buffers));
-    }
+        // b and d are reversed in the clustering order
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 1);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 0);
+
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 1);
+        execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
+
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?)", 0, 0),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?)", 0, 0),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?)", 0, 1),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0)
+        );
+
+        assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?)", 0, 1),
+            row(0, 1, 0, 0),
+            row(0, 1, 1, 1),
+            row(0, 1, 1, 0),
+            row(0, 0, 0, 0),
+            row(0, 0, 1, 1),
+            row(0, 0, 1, 0)
+        );
 
-    private static void checkRow(int rowIndex, UntypedResultSet results, Integer... expectedValues)
-    {
-        List<UntypedResultSet.Row> rows = newArrayList(results.iterator());
-        UntypedResultSet.Row row = rows.get(rowIndex);
-        Iterator<ColumnSpecification> columns = row.getColumns().iterator();
-        for (Integer expected : expectedValues)
-        {
-            String columnName = columns.next().name.toString();
-            int actual = row.getInt(columnName);
-            assertEquals(String.format("Expected value %d for column %s in row %d, but got %s", actual, columnName, rowIndex, expected),
-                         (long) expected, actual);
-        }
+        // preserve pre-6875 behavior (even though the query result is technically incorrect)
+        assertEmpty(execute("SELECT * FROM %s WHERE a = ? AND (b, c) > (?, ?)", 0, 1, 0));
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/test/unit/org/apache/cassandra/cql3/TupleTypeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/TupleTypeTest.java b/test/unit/org/apache/cassandra/cql3/TupleTypeTest.java
new file mode 100644
index 0000000..84512a5
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/TupleTypeTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import org.junit.Test;
+
+public class TupleTypeTest extends CQLTester
+{
+    @Test
+    public void testTuplePutAndGet() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, t tuple<int, text, double>)");
+
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(3, "foo", 3.4));
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 1, tuple(8, "bar", 0.2));
+        assertAllRows(
+            row(0, tuple(3, "foo", 3.4)),
+            row(1, tuple(8, "bar", 0.2))
+        );
+
+        // nulls
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 2, tuple(5, null, 3.4));
+        assertRows(execute("SELECT * FROM %s WHERE k=?", 2),
+            row(2, tuple(5, null, 3.4))
+        );
+
+        // incomplete tuple
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 3, tuple(5, "bar"));
+        assertRows(execute("SELECT * FROM %s WHERE k=?", 3),
+            row(3, tuple(5, "bar"))
+        );
+    }
+
+    @Test
+    public void testNestedTuple() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, t tuple<int, tuple<text, double>>)");
+
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(3, tuple("foo", 3.4)));
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 1, tuple(8, tuple("bar", 0.2)));
+        assertAllRows(
+            row(0, tuple(3, tuple("foo", 3.4))),
+            row(1, tuple(8, tuple("bar", 0.2)))
+        );
+    }
+
+    @Test
+    public void testTupleInPartitionKey() throws Throwable
+    {
+        createTable("CREATE TABLE %s (t tuple<int, text> PRIMARY KEY)");
+
+        execute("INSERT INTO %s (t) VALUES (?)", tuple(3, "foo"));
+        assertAllRows(row(tuple(3, "foo")));
+    }
+
+    @Test
+    public void testTupleInClusteringKey() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int, t tuple<int, text>, PRIMARY KEY (k, t))");
+
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(5, "bar"));
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(3, "foo"));
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(6, "bar"));
+        execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(5, "foo"));
+
+        assertAllRows(
+            row(0, tuple(3, "foo")),
+            row(0, tuple(5, "bar")),
+            row(0, tuple(5, "foo")),
+            row(0, tuple(6, "bar"))
+        );
+    }
+
+    @Test
+    public void testInvalidQueries() throws Throwable
+    {
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, t tuple<int, text, double>)");
+
+        assertInvalid("INSERT INTO %s (k, t) VALUES (0, ())");
+        assertInvalid("INSERT INTO %s (k, t) VALUES (0, (2, 'foo', 3.1, 'bar'))");
+    }
+}


[2/3] git commit: Tuple types

Posted by sl...@apache.org.
Tuple types

patch by slebresne; reviewed by thobbs for CASSANDRA-7248


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/0932ed67
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/0932ed67
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/0932ed67

Branch: refs/heads/trunk
Commit: 0932ed670c66ca2f8c5dc1450b85590738b773c9
Parents: 01c5e34
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Fri May 23 09:53:07 2014 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Thu May 29 11:33:11 2014 +0200

----------------------------------------------------------------------
 CHANGES.txt                                     |    1 +
 build.xml                                       |   41 +-
 doc/native_protocol_v3.spec                     |   26 +-
 .../org/apache/cassandra/config/UTMetaData.java |   10 +-
 .../org/apache/cassandra/cql3/CQL3Type.java     |  106 ++
 src/java/org/apache/cassandra/cql3/Cql.g        |   44 +-
 src/java/org/apache/cassandra/cql3/Tuples.java  |  103 +-
 .../apache/cassandra/cql3/UntypedResultSet.java |   18 +
 .../org/apache/cassandra/cql3/UserTypes.java    |   14 +-
 .../cql3/statements/AlterTypeStatement.java     |   26 +-
 .../cql3/statements/CreateTypeStatement.java    |    8 +-
 .../cql3/statements/DropTypeStatement.java      |    2 +-
 .../cql3/statements/SelectStatement.java        |   34 +-
 .../cassandra/cql3/statements/Selection.java    |   10 +-
 .../apache/cassandra/db/marshal/TupleType.java  |   56 +-
 .../apache/cassandra/db/marshal/UserType.java   |  174 +--
 .../apache/cassandra/transport/DataType.java    |   40 +-
 test/conf/logback-test.xml                      |    4 +-
 .../unit/org/apache/cassandra/SchemaLoader.java |   14 +-
 .../org/apache/cassandra/cql3/CQLTester.java    |  472 ++++++
 .../cassandra/cql3/MultiColumnRelationTest.java | 1434 +++++-------------
 .../apache/cassandra/cql3/TupleTypeTest.java    |   97 ++
 22 files changed, 1445 insertions(+), 1289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index fe8242b..2cb2f5d 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -29,6 +29,7 @@
  * Handle overlapping MultiSlices (CASSANDRA-7279)
  * Fix DataOutputTest on Windows (CASSANDRA-7265)
  * Embedded sets in user defined data-types are not updating (CASSANDRA-7267)
+ * Add tuple type to CQL/native protocol (CASSANDRA-7248)
 Merged from 2.0:
  * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
  * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/build.xml
----------------------------------------------------------------------
diff --git a/build.xml b/build.xml
index 680efc1..5740577 100644
--- a/build.xml
+++ b/build.xml
@@ -61,6 +61,7 @@
     <property name="test.long.src" value="${test.dir}/long"/>
     <property name="test.pig.src" value="${test.dir}/pig"/>
     <property name="dist.dir" value="${build.dir}/dist"/>
+
 	
 	<property name="source.version" value="1.7"/>
 	<property name="target.version" value="1.7"/>
@@ -92,6 +93,9 @@
     <property name="test.timeout" value="60000" />
     <property name="test.long.timeout" value="600000" />
 
+    <!-- default for cql tests. Can be overridden by -Dcassandra.test.use_prepared=false -->
+    <property name="cassandra.test.use_prepared" value="true" />
+
     <!-- http://cobertura.sourceforge.net/ -->
     <property name="cobertura.version" value="2.0.3"/>
     <property name="cobertura.build.dir" value="${build.dir}/cobertura"/>
@@ -444,8 +448,6 @@
                 artifactId="cassandra-parent"
                 version="${version}"/>
         <dependency groupId="joda-time" artifactId="joda-time" version="2.3" />
-        <dependency groupId="org.slf4j" artifactId="slf4j-log4j12" version="1.7.2"/>
-        <dependency groupId="log4j" artifactId="log4j" version="1.2.16" />
       </artifact:pom>
 
       <!-- now the pom's for artifacts being deployed to Maven Central -->
@@ -1202,6 +1204,41 @@
     </testmacro>
   </target>
 
+  <target name="cql-test" depends="build-test" description="Execute CQL tests">
+    <testmacro suitename="cql" inputdir="${test.unit.src}"
+               timeout="${test.timeout}" filter="**/cql3/*Test.java">
+      <jvmarg value="-Dcassandra.test.use_prepared=${cassandra.test.use_prepared}"/>
+    </testmacro>
+  </target>
+
+  <target name="cql-test-some" depends="build-test" description="Execute specific CQL tests" >
+    <sequential>
+      <echo message="running ${test.methods} tests from ${test.name}"/>
+      <mkdir dir="${build.test.dir}/cassandra"/>
+      <mkdir dir="${build.test.dir}/output"/>
+      <junit fork="on" failureproperty="testfailed" maxmemory="1024m" timeout="${test.timeout}">
+        <sysproperty key="net.sourceforge.cobertura.datafile" file="${cobertura.datafile}"/>
+        <formatter type="brief" usefile="false"/>
+        <jvmarg value="-Dstorage-config=${test.conf}"/>
+        <jvmarg value="-Djava.awt.headless=true"/>
+        <jvmarg value="-javaagent:${basedir}/lib/jamm-0.2.6.jar" />
+        <jvmarg value="-ea"/>
+        <jvmarg value="-Xss256k"/>
+        <jvmarg value="-Dcassandra.test.use_prepared=${cassandra.test.use_prepared}"/>
+        <classpath>
+          <path refid="cassandra.classpath" />
+          <pathelement location="${test.classes}"/>
+          <path refid="cobertura.classpath"/>
+          <pathelement location="${test.conf}"/>
+          <fileset dir="${test.lib}">
+            <include name="**/*.jar" />
+          </fileset>
+        </classpath>
+        <test name="org.apache.cassandra.cql3.${test.name}" methods="${test.methods}" todir="${build.test.dir}/output"/>
+      </junit>
+    </sequential>
+  </target>
+
   <target name="pig-test" depends="build-test,maven-ant-tasks-retrieve-pig-test" description="Excute Pig tests">
     <testmacro suitename="pig" inputdir="${test.pig.src}" 
                timeout="1200000">

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/doc/native_protocol_v3.spec
----------------------------------------------------------------------
diff --git a/doc/native_protocol_v3.spec b/doc/native_protocol_v3.spec
index 6719838..400868c 100644
--- a/doc/native_protocol_v3.spec
+++ b/doc/native_protocol_v3.spec
@@ -38,7 +38,7 @@ Table of Contents
       4.2.8. AUTH_SUCCESS
   5. Compression
   6. Collection types
-  7. User Defined types
+  7. User Defined and tuple types
   8. Result paging
   9. Error codes
   10. Changes from v2
@@ -306,13 +306,13 @@ Table of Contents
               Section 4.2.5.2).
         0x04: Page_size. In that case, <result_page_size> is an [int]
               controlling the desired page size of the result (in CQL3 rows).
-              See the section on paging (Section 7) for more details.
+              See the section on paging (Section 8) for more details.
         0x08: With_paging_state. If present, <paging_state> should be present.
               <paging_state> is a [bytes] value that should have been returned
               in a result set (Section 4.2.5.2). If provided, the query will be
              executed but starting from a given paging state. This also allows
              paging to continue on a different node from the one where it was
-              started (See Section 7 for more details).
+              started (See Section 8 for more details).
         0x10: With serial consistency. If present, <serial_consistency> should be
               present. <serial_consistency> is the [consistency] level for the
              serial phase of conditional updates. That consistency can only be
@@ -529,7 +529,7 @@ Table of Contents
                       <paging_state> will be present. The <paging_state> is a
                       [bytes] value that should be used in QUERY/EXECUTE to
                       continue paging and retrieve the remained of the result for
-                      this query (See Section 7 for more details).
+                      this query (See Section 8 for more details).
             0x0004    No_metadata: if set, the <metadata> is only composed of
                       these <flags>, the <column_count> and optionally the
                       <paging_state> (depending on the Has_more_pages flage) but
@@ -592,6 +592,10 @@ Table of Contents
                                 i_th field of the UDT.
                               - <type_i> is an [option] representing the type of the
                                 i_th field of the UDT.
+            0x0031    Tuple: the value is <n><type_1>...<type_n> where <n> is a [short]
+                             representing the number of values in the type, and each
+                             <type_i> is an [option] representing the type of the i_th
+                             component of the tuple
 
     - <rows_count> is an [int] representing the number of rows present in this
       result. Those rows are serialized in the <rows_content> part.
@@ -756,16 +760,20 @@ Table of Contents
           value.
 
 
-7. User defined types
+7. User defined and tuple types
 
-  This section describes the serialization format for User defined types (UDT) values.
-  UDT values are the values of the User Defined Types as defined in section 4.2.5.2.
+  This section describes the serialization format for User defined types (UDT) and
+  tuple values. UDT (resp. tuple) values are the values of the User Defined Types
+  (resp. tuple type) as defined in section 4.2.5.2.
 
   A UDT value is composed of successive [bytes] values, one for each field of the UDT
   value (in the order defined by the type). A UDT value will generally have one value
   for each field of the type it represents, but it is allowed to have less values than
   the type has fields.
 
+  A tuple value has the exact same serialization format, i.e. a succession of
+  [bytes] values representing the components of the tuple.
+
 
 8. Result paging
 
@@ -896,8 +904,8 @@ Table of Contents
 10. Changes from v2
   * BATCH messages now have <flags> (like QUERY and EXECUTE) and a corresponding optional
     <serial_consistency> parameters (see Section 4.1.7).
-  * User Defined Types have to added to ResultSet metadata (see 4.2.5.2) and a new section
-    on the serialization format of UDT values has been added to the documentation
+  * User Defined Types and tuple types have been added to ResultSet metadata (see 4.2.5.2) and a
+    new section on the serialization format of UDT and tuple values has been added to the documentation
     (Section 7).
   * The serialization format for collection has changed (both the collection size and
     the length of each argument is now 4 bytes long). See Section 6.
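
(Editor's note, not part of the commit: a minimal, self-contained Java sketch of the Section 7
serialization described above. It assumes the protocol's [bytes] encoding, i.e. a 4-byte signed
length followed by that many bytes, with a negative length denoting null; the class and method
names are illustrative only.)

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class TupleValueSketch
    {
        // Packs components as a succession of [bytes] values: a 4-byte signed
        // length, then the component bytes; a null component is written as -1.
        public static ByteBuffer pack(ByteBuffer... components)
        {
            int size = 0;
            for (ByteBuffer c : components)
                size += 4 + (c == null ? 0 : c.remaining());

            ByteBuffer out = ByteBuffer.allocate(size);
            for (ByteBuffer c : components)
            {
                if (c == null)
                {
                    out.putInt(-1);
                }
                else
                {
                    out.putInt(c.remaining());
                    out.put(c.duplicate());
                }
            }
            out.flip();
            return out;
        }

        public static void main(String[] args)
        {
            // A tuple<int, text> value (3, 'foo'): 4+4 bytes for the int
            // component plus 4+3 bytes for the text component, 15 bytes total.
            ByteBuffer intPart = ByteBuffer.allocate(4).putInt(3);
            intPart.flip();
            ByteBuffer textPart = ByteBuffer.wrap("foo".getBytes(StandardCharsets.UTF_8));
            System.out.println(pack(intPart, textPart).remaining()); // prints 15
        }
    }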

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/config/UTMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/UTMetaData.java b/src/java/org/apache/cassandra/config/UTMetaData.java
index 178e653..ee653a8 100644
--- a/src/java/org/apache/cassandra/config/UTMetaData.java
+++ b/src/java/org/apache/cassandra/config/UTMetaData.java
@@ -100,11 +100,11 @@ public final class UTMetaData
         adder.resetCollection("field_names");
         adder.resetCollection("field_types");
 
-        for (ByteBuffer name : newType.fieldNames)
-            adder.addListEntry("field_names", name);
-        for (AbstractType<?> type : newType.fieldTypes)
-            adder.addListEntry("field_types", type.toString());
-
+        for (int i = 0; i < newType.size(); i++)
+        {
+            adder.addListEntry("field_names", newType.fieldName(i));
+            adder.addListEntry("field_types", newType.fieldType(i).toString());
+        }
         return mutation;
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/CQL3Type.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/CQL3Type.java b/src/java/org/apache/cassandra/cql3/CQL3Type.java
index f07bc19..a26a903 100644
--- a/src/java/org/apache/cassandra/cql3/CQL3Type.java
+++ b/src/java/org/apache/cassandra/cql3/CQL3Type.java
@@ -17,6 +17,9 @@
  */
 package org.apache.cassandra.cql3;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.cassandra.config.KSMetaData;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.marshal.*;
@@ -221,6 +224,62 @@ public interface CQL3Type
         }
     }
 
+    public static class Tuple implements CQL3Type
+    {
+        private final TupleType type;
+
+        private Tuple(TupleType type)
+        {
+            this.type = type;
+        }
+
+        public static Tuple create(TupleType type)
+        {
+            return new Tuple(type);
+        }
+
+        public boolean isCollection()
+        {
+            return false;
+        }
+
+        public AbstractType<?> getType()
+        {
+            return type;
+        }
+
+        @Override
+        public final boolean equals(Object o)
+        {
+            if(!(o instanceof Tuple))
+                return false;
+
+            Tuple that = (Tuple)o;
+            return type.equals(that.type);
+        }
+
+        @Override
+        public final int hashCode()
+        {
+            return type.hashCode();
+        }
+
+        @Override
+        public String toString()
+        {
+            StringBuilder sb = new StringBuilder();
+            sb.append("tuple<");
+            for (int i = 0; i < type.size(); i++)
+            {
+                if (i > 0)
+                    sb.append(", ");
+                sb.append(type.type(i).asCQL3Type());
+            }
+            sb.append(">");
+            return sb.toString();
+        }
+    }
+
     // For UserTypes, we need to know the current keyspace to resolve the
     // actual type used, so Raw is a "not yet prepared" CQL3Type.
     public abstract class Raw
@@ -277,6 +336,15 @@ public interface CQL3Type
             return new RawCollection(CollectionType.Kind.SET, null, t);
         }
 
+        public static Raw tuple(List<CQL3Type.Raw> ts) throws InvalidRequestException
+        {
+            for (int i = 0; i < ts.size(); i++)
+                if (ts.get(i).isCounter())
+                    throw new InvalidRequestException("counters are not allowed inside tuples");
+
+            return new RawTuple(ts);
+        }
+
         private static class RawType extends Raw
         {
             private CQL3Type type;
@@ -386,5 +454,43 @@ public interface CQL3Type
                 return name.toString();
             }
         }
+
+        private static class RawTuple extends Raw
+        {
+            private final List<CQL3Type.Raw> types;
+
+            private RawTuple(List<CQL3Type.Raw> types)
+            {
+                this.types = types;
+            }
+
+            public boolean isCollection()
+            {
+                return false;
+            }
+
+            public CQL3Type prepare(String keyspace) throws InvalidRequestException
+            {
+                List<AbstractType<?>> ts = new ArrayList<>(types.size());
+                for (CQL3Type.Raw t : types)
+                    ts.add(t.prepare(keyspace).getType());
+                return new Tuple(new TupleType(ts));
+            }
+
+            @Override
+            public String toString()
+            {
+                StringBuilder sb = new StringBuilder();
+                sb.append("tuple<");
+                for (int i = 0; i < types.size(); i++)
+                {
+                    if (i > 0)
+                        sb.append(", ");
+                    sb.append(types.get(i));
+                }
+                sb.append(">");
+                return sb.toString();
+            }
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/Cql.g
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Cql.g b/src/java/org/apache/cassandra/cql3/Cql.g
index 57b61a5..f3681c4 100644
--- a/src/java/org/apache/cassandra/cql3/Cql.g
+++ b/src/java/org/apache/cassandra/cql3/Cql.g
@@ -846,13 +846,13 @@ constant returns [Constants.Literal constant]
     | { String sign=""; } ('-' {sign = "-"; } )? t=(K_NAN | K_INFINITY) { $constant = Constants.Literal.floatingPoint(sign + $t.text); }
     ;
 
-map_literal returns [Maps.Literal map]
+mapLiteral returns [Maps.Literal map]
     : '{' { List<Pair<Term.Raw, Term.Raw>> m = new ArrayList<Pair<Term.Raw, Term.Raw>>(); }
           ( k1=term ':' v1=term { m.add(Pair.create(k1, v1)); } ( ',' kn=term ':' vn=term { m.add(Pair.create(kn, vn)); } )* )?
       '}' { $map = new Maps.Literal(m); }
     ;
 
-set_or_map[Term.Raw t] returns [Term.Raw value]
+setOrMapLiteral[Term.Raw t] returns [Term.Raw value]
     : ':' v=term { List<Pair<Term.Raw, Term.Raw>> m = new ArrayList<Pair<Term.Raw, Term.Raw>>(); m.add(Pair.create(t, v)); }
           ( ',' kn=term ':' vn=term { m.add(Pair.create(kn, vn)); } )*
       { $value = new Maps.Literal(m); }
@@ -861,27 +861,34 @@ set_or_map[Term.Raw t] returns [Term.Raw value]
       { $value = new Sets.Literal(s); }
     ;
 
-collection_literal returns [Term.Raw value]
+collectionLiteral returns [Term.Raw value]
     : '[' { List<Term.Raw> l = new ArrayList<Term.Raw>(); }
           ( t1=term { l.add(t1); } ( ',' tn=term { l.add(tn); } )* )?
       ']' { $value = new Lists.Literal(l); }
-    | '{' t=term v=set_or_map[t] { $value = v; } '}'
+    | '{' t=term v=setOrMapLiteral[t] { $value = v; } '}'
     // Note that we have an ambiguity between maps and set for "{}". So we force it to a set literal,
     // and deal with it later based on the type of the column (SetLiteral.java).
     | '{' '}' { $value = new Sets.Literal(Collections.<Term.Raw>emptyList()); }
     ;
 
-usertype_literal returns [UserTypes.Literal ut]
+usertypeLiteral returns [UserTypes.Literal ut]
     @init{ Map<ColumnIdentifier, Term.Raw> m = new HashMap<ColumnIdentifier, Term.Raw>(); }
     @after{ $ut = new UserTypes.Literal(m); }
     // We don't allow empty literals because that conflicts with sets/maps and is currently useless since we don't allow empty user types
     : '{' k1=cident ':' v1=term { m.put(k1, v1); } ( ',' kn=cident ':' vn=term { m.put(kn, vn); } )* '}'
     ;
 
+tupleLiteral returns [Tuples.Literal tt]
+    @init{ List<Term.Raw> l = new ArrayList<Term.Raw>(); }
+    @after{ $tt = new Tuples.Literal(l); }
+    : '(' t1=term { l.add(t1); } ( ',' tn=term { l.add(tn); } )* ')'
+    ;
+
 value returns [Term.Raw value]
     : c=constant           { $value = c; }
-    | l=collection_literal { $value = l; }
-    | u=usertype_literal   { $value = u; }
+    | l=collectionLiteral  { $value = l; }
+    | u=usertypeLiteral    { $value = u; }
+    | t=tupleLiteral       { $value = t; }
     | K_NULL               { $value = Constants.NULL_LITERAL; }
     | ':' id=cident        { $value = newBindVariables(id); }
     | QMARK                { $value = newBindVariables(null); }
@@ -959,7 +966,7 @@ properties[PropertyDefinitions props]
 
 property[PropertyDefinitions props]
     : k=cident '=' (simple=propertyValue { try { $props.addProperty(k.toString(), simple); } catch (SyntaxException e) { addRecognitionError(e.getMessage()); } }
-                   |   map=map_literal   { try { $props.addProperty(k.toString(), convertPropertyMap(map)); } catch (SyntaxException e) { addRecognitionError(e.getMessage()); } })
+                   |   map=mapLiteral    { try { $props.addProperty(k.toString(), convertPropertyMap(map)); } catch (SyntaxException e) { addRecognitionError(e.getMessage()); } })
     ;
 
 propertyValue returns [String str]
@@ -1026,11 +1033,6 @@ singleColumnInValues returns [List<Term.Raw> terms]
     : '(' ( t1 = term { $terms.add(t1); } (',' ti=term { $terms.add(ti); })* )? ')'
     ;
 
-tupleLiteral returns [Tuples.Literal literal]
-    @init { List<Term.Raw> terms = new ArrayList<>(); }
-    : '(' t1=term { terms.add(t1); } (',' ti=term { terms.add(ti); })* ')' { $literal = new Tuples.Literal(terms); }
-    ;
-
 tupleOfTupleLiterals returns [List<Tuples.Literal> literals]
     @init { $literals = new ArrayList<>(); }
     : '(' t1=tupleLiteral { $literals.add(t1); } (',' ti=tupleLiteral { $literals.add(ti); })* ')'
@@ -1054,7 +1056,8 @@ inMarkerForTuple returns [Tuples.INRaw marker]
 comparatorType returns [CQL3Type.Raw t]
     : n=native_type     { $t = CQL3Type.Raw.from(n); }
     | c=collection_type { $t = c; }
-    | id=userTypeName  { $t = CQL3Type.Raw.userType(id); }
+    | tt=tuple_type     { $t = tt; }
+    | id=userTypeName   { $t = CQL3Type.Raw.userType(id); }
     | s=STRING_LITERAL
       {
         try {
@@ -1099,6 +1102,12 @@ collection_type returns [CQL3Type.Raw pt]
         { try { if (t != null) $pt = CQL3Type.Raw.set(t); } catch (InvalidRequestException e) { addRecognitionError(e.getMessage()); } }
     ;
 
+tuple_type returns [CQL3Type.Raw t]
+    : K_TUPLE '<' { List<CQL3Type.Raw> types = new ArrayList<>(); }
+         t1=comparatorType { types.add(t1); } (',' tn=comparatorType { types.add(tn); })*
+      '>' { try { $t = CQL3Type.Raw.tuple(types); } catch (InvalidRequestException e) { addRecognitionError(e.getMessage()); }}
+    ;
+
 username
     : IDENT
     | STRING_LITERAL
@@ -1110,11 +1119,12 @@ non_type_ident returns [ColumnIdentifier id]
     : t=IDENT                    { if (reservedTypeNames.contains($t.text)) addRecognitionError("Invalid (reserved) user type name " + $t.text); $id = new ColumnIdentifier($t.text, false); }
     | t=QUOTED_NAME              { $id = new ColumnIdentifier($t.text, true); }
     | k=basic_unreserved_keyword { $id = new ColumnIdentifier(k, false); }
+    | kk=K_KEY                   { $id = new ColumnIdentifier($kk.text, false); }
     ;
 
 unreserved_keyword returns [String str]
     : u=unreserved_function_keyword     { $str = u; }
-    | k=(K_TTL | K_COUNT | K_WRITETIME) { $str = $k.text; }
+    | k=(K_TTL | K_COUNT | K_WRITETIME | K_KEY) { $str = $k.text; }
     ;
 
 unreserved_function_keyword returns [String str]
@@ -1123,8 +1133,7 @@ unreserved_function_keyword returns [String str]
     ;
 
 basic_unreserved_keyword returns [String str]
-    : k=( K_KEY
-        | K_KEYS
+    : k=( K_KEYS
         | K_AS
         | K_CLUSTERING
         | K_COMPACT
@@ -1251,6 +1260,7 @@ K_MAP:         M A P;
 K_LIST:        L I S T;
 K_NAN:         N A N;
 K_INFINITY:    I N F I N I T Y;
+K_TUPLE:       T U P L E;
 
 K_TRIGGER:     T R I G G E R;
 K_STATIC:      S T A T I C;
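
(Editor's note, not part of the commit: with the tuple_type and tupleLiteral rules above,
tuple<int, text, double> is now accepted wherever a comparatorType is, and a parenthesized term
list such as (2, 'foo', 3.1) parses as a tuple literal; the TupleTypeTest added earlier in this
patch declares columns of these types and uses the literal form in its invalid-query cases.)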

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/Tuples.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Tuples.java b/src/java/org/apache/cassandra/cql3/Tuples.java
index 384633d..e1ce551 100644
--- a/src/java/org/apache/cassandra/cql3/Tuples.java
+++ b/src/java/org/apache/cassandra/cql3/Tuples.java
@@ -17,17 +17,15 @@
  */
 package org.apache.cassandra.cql3;
 
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CollectionType;
-import org.apache.cassandra.db.marshal.ListType;
-import org.apache.cassandra.db.marshal.TupleType;
-import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.serializers.MarshalException;
+import java.nio.ByteBuffer;
+import java.util.*;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.nio.ByteBuffer;
-import java.util.*;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.serializers.MarshalException;
 
 /**
  * Static helper methods and classes for tuples.
@@ -36,6 +34,16 @@ public class Tuples
 {
     private static final Logger logger = LoggerFactory.getLogger(Tuples.class);
 
+    private Tuples() {}
+
+    public static ColumnSpecification componentSpecOf(ColumnSpecification column, int component)
+    {
+        return new ColumnSpecification(column.ksName,
+                                       column.cfName,
+                                       new ColumnIdentifier(String.format("%s[%d]", column.name, component), true),
+                                       ((TupleType)column.type).type(component));
+    }
+
     /**
      * A raw, literal tuple.  When prepared, this will become a Tuples.Value or Tuples.DelayedValue, depending
      * on whether the tuple holds NonTerminals.
@@ -49,6 +57,24 @@ public class Tuples
             this.elements = elements;
         }
 
+        public Term prepare(String keyspace, ColumnSpecification receiver) throws InvalidRequestException
+        {
+            validateAssignableTo(keyspace, receiver);
+
+            List<Term> values = new ArrayList<>(elements.size());
+            boolean allTerminal = true;
+            for (int i = 0; i < elements.size(); i++)
+            {
+                Term value = elements.get(i).prepare(keyspace, componentSpecOf(receiver, i));
+                if (value instanceof Term.NonTerminal)
+                    allTerminal = false;
+
+                values.add(value);
+            }
+            DelayedValue value = new DelayedValue(values);
+            return allTerminal ? value.bind(QueryOptions.DEFAULT) : value;
+        }
+
         public Term prepare(String keyspace, List<? extends ColumnSpecification> receivers) throws InvalidRequestException
         {
             if (elements.size() != receivers.size())
@@ -68,15 +94,36 @@ public class Tuples
             return allTerminal ? value.bind(QueryOptions.DEFAULT) : value;
         }
 
-        public Term prepare(String keyspace, ColumnSpecification receiver)
+        private void validateAssignableTo(String keyspace, ColumnSpecification receiver) throws InvalidRequestException
         {
-            throw new AssertionError("Tuples.Literal instances require a list of receivers for prepare()");
+            if (!(receiver.type instanceof TupleType))
+                throw new InvalidRequestException(String.format("Invalid tuple type literal for %s of type %s", receiver.name, receiver.type.asCQL3Type()));
+
+            TupleType tt = (TupleType)receiver.type;
+            for (int i = 0; i < elements.size(); i++)
+            {
+                if (i >= tt.size())
+                    throw new InvalidRequestException(String.format("Invalid tuple literal for %s: too many elements. Type %s expects %d but got %d",
+                                                                    receiver.name, tt.asCQL3Type(), tt.size(), elements.size()));
+
+                Term.Raw value = elements.get(i);
+                ColumnSpecification spec = componentSpecOf(receiver, i);
+                if (!value.isAssignableTo(keyspace, spec))
+                    throw new InvalidRequestException(String.format("Invalid tuple literal for %s: component %d is not of type %s", receiver.name, i, spec.type.asCQL3Type()));
+            }
         }
 
         public boolean isAssignableTo(String keyspace, ColumnSpecification receiver)
         {
-            // tuples shouldn't be assignable to anything right now
-            return false;
+            try
+            {
+                validateAssignableTo(keyspace, receiver);
+                return true;
+            }
+            catch (InvalidRequestException e)
+            {
+                return false;
+            }
         }
 
         @Override
@@ -105,7 +152,7 @@ public class Tuples
 
         public ByteBuffer get(QueryOptions options)
         {
-            throw new UnsupportedOperationException();
+            return TupleType.buildValue(elements);
         }
 
         public List<ByteBuffer> getElements()
@@ -141,18 +188,28 @@ public class Tuples
                 term.collectMarkerSpecification(boundNames);
         }
 
-        public Value bind(QueryOptions options) throws InvalidRequestException
+        private ByteBuffer[] bindInternal(QueryOptions options) throws InvalidRequestException
         {
-            ByteBuffer[] buffers = new ByteBuffer[elements.size()];
-            for (int i=0; i < elements.size(); i++)
-            {
-                ByteBuffer bytes = elements.get(i).bindAndGet(options);
-                if (bytes == null)
-                    throw new InvalidRequestException("Tuples may not contain null values");
+            // Inside tuples, we must force the serialization of collections regardless of the protocol
+            // version in use, since we store that serialized value directly.
+            options = options.withProtocolVersion(3);
 
+            ByteBuffer[] buffers = new ByteBuffer[elements.size()];
+            for (int i = 0; i < elements.size(); i++)
                 buffers[i] = elements.get(i).bindAndGet(options);
-            }
-            return new Value(buffers);
+            return buffers;
+        }
+
+        public Value bind(QueryOptions options) throws InvalidRequestException
+        {
+            return new Value(bindInternal(options));
+        }
+
+        @Override
+        public ByteBuffer bindAndGet(QueryOptions options) throws InvalidRequestException
+        {
+            // We don't "need" that override but it saves us the allocation of a Value object if used
+            return TupleType.buildValue(bindInternal(options));
         }
 
         @Override
@@ -343,4 +400,4 @@ public class Tuples
         sb.append(')');
         return sb.toString();
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
index 7e0f15a..42d0cb8 100644
--- a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
+++ b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
@@ -55,6 +55,9 @@ public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
     public abstract int size();
     public abstract Row one();
 
+    // Not implemented by all subclasses, but we use it when we know it's there (for tests)
+    public abstract List<ColumnSpecification> metadata();
+
     private static class FromResultSet extends UntypedResultSet
     {
         private final ResultSet cqlRows;
@@ -90,6 +93,11 @@ public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
                 }
             };
         }
+
+        public List<ColumnSpecification> metadata()
+        {
+            return cqlRows.metadata.names;
+        }
     }
 
     private static class FromResultList extends UntypedResultSet
@@ -127,6 +135,11 @@ public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
                 }
             };
         }
+
+        public List<ColumnSpecification> metadata()
+        {
+            throw new UnsupportedOperationException();
+        }
     }
 
     private static class FromPager extends UntypedResultSet
@@ -176,6 +189,11 @@ public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
                 }
             };
         }
+
+        public List<ColumnSpecification> metadata()
+        {
+            return metadata;
+        }
     }
 
     public static class Row

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/UserTypes.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/UserTypes.java b/src/java/org/apache/cassandra/cql3/UserTypes.java
index ecffe31..c5469d2 100644
--- a/src/java/org/apache/cassandra/cql3/UserTypes.java
+++ b/src/java/org/apache/cassandra/cql3/UserTypes.java
@@ -36,7 +36,7 @@ public abstract class UserTypes
         return new ColumnSpecification(column.ksName,
                                        column.cfName,
                                        new ColumnIdentifier(column.name + "." + field, true),
-                                       ((UserType)column.type).fieldTypes.get(field));
+                                       ((UserType)column.type).fieldType(field));
     }
 
     public static class Literal implements Term.Raw
@@ -55,9 +55,9 @@ public abstract class UserTypes
             UserType ut = (UserType)receiver.type;
             boolean allTerminal = true;
             List<Term> values = new ArrayList<>(entries.size());
-            for (int i = 0; i < ut.fieldTypes.size(); i++)
+            for (int i = 0; i < ut.size(); i++)
             {
-                ColumnIdentifier field = new ColumnIdentifier(ut.fieldNames.get(i), UTF8Type.instance);
+                ColumnIdentifier field = new ColumnIdentifier(ut.fieldName(i), UTF8Type.instance);
                 Term.Raw raw = entries.get(field);
                 if (raw == null)
                     raw = Constants.NULL_LITERAL;
@@ -78,9 +78,9 @@ public abstract class UserTypes
                 throw new InvalidRequestException(String.format("Invalid user type literal for %s of type %s", receiver, receiver.type.asCQL3Type()));
 
             UserType ut = (UserType)receiver.type;
-            for (int i = 0; i < ut.fieldTypes.size(); i++)
+            for (int i = 0; i < ut.size(); i++)
             {
-                ColumnIdentifier field = new ColumnIdentifier(ut.fieldNames.get(i), UTF8Type.instance);
+                ColumnIdentifier field = new ColumnIdentifier(ut.fieldName(i), UTF8Type.instance);
                 Term.Raw value = entries.get(field);
                 if (value == null)
                     continue;
@@ -144,7 +144,7 @@ public abstract class UserTypes
 
         public void collectMarkerSpecification(VariableSpecifications boundNames)
         {
-            for (int i = 0; i < type.fieldTypes.size(); i++)
+            for (int i = 0; i < type.size(); i++)
                 values.get(i).collectMarkerSpecification(boundNames);
         }
 
@@ -155,7 +155,7 @@ public abstract class UserTypes
             options = options.withProtocolVersion(3);
 
             ByteBuffer[] buffers = new ByteBuffer[values.size()];
-            for (int i = 0; i < type.fieldTypes.size(); i++)
+            for (int i = 0; i < type.size(); i++)
                 buffers[i] = values.get(i).bindAndGet(options);
             return buffers;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
index eac936f..1996457 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
@@ -138,8 +138,8 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
 
     private static int getIdxOfField(UserType type, ColumnIdentifier field)
     {
-        for (int i = 0; i < type.fieldTypes.size(); i++)
-            if (field.bytes.equals(type.fieldNames.get(i)))
+        for (int i = 0; i < type.size(); i++)
+            if (field.bytes.equals(type.fieldName(i)))
                 return i;
         return -1;
     }
@@ -183,8 +183,8 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
                 return updated;
 
             // Otherwise, check for nesting
-            List<AbstractType<?>> updatedTypes = updateTypes(ut.fieldTypes, keyspace, toReplace, updated);
-            return updatedTypes == null ? null : new UserType(ut.keyspace, ut.name, new ArrayList<>(ut.fieldNames), updatedTypes);
+            List<AbstractType<?>> updatedTypes = updateTypes(ut.fieldTypes(), keyspace, toReplace, updated);
+            return updatedTypes == null ? null : new UserType(ut.keyspace, ut.name, new ArrayList<>(ut.fieldNames()), updatedTypes);
         }
         else if (type instanceof CompositeType)
         {
@@ -275,12 +275,12 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
             if (getIdxOfField(toUpdate, fieldName) >= 0)
                 throw new InvalidRequestException(String.format("Cannot add new field %s to type %s: a field of the same name already exists", fieldName, name));
 
-            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.fieldNames.size() + 1);
-            newNames.addAll(toUpdate.fieldNames);
+            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.size() + 1);
+            newNames.addAll(toUpdate.fieldNames());
             newNames.add(fieldName.bytes);
 
-            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.fieldTypes.size() + 1);
-            newTypes.addAll(toUpdate.fieldTypes);
+            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.size() + 1);
+            newTypes.addAll(toUpdate.fieldTypes());
             newTypes.add(type.prepare(keyspace()).getType());
 
             return new UserType(toUpdate.keyspace, toUpdate.name, newNames, newTypes);
@@ -292,12 +292,12 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
             if (idx < 0)
                 throw new InvalidRequestException(String.format("Unknown field %s in type %s", fieldName, name));
 
-            AbstractType<?> previous = toUpdate.fieldTypes.get(idx);
+            AbstractType<?> previous = toUpdate.fieldType(idx);
             if (!type.prepare(keyspace()).getType().isCompatibleWith(previous))
                 throw new InvalidRequestException(String.format("Type %s is incompatible with previous type %s of field %s in user type %s", type, previous.asCQL3Type(), fieldName, name));
 
-            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.fieldNames);
-            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.fieldTypes);
+            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.fieldNames());
+            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.fieldTypes());
             newTypes.set(idx, type.prepare(keyspace()).getType());
 
             return new UserType(toUpdate.keyspace, toUpdate.name, newNames, newTypes);
@@ -321,8 +321,8 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
 
         protected UserType makeUpdatedType(UserType toUpdate) throws InvalidRequestException
         {
-            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.fieldNames);
-            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.fieldTypes);
+            List<ByteBuffer> newNames = new ArrayList<>(toUpdate.fieldNames());
+            List<AbstractType<?>> newTypes = new ArrayList<>(toUpdate.fieldTypes());
 
             for (Map.Entry<ColumnIdentifier, ColumnIdentifier> entry : renames.entrySet())
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/statements/CreateTypeStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateTypeStatement.java
index cd3e3e5..cc5447e 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateTypeStatement.java
@@ -74,12 +74,12 @@ public class CreateTypeStatement extends SchemaAlteringStatement
 
     public static void checkForDuplicateNames(UserType type) throws InvalidRequestException
     {
-        for (int i = 0; i < type.fieldTypes.size() - 1; i++)
+        for (int i = 0; i < type.size() - 1; i++)
         {
-            ByteBuffer fieldName = type.fieldNames.get(i);
-            for (int j = i+1; j < type.fieldTypes.size(); j++)
+            ByteBuffer fieldName = type.fieldName(i);
+            for (int j = i+1; j < type.size(); j++)
             {
-                if (fieldName.equals(type.fieldNames.get(j)))
+                if (fieldName.equals(type.fieldName(j)))
                     throw new InvalidRequestException(String.format("Duplicate field name %s in type %s",
                                                                     UTF8Type.instance.getString(fieldName),
                                                                     UTF8Type.instance.getString(type.name)));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/statements/DropTypeStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/DropTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/DropTypeStatement.java
index 10fc366..1e1ded5 100644
--- a/src/java/org/apache/cassandra/cql3/statements/DropTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/DropTypeStatement.java
@@ -97,7 +97,7 @@ public class DropTypeStatement extends SchemaAlteringStatement
             if (name.getKeyspace().equals(ut.keyspace) && name.getUserTypeName().equals(ut.name))
                 return true;
 
-            for (AbstractType<?> subtype : ut.fieldTypes)
+            for (AbstractType<?> subtype : ut.fieldTypes())
                 if (isUsedBy(subtype))
                     return true;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 4a84b7b..9d63389 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -778,7 +778,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     }
 
     private static List<Composite> buildBound(Bound bound,
-                                              Collection<ColumnDefinition> defs,
+                                              List<ColumnDefinition> defs,
                                               Restriction[] restrictions,
                                               boolean isReversed,
                                               CType type,
@@ -797,7 +797,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 else if (firstRestriction.isIN())
                     return buildMultiColumnInBound(bound, defs, (MultiColumnRestriction.IN) firstRestriction, isReversed, builder, type, options);
                 else
-                    return buildMultiColumnEQBound(bound, (MultiColumnRestriction.EQ) firstRestriction, isReversed, builder, options);
+                    return buildMultiColumnEQBound(bound, defs, (MultiColumnRestriction.EQ) firstRestriction, isReversed, builder, options);
             }
         }
 
@@ -886,7 +886,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     }
 
     private static List<Composite> buildMultiColumnSliceBound(Bound bound,
-                                                              Collection<ColumnDefinition> defs,
+                                                              List<ColumnDefinition> defs,
                                                               MultiColumnRestriction.Slice slice,
                                                               boolean isReversed,
                                                               CBuilder builder,
@@ -911,22 +911,29 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         }
 
         List<ByteBuffer> vals = slice.componentBounds(firstComponentBound, options);
-        builder.add(vals.get(firstName.position()));
 
-        while(iter.hasNext())
+        ByteBuffer v = vals.get(firstName.position());
+        if (v == null)
+            throw new InvalidRequestException("Invalid null value in condition for column " + firstName.name);
+        builder.add(v);
+
+        while (iter.hasNext())
         {
             ColumnDefinition def = iter.next();
             if (def.position() >= vals.size())
                 break;
 
-            builder.add(vals.get(def.position()));
+            v = vals.get(def.position());
+            if (v == null)
+                throw new InvalidRequestException("Invalid null value in condition for column " + def.name);
+            builder.add(v);
         }
         Relation.Type relType = slice.getRelation(eocBound, firstComponentBound);
         return Collections.singletonList(builder.build().withEOC(eocForRelation(relType)));
     }
 
     private static List<Composite> buildMultiColumnInBound(Bound bound,
-                                                           Collection<ColumnDefinition> defs,
+                                                           List<ColumnDefinition> defs,
                                                            MultiColumnRestriction.IN restriction,
                                                            boolean isReversed,
                                                            CBuilder builder,
@@ -941,6 +948,10 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         Iterator<ColumnDefinition> iter = defs.iterator();
         for (List<ByteBuffer> components : splitInValues)
         {
+            for (int i = 0; i < components.size(); i++)
+                if (components.get(i) == null)
+                    throw new InvalidRequestException("Invalid null value in condition for column " + defs.get(i));
+
             Composite prefix = builder.buildWith(components);
             Bound b = isReversed == isReversedType(iter.next()) ? bound : Bound.reverse(bound);
             inValues.add(b == Bound.END && builder.remainingCount() - components.size() > 0
@@ -951,14 +962,21 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     }
 
     private static List<Composite> buildMultiColumnEQBound(Bound bound,
+                                                           List<ColumnDefinition> defs,
                                                            MultiColumnRestriction.EQ restriction,
                                                            boolean isReversed,
                                                            CBuilder builder,
                                                            QueryOptions options) throws InvalidRequestException
     {
         Bound eocBound = isReversed ? Bound.reverse(bound) : bound;
-        for (ByteBuffer component : restriction.values(options))
+        List<ByteBuffer> values = restriction.values(options);
+        for (int i = 0; i < values.size(); i++)
+        {
+            ByteBuffer component = values.get(i);
+            if (component == null)
+                throw new InvalidRequestException("Invalid null value in condition for column " + defs.get(i));
             builder.add(component);
+        }
 
         Composite prefix = builder.build();
         return Collections.singletonList(builder.remainingCount() > 0 && eocBound == Bound.END
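
Note on the SelectStatement change above: buildMultiColumnSliceBound, buildMultiColumnInBound and buildMultiColumnEQBound now reject a null component inside a multi-column relation with an InvalidRequestException instead of handing it to the composite builder. A minimal sketch of how that surfaces in a test, assuming a CQLTester subclass (the helpers appear later in this commit; table layout and values are illustrative only):

    @Test
    public void nullInMultiColumnRelationIsRejected() throws Throwable
    {
        createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b, c))");
        // Both bounds should fail with "Invalid null value in condition for column ..."
        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c) > (?, ?)", 1, null);
        assertInvalid("SELECT * FROM %s WHERE a = 0 AND (b, c) = (?, ?)", null, 1);
    }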

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/cql3/statements/Selection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/Selection.java b/src/java/org/apache/cassandra/cql3/statements/Selection.java
index 4990e11..f4e0885 100644
--- a/src/java/org/apache/cassandra/cql3/statements/Selection.java
+++ b/src/java/org/apache/cassandra/cql3/statements/Selection.java
@@ -142,13 +142,13 @@ public abstract class Selection
                 throw new InvalidRequestException(String.format("Invalid field selection: %s of type %s is not a user type", withField.selected, type.asCQL3Type()));
 
             UserType ut = (UserType)type;
-            for (int i = 0; i < ut.fieldTypes.size(); i++)
+            for (int i = 0; i < ut.size(); i++)
             {
-                if (!ut.fieldNames.get(i).equals(withField.field.bytes))
+                if (!ut.fieldName(i).equals(withField.field.bytes))
                     continue;
 
                 if (metadata != null)
-                    metadata.add(makeFieldSelectSpec(cfm, withField, ut.fieldTypes.get(i), raw.alias));
+                    metadata.add(makeFieldSelectSpec(cfm, withField, ut.fieldType(i), raw.alias));
                 return new FieldSelector(ut, i, selected);
             }
             throw new InvalidRequestException(String.format("%s of type %s has no field %s", withField.selected, type.asCQL3Type(), withField.field));
@@ -472,13 +472,13 @@ public abstract class Selection
 
         public AbstractType<?> getType()
         {
-            return type.fieldTypes.get(field);
+            return type.fieldType(field);
         }
 
         @Override
         public String toString()
         {
-            return String.format("%s.%s", selected, UTF8Type.instance.getString(type.fieldNames.get(field)));
+            return String.format("%s.%s", selected, UTF8Type.instance.getString(type.fieldName(field)));
         }
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/db/marshal/TupleType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/TupleType.java b/src/java/org/apache/cassandra/db/marshal/TupleType.java
index 74211c8..e754b51 100644
--- a/src/java/org/apache/cassandra/db/marshal/TupleType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TupleType.java
@@ -120,9 +120,10 @@ public class TupleType extends AbstractType<ByteBuffer>
                 throw new MarshalException(String.format("Not enough bytes to read size of %dth component", i));
 
             int size = input.getInt();
-            // We don't handle null just yet, but we should fix that soon (CASSANDRA-7206)
+
+            // size < 0 means null value
             if (size < 0)
-                throw new MarshalException("Nulls are not yet supported inside tuple values");
+                continue;
 
             if (input.remaining() < size)
                 throw new MarshalException(String.format("Not enough bytes to read %dth component", i));
@@ -158,13 +159,20 @@ public class TupleType extends AbstractType<ByteBuffer>
     {
         int totalLength = 0;
         for (ByteBuffer component : components)
-            totalLength += 4 + component.remaining();
+            totalLength += 4 + (component == null ? 0 : component.remaining());
 
         ByteBuffer result = ByteBuffer.allocate(totalLength);
         for (ByteBuffer component : components)
         {
-            result.putInt(component.remaining());
-            result.put(component.duplicate());
+            if (component == null)
+            {
+                result.putInt(-1);
+            }
+            else
+            {
+                result.putInt(component.remaining());
+                result.put(component.duplicate());
+            }
         }
         result.rewind();
         return result;
@@ -183,12 +191,17 @@ public class TupleType extends AbstractType<ByteBuffer>
             if (i > 0)
                 sb.append(":");
 
+            AbstractType<?> type = type(i);
             int size = input.getInt();
-            assert size >= 0; // We don't support nulls yet, but we will likely do with #7206 and we'll need
-                              // a way to represent it as a string (without it conflicting with a user value)
+            if (size < 0)
+            {
+                sb.append("@");
+                continue;
+            }
+
             ByteBuffer field = ByteBufferUtil.readBytes(input, size);
-            // We use ':' as delimiter so escape it if it's in the generated string
-            sb.append(field == null ? "null" : type(i).getString(value).replaceAll(":", "\\\\:"));
+            // We use ':' as delimiter, and @ to represent null, so escape them in the generated string
+            sb.append(type.getString(field).replaceAll(":", "\\\\:").replaceAll("@", "\\\\@"));
         }
         return sb.toString();
     }
@@ -196,15 +209,19 @@ public class TupleType extends AbstractType<ByteBuffer>
     public ByteBuffer fromString(String source)
     {
         // Split the input on non-escaped ':' characters
-        List<String> strings = AbstractCompositeType.split(source);
-        ByteBuffer[] components = new ByteBuffer[strings.size()];
-        for (int i = 0; i < strings.size(); i++)
+        List<String> fieldStrings = AbstractCompositeType.split(source);
+        ByteBuffer[] fields = new ByteBuffer[fieldStrings.size()];
+        for (int i = 0; i < fieldStrings.size(); i++)
         {
-            // TODO: we'll need to handle null somehow here once we support them
-            String str = strings.get(i).replaceAll("\\\\:", ":");
-            components[i] = type(i).fromString(str);
+            String fieldString = fieldStrings.get(i);
+            // We use @ to represent nulls
+            if (fieldString.equals("@"))
+                continue;
+
+            AbstractType<?> type = type(i);
+            fields[i] = type.fromString(fieldString.replaceAll("\\\\:", ":").replaceAll("\\\\@", "@"));
         }
-        return buildValue(components);
+        return buildValue(fields);
     }
 
     public TypeSerializer<ByteBuffer> getSerializer()
@@ -271,9 +288,14 @@ public class TupleType extends AbstractType<ByteBuffer>
     }
 
     @Override
+    public CQL3Type asCQL3Type()
+    {
+        return CQL3Type.Tuple.create(this);
+    }
+
+    @Override
     public String toString()
     {
         return getClass().getName() + TypeParser.stringifyTypeParameters(types);
     }
 }
-
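
Note on the TupleType serialization above: each component is written as a 4-byte length followed by that many bytes, and a length of -1 now marks a null component; getString/fromString use ':' as the delimiter and '@' for null, escaping both. A minimal sketch of building such a value, assuming only the static TupleType.buildValue shown above and the standard type singletons:

    // A tuple<int, text> value whose first component is 3 and whose second is null:
    ByteBuffer[] components = new ByteBuffer[]
    {
        Int32Type.instance.decompose(3),
        null                       // serialized as a -1 length with no bytes following
    };
    ByteBuffer packed = TupleType.buildValue(components);
    // new TupleType(Arrays.asList(Int32Type.instance, UTF8Type.instance)).getString(packed)
    // would render this as "3:@".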

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/db/marshal/UserType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/UserType.java b/src/java/org/apache/cassandra/db/marshal/UserType.java
index 6656fd6..44c208f 100644
--- a/src/java/org/apache/cassandra/db/marshal/UserType.java
+++ b/src/java/org/apache/cassandra/db/marshal/UserType.java
@@ -33,21 +33,22 @@ import org.apache.cassandra.utils.Pair;
 
 /**
  * A user defined type.
+ *
+ * A user type is really just a tuple type on steroids.
  */
-public class UserType extends AbstractType<ByteBuffer>
+public class UserType extends TupleType
 {
     public final String keyspace;
     public final ByteBuffer name;
-    public final List<ByteBuffer> fieldNames;
-    public final List<AbstractType<?>> fieldTypes;
+    private final List<ByteBuffer> fieldNames;
 
     public UserType(String keyspace, ByteBuffer name, List<ByteBuffer> fieldNames, List<AbstractType<?>> fieldTypes)
     {
+        super(fieldTypes);
         assert fieldNames.size() == fieldTypes.size();
         this.keyspace = keyspace;
         this.name = name;
         this.fieldNames = fieldNames;
-        this.fieldTypes = fieldTypes;
     }
 
     public static UserType getInstance(TypeParser parser) throws ConfigurationException, SyntaxException
@@ -65,65 +66,44 @@ public class UserType extends AbstractType<ByteBuffer>
         return new UserType(keyspace, name, columnNames, columnTypes);
     }
 
-    public String getNameAsString()
+    public AbstractType<?> fieldType(int i)
     {
-        return UTF8Type.instance.compose(name);
+        return type(i);
     }
 
-    public int compare(ByteBuffer o1, ByteBuffer o2)
+    public List<AbstractType<?>> fieldTypes()
     {
-        if (!o1.hasRemaining() || !o2.hasRemaining())
-            return o1.hasRemaining() ? 1 : o2.hasRemaining() ? -1 : 0;
-
-        ByteBuffer bb1 = o1.duplicate();
-        ByteBuffer bb2 = o2.duplicate();
-
-        int i = 0;
-        while (bb1.remaining() > 0 && bb2.remaining() > 0)
-        {
-            AbstractType<?> comparator = fieldTypes.get(i);
-
-            int size1 = bb1.getInt();
-            int size2 = bb2.getInt();
-
-            // Handle nulls
-            if (size1 < 0)
-            {
-                if (size2 < 0)
-                    continue;
-                return -1;
-            }
-            if (size2 < 0)
-                return 1;
-
-            ByteBuffer value1 = ByteBufferUtil.readBytes(bb1, size1);
-            ByteBuffer value2 = ByteBufferUtil.readBytes(bb2, size2);
-            int cmp = comparator.compare(value1, value2);
-            if (cmp != 0)
-                return cmp;
+        return types;
+    }
 
-            ++i;
-        }
+    public ByteBuffer fieldName(int i)
+    {
+        return fieldNames.get(i);
+    }
 
-        if (bb1.remaining() == 0)
-            return bb2.remaining() == 0 ? 0 : -1;
+    public List<ByteBuffer> fieldNames()
+    {
+        return fieldNames;
+    }
 
-        // bb1.remaining() > 0 && bb2.remaining() == 0
-        return 1;
+    public String getNameAsString()
+    {
+        return UTF8Type.instance.compose(name);
     }
 
+    // Note: the only reason we override this is to provide a nicer error message, but since that's not that much code...
     @Override
     public void validate(ByteBuffer bytes) throws MarshalException
     {
         ByteBuffer input = bytes.duplicate();
-        for (int i = 0; i < fieldTypes.size(); i++)
+        for (int i = 0; i < size(); i++)
         {
             // we allow the input to have less fields than declared so as to support field addition.
             if (!input.hasRemaining())
                 return;
 
             if (input.remaining() < 4)
-                throw new MarshalException(String.format("Not enough bytes to read size of %dth field %s", i, fieldNames.get(i)));
+                throw new MarshalException(String.format("Not enough bytes to read size of %dth field %s", i, fieldName(i)));
 
             int size = input.getInt();
 
@@ -132,10 +112,10 @@ public class UserType extends AbstractType<ByteBuffer>
                 continue;
 
             if (input.remaining() < size)
-                throw new MarshalException(String.format("Not enough bytes to read %dth field %s", i, fieldNames.get(i)));
+                throw new MarshalException(String.format("Not enough bytes to read %dth field %s", i, fieldName(i)));
 
             ByteBuffer field = ByteBufferUtil.readBytes(input, size);
-            fieldTypes.get(i).validate(field);
+            types.get(i).validate(field);
         }
 
         // We're allowed to get less fields than declared, but not more
@@ -143,112 +123,20 @@ public class UserType extends AbstractType<ByteBuffer>
             throw new MarshalException("Invalid remaining data after end of UDT value");
     }
 
-    /**
-     * Split a UDT value into its fields values.
-     */
-    public ByteBuffer[] split(ByteBuffer value)
-    {
-        ByteBuffer[] fields = new ByteBuffer[fieldTypes.size()];
-        ByteBuffer input = value.duplicate();
-        for (int i = 0; i < fieldTypes.size(); i++)
-        {
-            if (!input.hasRemaining())
-                return Arrays.copyOfRange(fields, 0, i);
-
-            int size = input.getInt();
-            fields[i] = size < 0 ? null : ByteBufferUtil.readBytes(input, size);
-        }
-        return fields;
-    }
-
-    public static ByteBuffer buildValue(ByteBuffer[] fields)
-    {
-        int totalLength = 0;
-        for (ByteBuffer field : fields)
-            totalLength += 4 + (field == null ? 0 : field.remaining());
-
-        ByteBuffer result = ByteBuffer.allocate(totalLength);
-        for (ByteBuffer field : fields)
-        {
-            if (field == null)
-            {
-                result.putInt(-1);
-            }
-            else
-            {
-                result.putInt(field.remaining());
-                result.put(field.duplicate());
-            }
-        }
-        result.rewind();
-        return result;
-    }
-
-    @Override
-    public String getString(ByteBuffer value)
-    {
-        StringBuilder sb = new StringBuilder();
-        ByteBuffer input = value.duplicate();
-        for (int i = 0; i < fieldTypes.size(); i++)
-        {
-            if (!input.hasRemaining())
-                return sb.toString();
-
-            if (i > 0)
-                sb.append(":");
-
-            AbstractType<?> type = fieldTypes.get(i);
-            int size = input.getInt();
-            if (size < 0)
-            {
-                sb.append("@");
-                continue;
-            }
-
-            ByteBuffer field = ByteBufferUtil.readBytes(input, size);
-            // We use ':' as delimiter, and @ to represent null, so escape them in the generated string
-            sb.append(type.getString(field).replaceAll(":", "\\\\:").replaceAll("@", "\\\\@"));
-        }
-        return sb.toString();
-    }
-
-    public ByteBuffer fromString(String source)
-    {
-        // Split the input on non-escaped ':' characters
-        List<String> fieldStrings = AbstractCompositeType.split(source);
-        ByteBuffer[] fields = new ByteBuffer[fieldStrings.size()];
-        for (int i = 0; i < fieldStrings.size(); i++)
-        {
-            String fieldString = fieldStrings.get(i);
-            // We use @ to represent nulls
-            if (fieldString.equals("@"))
-                continue;
-
-            AbstractType<?> type = fieldTypes.get(i);
-            fields[i] = type.fromString(fieldString.replaceAll("\\\\:", ":").replaceAll("\\\\@", "@"));
-        }
-        return buildValue(fields);
-    }
-
-    public TypeSerializer<ByteBuffer> getSerializer()
-    {
-        return BytesSerializer.instance;
-    }
-
     @Override
-    public final int hashCode()
+    public int hashCode()
     {
-        return Objects.hashCode(keyspace, name, fieldNames, fieldTypes);
+        return Objects.hashCode(keyspace, name, fieldNames, types);
     }
 
     @Override
-    public final boolean equals(Object o)
+    public boolean equals(Object o)
     {
         if(!(o instanceof UserType))
             return false;
 
         UserType that = (UserType)o;
-        return keyspace.equals(that.keyspace) && name.equals(that.name) && fieldNames.equals(that.fieldNames) && fieldTypes.equals(that.fieldTypes);
+        return keyspace.equals(that.keyspace) && name.equals(that.name) && fieldNames.equals(that.fieldNames) && types.equals(that.types);
     }
 
     @Override
@@ -260,6 +148,6 @@ public class UserType extends AbstractType<ByteBuffer>
     @Override
     public String toString()
     {
-        return getClass().getName() + TypeParser.stringifyUserTypeParameters(keyspace, name, fieldNames, fieldTypes);
+        return getClass().getName() + TypeParser.stringifyUserTypeParameters(keyspace, name, fieldNames, types);
     }
 }
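
Note on the UserType rewrite above: a user type is now just a TupleType whose fields additionally carry names, so the old ad-hoc compare/split/getString/fromString code is inherited from TupleType and only the name-related accessors remain here. A minimal sketch of constructing one and walking its fields (keyspace, type and field names are illustrative only):

    UserType address = new UserType("ks",
                                    UTF8Type.instance.decompose("address"),
                                    Arrays.asList(UTF8Type.instance.decompose("street"),
                                                  UTF8Type.instance.decompose("zip")),
                                    Arrays.<AbstractType<?>>asList(UTF8Type.instance, Int32Type.instance));
    // size() and fieldType(i) come from TupleType; fieldName(i) is the UserType addition.
    for (int i = 0; i < address.size(); i++)
        System.out.println(UTF8Type.instance.getString(address.fieldName(i)) + ": " + address.fieldType(i));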

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/src/java/org/apache/cassandra/transport/DataType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/transport/DataType.java b/src/java/org/apache/cassandra/transport/DataType.java
index 2410378..0710645 100644
--- a/src/java/org/apache/cassandra/transport/DataType.java
+++ b/src/java/org/apache/cassandra/transport/DataType.java
@@ -52,7 +52,8 @@ public enum DataType implements OptionCodec.Codecable<DataType>
     LIST     (32, null),
     MAP      (33, null),
     SET      (34, null),
-    UDT      (48, null);
+    UDT      (48, null),
+    TUPLE    (49, null);
 
 
     public static final OptionCodec<DataType> codec = new OptionCodec<DataType>(DataType.class);
@@ -107,6 +108,12 @@ public enum DataType implements OptionCodec.Codecable<DataType>
                     fieldTypes.add(DataType.toType(codec.decodeOne(cb, version)));
                 }
                 return new UserType(ks, name, fieldNames, fieldTypes);
+            case TUPLE:
+                n = cb.readUnsignedShort();
+                List<AbstractType<?>> types = new ArrayList<>(n);
+                for (int i = 0; i < n; i++)
+                    types.add(DataType.toType(codec.decodeOne(cb, version)));
+                return new TupleType(types);
             default:
                 return null;
         }
@@ -135,13 +142,19 @@ public enum DataType implements OptionCodec.Codecable<DataType>
                 UserType udt = (UserType)value;
                 CBUtil.writeString(udt.keyspace, cb);
                 CBUtil.writeString(UTF8Type.instance.compose(udt.name), cb);
-                cb.writeShort(udt.fieldNames.size());
-                for (int i = 0; i < udt.fieldNames.size(); i++)
+                cb.writeShort(udt.size());
+                for (int i = 0; i < udt.size(); i++)
                 {
-                    CBUtil.writeString(UTF8Type.instance.compose(udt.fieldNames.get(i)), cb);
-                    codec.writeOne(DataType.fromType(udt.fieldTypes.get(i), version), cb, version);
+                    CBUtil.writeString(UTF8Type.instance.compose(udt.fieldName(i)), cb);
+                    codec.writeOne(DataType.fromType(udt.fieldType(i), version), cb, version);
                 }
                 break;
+            case TUPLE:
+                TupleType tt = (TupleType)value;
+                cb.writeShort(tt.size());
+                for (int i = 0; i < tt.size(); i++)
+                    codec.writeOne(DataType.fromType(tt.type(i), version), cb, version);
+                break;
         }
     }
 
@@ -166,12 +179,18 @@ public enum DataType implements OptionCodec.Codecable<DataType>
                 size += CBUtil.sizeOfString(udt.keyspace);
                 size += CBUtil.sizeOfString(UTF8Type.instance.compose(udt.name));
                 size += 2;
-                for (int i = 0; i < udt.fieldNames.size(); i++)
+                for (int i = 0; i < udt.size(); i++)
                 {
-                    size += CBUtil.sizeOfString(UTF8Type.instance.compose(udt.fieldNames.get(i)));
-                    size += codec.oneSerializedSize(DataType.fromType(udt.fieldTypes.get(i), version), version);
+                    size += CBUtil.sizeOfString(UTF8Type.instance.compose(udt.fieldName(i)));
+                    size += codec.oneSerializedSize(DataType.fromType(udt.fieldType(i), version), version);
                 }
                 return size;
+            case TUPLE:
+                TupleType tt = (TupleType)value;
+                size = 2;
+                for (int i = 0; i < tt.size(); i++)
+                    size += codec.oneSerializedSize(DataType.fromType(tt.type(i), version), version);
+                return size;
             default:
                 return 0;
         }
@@ -211,6 +230,9 @@ public enum DataType implements OptionCodec.Codecable<DataType>
             if (type instanceof UserType && version >= 3)
                 return Pair.<DataType, Object>create(UDT, type);
 
+            if (type instanceof TupleType && version >= 3)
+                return Pair.<DataType, Object>create(TUPLE, type);
+
             return Pair.<DataType, Object>create(CUSTOM, type.toString());
         }
         else
@@ -236,6 +258,8 @@ public enum DataType implements OptionCodec.Codecable<DataType>
                     return MapType.getInstance(l.get(0), l.get(1));
                 case UDT:
                     return (AbstractType)entry.right;
+                case TUPLE:
+                    return (AbstractType)entry.right;
                 default:
                     return entry.left.type;
             }
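
Note on the DataType change above: protocol v3 gains a TUPLE option (id 49) that is encoded like the UDT option minus keyspace, type name and field names: an unsigned short element count followed by one type option per element. A minimal sketch of the resulting byte layout for tuple<int, varchar>, written by hand (the 0x0009/0x000D element ids are assumptions taken from the v3 protocol spec, not from this diff):

    ByteBuffer opt = ByteBuffer.allocate(8);
    opt.putShort((short) 49);     // option id: TUPLE
    opt.putShort((short) 2);      // [short] number of element types
    opt.putShort((short) 0x0009); // element 0: INT (assumed id)
    opt.putShort((short) 0x000D); // element 1: VARCHAR (assumed id)
    opt.flip();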

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/test/conf/logback-test.xml
----------------------------------------------------------------------
diff --git a/test/conf/logback-test.xml b/test/conf/logback-test.xml
index a7aa34c..535e4fe 100644
--- a/test/conf/logback-test.xml
+++ b/test/conf/logback-test.xml
@@ -30,7 +30,7 @@
       <maxFileSize>20MB</maxFileSize>
     </triggeringPolicy>
     <encoder>
-      <pattern>%-5level [%thread] %date{ISO8601} %caller{1} - %msg%n</pattern>
+      <pattern>%-5level [%thread] %date{ISO8601} %msg%n</pattern>
     </encoder>
   </appender>
   
@@ -52,7 +52,7 @@
     </filter>
   </appender>
         
-  <root level="DEBUG">
+  <root level="INFO">
     <appender-ref ref="FILE" />
     <appender-ref ref="STDERR" />
     <appender-ref ref="STDOUT" />

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/test/unit/org/apache/cassandra/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index cbff31e..7f637a4 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -52,6 +52,16 @@ public class SchemaLoader
     @BeforeClass
     public static void loadSchema() throws ConfigurationException
     {
+        prepareServer();
+
+        // if you're messing with low-level sstable stuff, it can be useful to inject the schema directly
+        // Schema.instance.load(schemaDefinition());
+        for (KSMetaData ksm : schemaDefinition())
+            MigrationManager.announceNewKeyspace(ksm);
+    }
+
+    public static void prepareServer()
+    {
         // Cleanup first
         cleanupAndLeaveDirs();
 
@@ -69,10 +79,6 @@ public class SchemaLoader
         // Migrations aren't happy if gossiper is not started.  Even if we don't use migrations though,
         // some tests now expect us to start gossip for them.
         startGossiper();
-        // if you're messing with low-level sstable stuff, it can be useful to inject the schema directly
-        // Schema.instance.load(schemaDefinition());
-        for (KSMetaData ksm : schemaDefinition())
-            MigrationManager.announceNewKeyspace(ksm);
     }
 
     public static void startGossiper()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0932ed67/test/unit/org/apache/cassandra/cql3/CQLTester.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
new file mode 100644
index 0000000..06119ed
--- /dev/null
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.base.Objects;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.db.ConsistencyLevel;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.exceptions.*;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+/**
+ * Base class for CQL tests.
+ */
+public abstract class CQLTester
+{
+    protected static final Logger logger = LoggerFactory.getLogger(CQLTester.class);
+
+    private static final String KEYSPACE = "cql_test_keyspace";
+
+    private static final boolean USE_PREPARED_VALUES = Boolean.valueOf(System.getProperty("cassandra.test.use_prepared", "true"));
+
+    private static final AtomicInteger seqNumber = new AtomicInteger();
+
+    private String currentTable;
+
+    @BeforeClass
+    public static void setUpClass() throws Throwable
+    {
+        // This starts the gossiper for the sake of schema migrations. We might be able to get rid of that with some work.
+        SchemaLoader.prepareServer();
+
+        schemaChange(String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}", KEYSPACE));
+    }
+
+    @AfterClass
+    public static void tearDownClass()
+    {
+        SchemaLoader.stopGossiper();
+    }
+
+    protected void createTable(String query)
+    {
+        currentTable = "table_" + seqNumber.getAndIncrement();
+        String fullQuery = String.format(query, KEYSPACE + "." + currentTable);
+        logger.info(fullQuery);
+        schemaChange(fullQuery);
+    }
+
+    private static void schemaChange(String query)
+    {
+        try
+        {
+            // executeOnceInternal doesn't work for schema changes
+            QueryProcessor.process(query, ConsistencyLevel.ONE);
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException("Error setting schema for test (query was: " + query + ")", e);
+        }
+    }
+
+    protected UntypedResultSet execute(String query, Object... values) throws Throwable
+    {
+        if (currentTable == null)
+            throw new RuntimeException("You must create a table first with createTable");
+
+        try
+        {
+            query = String.format(query, KEYSPACE + "." + currentTable);
+
+            UntypedResultSet rs;
+            if (USE_PREPARED_VALUES)
+            {
+                logger.info("Executing: {} with values {}", query, formatAllValues(values));
+                rs = QueryProcessor.executeOnceInternal(query, transformValues(values));
+            }
+            else
+            {
+                query = replaceValues(query, values);
+                logger.info("Executing: {}", query);
+                rs = QueryProcessor.executeOnceInternal(query);
+            }
+            if (rs != null)
+                logger.info("Got {} rows", rs.size());
+            return rs;
+        }
+        catch (RuntimeException e)
+        {
+            Throwable cause = e.getCause() != null ? e.getCause() : e;
+            logger.info("Got error: {}", cause.getMessage() == null ? cause.toString() : cause.getMessage());
+            throw cause;
+        }
+    }
+
+    protected void assertRows(UntypedResultSet result, Object[]... rows)
+    {
+        if (result == null)
+        {
+            if (rows.length > 0)
+                Assert.fail(String.format("No rows returned by query but %d expected", rows.length));
+            return;
+        }
+
+        List<ColumnSpecification> meta = result.metadata();
+        Iterator<UntypedResultSet.Row> iter = result.iterator();
+        int i = 0;
+        while (iter.hasNext() && i < rows.length)
+        {
+            Object[] expected = rows[i++];
+            UntypedResultSet.Row actual = iter.next();
+
+            Assert.assertEquals(String.format("Invalid number of (expected) values provided for row %d", i), meta.size(), expected.length);
+
+            for (int j = 0; j < meta.size(); j++)
+            {
+                ColumnSpecification column = meta.get(j);
+                Object expectedValue = expected[j];
+                ByteBuffer expectedByteValue = makeByteBuffer(expected[j], (AbstractType)column.type);
+                ByteBuffer actualValue = actual.getBytes(column.name.toString());
+
+                if (!Objects.equal(expectedByteValue, actualValue))
+                    Assert.fail(String.format("Invalid value for row %d column %d (%s), expected <%s> but got <%s>",
+                                              i, j, column.name, formatValue(expectedByteValue, column.type), formatValue(actualValue, column.type)));
+            }
+        }
+
+        if (iter.hasNext())
+        {
+            while (iter.hasNext())
+            {
+                iter.next();
+                i++;
+            }
+            Assert.fail(String.format("Got more rows than expected. Expected %d but got %d.", rows.length, i));
+        }
+
+        Assert.assertTrue(String.format("Got fewer rows than expected. Expected %d but got %d", rows.length, i), i == rows.length);
+    }
+
+    protected void assertAllRows(Object[]... rows) throws Throwable
+    {
+        assertRows(execute("SELECT * FROM %s"), rows);
+    }
+
+    protected Object[] row(Object... expected)
+    {
+        return expected;
+    }
+
+    protected void assertEmpty(UntypedResultSet result) throws Throwable
+    {
+        if (result != null && result.size() != 0)
+            throw new InvalidRequestException(String.format("Expected empty result but got %d rows", result.size()));
+    }
+
+    protected void assertInvalid(String query, Object... values) throws Throwable
+    {
+        try
+        {
+            execute(query, values);
+            Assert.fail("Query should be invalid but no error was thrown. Query is: " + query);
+        }
+        catch (SyntaxException | InvalidRequestException e)
+        {
+            // This is what we expect
+        }
+    }
+
+    private static String replaceValues(String query, Object[] values)
+    {
+        StringBuilder sb = new StringBuilder();
+        int last = 0;
+        int i = 0;
+        int idx;
+        while ((idx = query.indexOf('?', last)) > 0)
+        {
+            if (i >= values.length)
+                throw new IllegalArgumentException(String.format("Not enough values provided. The query has at least %d variables but only %d values provided", i, values.length));
+
+            sb.append(query.substring(last, idx));
+
+            Object value = values[i++];
+
+            // When we have a .. IN ? .., we use a list for the value because that's what's expected when the value is serialized.
+            // When we format as a string however, we need to special-case it to use parentheses. Hackish but convenient.
+            if (idx >= 3 && value instanceof List && query.substring(idx - 3, idx).equalsIgnoreCase("IN "))
+            {
+                List l = (List)value;
+                sb.append("(");
+                for (int j = 0; j < l.size(); j++)
+                {
+                    if (j > 0)
+                        sb.append(", ");
+                    sb.append(formatForCQL(l.get(j)));
+                }
+                sb.append(")");
+            }
+            else
+            {
+                sb.append(formatForCQL(value));
+            }
+            last = idx + 1;
+        }
+        sb.append(query.substring(last));
+        return sb.toString();
+    }
+
+    // We're really only returning ByteBuffers, but this makes the type system happy
+    private static Object[] transformValues(Object[] values)
+    {
+        // We could partly rely on QueryProcessor.executeOnceInternal doing type conversion for us, but
+        // it would complain with ClassCastException if we pass, say, a string where an int is expected (since
+        // it bases conversion on what the value should be, not what it is). For testing, we sometimes
+        // want to pass a value of the wrong type and assert that this properly raises an InvalidRequestException,
+        // which executeOnceInternal would get in the way of. So instead, we pre-convert everything to bytes here
+        // based on the value.
+        // Besides, we need to handle things like TupleValue that executeOnceInternal doesn't know about.
+
+        Object[] buffers = new ByteBuffer[values.length];
+        for (int i = 0; i < values.length; i++)
+        {
+            Object value = values[i];
+            if (value == null)
+            {
+                buffers[i] = null;
+                continue;
+            }
+
+            buffers[i] = typeFor(value).decompose(serializeTuples(value));
+        }
+        return buffers;
+    }
+
+    private static Object serializeTuples(Object value)
+    {
+        if (value instanceof TupleValue)
+        {
+            return ((TupleValue)value).toByteBuffer();
+        }
+
+        // We need to reach inside collections for TupleValue and transform them to ByteBuffer
+        // since otherwise the decompose method of the collection AbstractType won't know what
+        // to do with them
+        if (value instanceof List)
+        {
+            List l = (List)value;
+            List n = new ArrayList(l.size());
+            for (Object o : l)
+                n.add(serializeTuples(o));
+            return n;
+        }
+
+        if (value instanceof Set)
+        {
+            Set s = (Set)value;
+            Set n = new LinkedHashSet(s.size());
+            for (Object o : s)
+                n.add(serializeTuples(o));
+            return n;
+        }
+
+        if (value instanceof Map)
+        {
+            Map m = (Map)value;
+            Map n = new LinkedHashMap(m.size());
+            for (Object entry : m.entrySet())
+                n.put(serializeTuples(((Map.Entry)entry).getKey()), serializeTuples(((Map.Entry)entry).getValue()));
+            return n;
+        }
+        return value;
+    }
+
+    private static String formatAllValues(Object[] values)
+    {
+        StringBuilder sb = new StringBuilder();
+        sb.append("[");
+        for (int i = 0; i < values.length; i++)
+        {
+            if (i > 0)
+                sb.append(", ");
+            sb.append(formatForCQL(values[i]));
+        }
+        sb.append("]");
+        return sb.toString();
+    }
+
+    private static String formatForCQL(Object value)
+    {
+        if (value == null)
+            return "null";
+
+        if (value instanceof TupleValue)
+            return ((TupleValue)value).toCQLString();
+
+        // We need to reach inside collections for TupleValue. Besides, for some reason the format
+        // of collections that CollectionType.getString gives us is not at all 'CQL compatible'.
+        if (value instanceof Collection)
+        {
+            StringBuilder sb = new StringBuilder();
+            if (value instanceof List)
+            {
+                List l = (List)value;
+                sb.append("[");
+                for (int i = 0; i < l.size(); i++)
+                {
+                    if (i > 0)
+                        sb.append(", ");
+                    sb.append(formatForCQL(l.get(i)));
+                }
+                sb.append("]");
+            }
+            else if (value instanceof Set)
+            {
+                Set s = (Set)value;
+                sb.append("{");
+                Iterator iter = s.iterator();
+                while (iter.hasNext())
+                {
+                    sb.append(formatForCQL(iter.next()));
+                    if (iter.hasNext())
+                        sb.append(", ");
+                }
+                sb.append("}");
+            }
+            else
+            {
+                Map m = (Map)value;
+                sb.append("{");
+                Iterator iter = m.entrySet().iterator();
+                while (iter.hasNext())
+                {
+                    Map.Entry entry = (Map.Entry)iter.next();
+                    sb.append(formatForCQL(entry.getKey())).append(": ").append(formatForCQL(entry.getValue()));
+                    if (iter.hasNext())
+                        sb.append(", ");
+                }
+                sb.append("}");
+            }
+            return sb.toString();
+        }
+
+        AbstractType type = typeFor(value);
+        String s = type.getString(type.decompose(value));
+
+        if (type instanceof UTF8Type)
+            return String.format("'%s'", s.replaceAll("'", "''"));
+
+        if (type instanceof BytesType)
+            return "0x" + s;
+
+        return s;
+    }
+
+    private static ByteBuffer makeByteBuffer(Object value, AbstractType type)
+    {
+        if (value == null)
+            return null;
+
+        if (value instanceof TupleValue)
+            return ((TupleValue)value).toByteBuffer();
+
+        if (value instanceof ByteBuffer)
+            return (ByteBuffer)value;
+
+        return type.decompose(value);
+    }
+
+    private static String formatValue(ByteBuffer bb, AbstractType<?> type)
+    {
+        return bb == null ? "null" : type.getString(bb);
+    }
+
+    protected Object tuple(Object...values)
+    {
+        return new TupleValue(values);
+    }
+
+    protected Object list(Object...values)
+    {
+        return Arrays.asList(values);
+    }
+
+    // Attempt to find an AbstractType from a value (for serialization/printing's sake).
+    // Will work as long as we use types we know of, which is good enough for testing.
+    private static AbstractType typeFor(Object value)
+    {
+        if (value instanceof ByteBuffer || value instanceof TupleValue || value == null)
+            return BytesType.instance;
+
+        if (value instanceof Integer)
+            return Int32Type.instance;
+
+        if (value instanceof Long)
+            return LongType.instance;
+
+        if (value instanceof Float)
+            return FloatType.instance;
+
+        if (value instanceof Double)
+            return DoubleType.instance;
+
+        if (value instanceof String)
+            return UTF8Type.instance;
+
+        if (value instanceof List)
+        {
+            List l = (List)value;
+            AbstractType elt = l.isEmpty() ? BytesType.instance : typeFor(l.get(0));
+            return ListType.getInstance(elt);
+        }
+
+        throw new IllegalArgumentException("Unsupported value type (value is " + value + ")");
+    }
+
+    private static class TupleValue
+    {
+        private final Object[] values;
+
+        TupleValue(Object[] values)
+        {
+            this.values = values;
+        }
+
+        public ByteBuffer toByteBuffer()
+        {
+            ByteBuffer[] bbs = new ByteBuffer[values.length];
+            for (int i = 0; i < values.length; i++)
+                bbs[i] = makeByteBuffer(values[i], typeFor(values[i]));
+            return TupleType.buildValue(bbs);
+        }
+
+        public String toCQLString()
+        {
+            StringBuilder sb = new StringBuilder();
+            sb.append("(");
+            for (int i = 0; i < values.length; i++)
+            {
+                if (i > 0)
+                    sb.append(", ");
+                sb.append(formatForCQL(values[i]));
+            }
+            sb.append(")");
+            return sb.toString();
+        }
+    }
+}
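
Note on CQLTester above: it factors the per-test boilerplate out of the CQL tests: createTable() substitutes a uniquely named table for the %s placeholder, execute() runs the query either prepared or with inlined literals depending on cassandra.test.use_prepared, and assertRows()/assertInvalid() do the checking. A minimal sketch of a subclass, with an illustrative schema and values only:

    import org.junit.Test;

    public class TupleSmokeTest extends CQLTester
    {
        @Test
        public void insertAndReadTuple() throws Throwable
        {
            createTable("CREATE TABLE %s (k int PRIMARY KEY, t tuple<int, text>)");
            execute("INSERT INTO %s (k, t) VALUES (?, ?)", 0, tuple(3, "foo"));
            assertRows(execute("SELECT t FROM %s WHERE k = ?", 0),
                       row(tuple(3, "foo")));
        }
    }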


[3/3] git commit: Merge branch 'cassandra-2.1' into trunk

Posted by sl...@apache.org.
Merge branch 'cassandra-2.1' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/56c2f0ab
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/56c2f0ab
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/56c2f0ab

Branch: refs/heads/trunk
Commit: 56c2f0ab110e2dadb6557de9bc6614829fdc5172
Parents: 4b9fde9 0932ed6
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Thu May 29 11:36:28 2014 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Thu May 29 11:36:28 2014 +0200

----------------------------------------------------------------------
 CHANGES.txt                                     |    1 +
 build.xml                                       |   41 +-
 doc/native_protocol_v3.spec                     |   26 +-
 .../org/apache/cassandra/config/UTMetaData.java |   10 +-
 .../org/apache/cassandra/cql3/CQL3Type.java     |  106 ++
 src/java/org/apache/cassandra/cql3/Cql.g        |   44 +-
 src/java/org/apache/cassandra/cql3/Tuples.java  |  103 +-
 .../apache/cassandra/cql3/UntypedResultSet.java |   18 +
 .../org/apache/cassandra/cql3/UserTypes.java    |   14 +-
 .../cql3/statements/AlterTypeStatement.java     |   26 +-
 .../cql3/statements/CreateTypeStatement.java    |    8 +-
 .../cql3/statements/DropTypeStatement.java      |    2 +-
 .../cql3/statements/SelectStatement.java        |   34 +-
 .../cassandra/cql3/statements/Selection.java    |   10 +-
 .../apache/cassandra/db/marshal/TupleType.java  |   56 +-
 .../apache/cassandra/db/marshal/UserType.java   |  174 +--
 .../apache/cassandra/transport/DataType.java    |   40 +-
 test/conf/logback-test.xml                      |    4 +-
 .../unit/org/apache/cassandra/SchemaLoader.java |   14 +-
 .../org/apache/cassandra/cql3/CQLTester.java    |  472 ++++++
 .../cassandra/cql3/MultiColumnRelationTest.java | 1434 +++++-------------
 .../apache/cassandra/cql3/TupleTypeTest.java    |   97 ++
 22 files changed, 1445 insertions(+), 1289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/56c2f0ab/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/56c2f0ab/build.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/56c2f0ab/src/java/org/apache/cassandra/cql3/Cql.g
----------------------------------------------------------------------