Posted to commits@cassandra.apache.org by sl...@apache.org on 2013/12/17 17:03:37 UTC

[01/13] Push composites support in the storage engine

Updated Branches:
  refs/heads/trunk f943433ae -> 362cc0535


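For readers skimming the patch: nearly all of the test changes below follow one mechanical pattern. Cell (column) names that were previously passed around as raw ByteBuffers are now wrapped in the new CellName composites abstraction, usually via the Util.cellname(...) and CellNames.simpleDense(...) helpers that appear throughout the hunks. The sketch that follows only illustrates that pattern; the CellNameMigrationSketch class and its method are invented for this example, and it assumes the usual SchemaLoader test schema (Keyspace1/Standard1) just like the tests being modified. It is not part of the patch.

import java.nio.ByteBuffer;

import org.apache.cassandra.Util;
import org.apache.cassandra.db.RowMutation;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.composites.CellNames;
import org.apache.cassandra.utils.ByteBufferUtil;

public class CellNameMigrationSketch
{
    // Assumes SchemaLoader has already set up Keyspace1/Standard1, as in the tests above.
    public static void writeOneColumn()
    {
        long timestamp = System.currentTimeMillis();
        RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));

        // Before this patch, test code addressed a column by its raw ByteBuffer name:
        //     rm.add("Standard1", ByteBufferUtil.bytes("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);

        // After the patch, the name is a CellName built through the composites layer.
        // Util.cellname(...) is the test helper used throughout this diff; for a simple
        // non-composite dense name it plays the same role as CellNames.simpleDense(...).
        CellName name = Util.cellname("col");
        rm.add("Standard1", name, ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);
        rm.apply();

        // The same wrapping applies on the read path, e.g. cf.getColumn(Util.cellname("col")),
        // and the raw bytes remain reachable via toByteBuffer() when an assertion still
        // needs to compare against a ByteBuffer, as several hunks below do.
        ByteBuffer raw = name.toByteBuffer();
        CellName sameName = CellNames.simpleDense(raw);
        assert name.equals(sameName);
    }
}
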
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
index a338290..b78f182 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
@@ -73,7 +73,7 @@ public class CompactionsTest extends SchemaLoader
             DecoratedKey key = Util.dk(Integer.toString(i));
             RowMutation rm = new RowMutation(KEYSPACE1, key.key);
             for (int j = 0; j < 10; j++)
-                rm.add("Standard1", ByteBufferUtil.bytes(Integer.toString(j)),
+                rm.add("Standard1", Util.cellname(Integer.toString(j)),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp,
                        j > 0 ? 3 : 0); // let first column never expire, since deleting all columns does not produce sstable
@@ -133,7 +133,7 @@ public class CompactionsTest extends SchemaLoader
 
         // a subcolumn
         RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-        rm.add("Super1", CompositeType.build(scName, ByteBufferUtil.bytes(0)),
+        rm.add("Super1", Util.cellname(scName, ByteBufferUtil.bytes(0)),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                FBUtilities.timestampMicros());
         rm.apply();
@@ -183,7 +183,7 @@ public class CompactionsTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk(String.valueOf(i));
             RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-            rm.add("Standard2", ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
+            rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
             rm.apply();
 
             if (i % 2 == 0)
@@ -198,7 +198,7 @@ public class CompactionsTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk(String.valueOf(i));
             RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-            rm.add("Standard2", ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
+            rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
             rm.apply();
         }
         cfs.forceBlockingFlush();
@@ -243,7 +243,7 @@ public class CompactionsTest extends SchemaLoader
         for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
             DecoratedKey key = Util.dk(String.valueOf(i));
             RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-            rm.add(cfname, ByteBufferUtil.bytes("col"),
+            rm.add(cfname, Util.cellname("col"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    System.currentTimeMillis());
             rm.apply();
@@ -311,7 +311,7 @@ public class CompactionsTest extends SchemaLoader
         // Add test row
         DecoratedKey key = Util.dk(k);
         RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-        rm.add(cfname, CompositeType.build(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+        rm.add(cfname, Util.cellname(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         rm.apply();
 
         cfs.forceBlockingFlush();
@@ -365,7 +365,7 @@ public class CompactionsTest extends SchemaLoader
         long timestamp = System.currentTimeMillis();
         DecoratedKey decoratedKey = Util.dk(String.format("%03d", key));
         RowMutation rm = new RowMutation(KEYSPACE1, decoratedKey.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1000);
+        rm.add("Standard1", Util.cellname("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1000);
         rm.apply();
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
index f8c3c00..a107e1b 100644
--- a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
@@ -74,7 +74,7 @@ public class LeveledCompactionStrategyTest extends SchemaLoader
             RowMutation rm = new RowMutation(ksname, key.key);
             for (int c = 0; c < columns; c++)
             {
-                rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
+                rm.add(cfname, Util.cellname("column" + c), value, 0);
             }
             rm.apply();
             cfs.forceBlockingFlush();
@@ -122,7 +122,7 @@ public class LeveledCompactionStrategyTest extends SchemaLoader
             RowMutation rm = new RowMutation(ksname, key.key);
             for (int c = 0; c < columns; c++)
             {
-                rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
+                rm.add(cfname, Util.cellname("column" + c), value, 0);
             }
             rm.apply();
             cfs.forceBlockingFlush();
@@ -166,7 +166,7 @@ public class LeveledCompactionStrategyTest extends SchemaLoader
             RowMutation rm = new RowMutation(ksname, key.key);
             for (int c = 0; c < columns; c++)
             {
-                rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
+                rm.add(cfname, Util.cellname("column" + c), value, 0);
             }
             rm.apply();
             cfs.forceBlockingFlush();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
index 444e30c..31b3375 100644
--- a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
@@ -50,7 +50,7 @@ public class OneCompactionTest extends SchemaLoader
         for (int j = 0; j < insertsPerTable; j++) {
             DecoratedKey key = Util.dk(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key.key);
-            rm.add(columnFamilyName, ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add(columnFamilyName, Util.cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
             inserted.add(key);
             store.forceBlockingFlush();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
index 6bfa4e8..bc4061c 100644
--- a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
@@ -165,7 +165,7 @@ public class SizeTieredCompactionStrategyTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk(String.valueOf(r));
             RowMutation rm = new RowMutation(ksname, key.key);
-            rm.add(cfname, ByteBufferUtil.bytes("column"), value, 0);
+            rm.add(cfname, Util.cellname("column"), value, 0);
             rm.apply();
             cfs.forceBlockingFlush();
         }
@@ -209,7 +209,7 @@ public class SizeTieredCompactionStrategyTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk(String.valueOf(r));
             RowMutation rm = new RowMutation(ksname, key.key);
-            rm.add(cfname, ByteBufferUtil.bytes("column"), value, 0);
+            rm.add(cfname, Util.cellname("column"), value, 0);
             rm.apply();
             cfs.forceBlockingFlush();
         }
@@ -259,4 +259,4 @@ public class SizeTieredCompactionStrategyTest extends SchemaLoader
         filtered = filterColdSSTables(sstrs, 1.0);
         assertTrue(filtered.isEmpty());
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
index 7666922..1d6664e 100644
--- a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
@@ -55,11 +55,11 @@ public class TTLExpiryTest extends SchemaLoader
         cfs.metadata.gcGraceSeconds(0);
         long timestamp = System.currentTimeMillis();
         RowMutation rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col"),
+        rm.add("Standard1", Util.cellname("col"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                1);
-        rm.add("Standard1", ByteBufferUtil.bytes("col7"),
+        rm.add("Standard1", Util.cellname("col7"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                1);
@@ -68,21 +68,21 @@ public class TTLExpiryTest extends SchemaLoader
         cfs.forceBlockingFlush();
 
         rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-                rm.add("Standard1", ByteBufferUtil.bytes("col2"),
+                rm.add("Standard1", Util.cellname("col2"),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp,
                        1);
                 rm.apply();
         cfs.forceBlockingFlush();
         rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col3"),
+        rm.add("Standard1", Util.cellname("col3"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    timestamp,
                    1);
         rm.apply();
         cfs.forceBlockingFlush();
         rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col311"),
+        rm.add("Standard1", Util.cellname("col311"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    timestamp,
                    1);
@@ -103,11 +103,11 @@ public class TTLExpiryTest extends SchemaLoader
         cfs.metadata.gcGraceSeconds(0);
         long timestamp = System.currentTimeMillis();
         RowMutation rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col"),
+        rm.add("Standard1", Util.cellname("col"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                1);
-        rm.add("Standard1", ByteBufferUtil.bytes("col7"),
+        rm.add("Standard1", Util.cellname("col7"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                1);
@@ -116,14 +116,14 @@ public class TTLExpiryTest extends SchemaLoader
         cfs.forceBlockingFlush();
 
         rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-                rm.add("Standard1", ByteBufferUtil.bytes("col2"),
+                rm.add("Standard1", Util.cellname("col2"),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp,
                        1);
                 rm.apply();
         cfs.forceBlockingFlush();
         rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col3"),
+        rm.add("Standard1", Util.cellname("col3"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    timestamp,
                    1);
@@ -131,7 +131,7 @@ public class TTLExpiryTest extends SchemaLoader
         cfs.forceBlockingFlush();
         DecoratedKey noTTLKey = Util.dk("nottl");
         rm = new RowMutation("Keyspace1", noTTLKey.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col311"),
+        rm.add("Standard1", Util.cellname("col311"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    timestamp);
         rm.apply();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/index/PerRowSecondaryIndexTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/index/PerRowSecondaryIndexTest.java b/test/unit/org/apache/cassandra/db/index/PerRowSecondaryIndexTest.java
index b983e6e..7b422b2 100644
--- a/test/unit/org/apache/cassandra/db/index/PerRowSecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/db/index/PerRowSecondaryIndexTest.java
@@ -29,9 +29,11 @@ import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.Util;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -58,21 +60,21 @@ public class PerRowSecondaryIndexTest extends SchemaLoader
         // create a row then test that the configured index instance was able to read the row
         RowMutation rm;
         rm = new RowMutation("PerRowSecondaryIndex", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("indexed"), ByteBufferUtil.bytes("foo"), 1);
+        rm.add("Indexed1", Util.cellname("indexed"), ByteBufferUtil.bytes("foo"), 1);
         rm.apply();
 
         ColumnFamily indexedRow = PerRowSecondaryIndexTest.TestIndex.LAST_INDEXED_ROW;
         assertNotNull(indexedRow);
-        assertEquals(ByteBufferUtil.bytes("foo"), indexedRow.getColumn(ByteBufferUtil.bytes("indexed")).value());
+        assertEquals(ByteBufferUtil.bytes("foo"), indexedRow.getColumn(Util.cellname("indexed")).value());
 
         // update the row and verify what was indexed
         rm = new RowMutation("PerRowSecondaryIndex", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("indexed"), ByteBufferUtil.bytes("bar"), 2);
+        rm.add("Indexed1", Util.cellname("indexed"), ByteBufferUtil.bytes("bar"), 2);
         rm.apply();
 
         indexedRow = PerRowSecondaryIndexTest.TestIndex.LAST_INDEXED_ROW;
         assertNotNull(indexedRow);
-        assertEquals(ByteBufferUtil.bytes("bar"), indexedRow.getColumn(ByteBufferUtil.bytes("indexed")).value());
+        assertEquals(ByteBufferUtil.bytes("bar"), indexedRow.getColumn(Util.cellname("indexed")).value());
         assertTrue(Arrays.equals("k1".getBytes(), PerRowSecondaryIndexTest.TestIndex.LAST_INDEXED_KEY.array()));
     }
 
@@ -82,7 +84,7 @@ public class PerRowSecondaryIndexTest extends SchemaLoader
         // issue a column delete and test that the configured index instance was notified to update
         RowMutation rm;
         rm = new RowMutation("PerRowSecondaryIndex", ByteBufferUtil.bytes("k2"));
-        rm.delete("Indexed1", ByteBufferUtil.bytes("indexed"), 1);
+        rm.delete("Indexed1", Util.cellname("indexed"), 1);
         rm.apply();
 
         ColumnFamily indexedRow = PerRowSecondaryIndexTest.TestIndex.LAST_INDEXED_ROW;
@@ -184,6 +186,12 @@ public class PerRowSecondaryIndexTest extends SchemaLoader
         }
 
         @Override
+        public boolean indexes(CellName name)
+        {
+            return true;
+        }
+
+        @Override
         public void removeIndex(ByteBuffer columnName)
         {
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
index 1039fb6..b7452b1 100644
--- a/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/CompositeTypeTest.java
@@ -34,6 +34,7 @@ import org.apache.cassandra.Util;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.utils.*;
 
@@ -185,11 +186,11 @@ public class CompositeTypeTest extends SchemaLoader
 
         Iterator<Column> iter = cf.getSortedColumns().iterator();
 
-        assert iter.next().name().equals(cname1);
-        assert iter.next().name().equals(cname2);
-        assert iter.next().name().equals(cname3);
-        assert iter.next().name().equals(cname4);
-        assert iter.next().name().equals(cname5);
+        assert iter.next().name().toByteBuffer().equals(cname1);
+        assert iter.next().name().toByteBuffer().equals(cname2);
+        assert iter.next().name().toByteBuffer().equals(cname3);
+        assert iter.next().name().toByteBuffer().equals(cname4);
+        assert iter.next().name().toByteBuffer().equals(cname5);
     }
 
     @Test
@@ -257,7 +258,7 @@ public class CompositeTypeTest extends SchemaLoader
 
     private void addColumn(RowMutation rm, ByteBuffer cname)
     {
-        rm.add(cfName, cname, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+        rm.add(cfName, CellNames.simpleDense(cname), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
     }
 
     private ByteBuffer createCompositeKey(String s, UUID uuid, int i, boolean lastIsOne)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
index f8e2fb6..fd489ed 100644
--- a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
@@ -31,6 +31,7 @@ import static org.junit.Assert.fail;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.utils.*;
 
@@ -184,11 +185,11 @@ public class DynamicCompositeTypeTest extends SchemaLoader
 
         Iterator<Column> iter = cf.getSortedColumns().iterator();
 
-        assert iter.next().name().equals(cname1);
-        assert iter.next().name().equals(cname2);
-        assert iter.next().name().equals(cname3);
-        assert iter.next().name().equals(cname4);
-        assert iter.next().name().equals(cname5);
+        assert iter.next().name().toByteBuffer().equals(cname1);
+        assert iter.next().name().toByteBuffer().equals(cname2);
+        assert iter.next().name().toByteBuffer().equals(cname3);
+        assert iter.next().name().toByteBuffer().equals(cname4);
+        assert iter.next().name().toByteBuffer().equals(cname5);
     }
 
     @Test
@@ -231,7 +232,7 @@ public class DynamicCompositeTypeTest extends SchemaLoader
 
     private void addColumn(RowMutation rm, ByteBuffer cname)
     {
-        rm.add(cfName, cname, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+        rm.add(cfName, CellNames.simpleDense(cname), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
     }
 
     private ByteBuffer createDynamicCompositeKey(String s, UUID uuid, int i, boolean lastIsOne)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index fa96270..45b670d 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -23,6 +23,7 @@ import java.util.Random;
 
 import org.junit.Test;
 
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
@@ -55,7 +56,7 @@ public class CompressedRandomAccessReaderTest
 
         try
         {
-            MetadataCollector sstableMetadataCollector = new MetadataCollector(BytesType.instance).replayPosition(null);
+            MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance)).replayPosition(null);
             SequentialWriter writer = compressed
                 ? new CompressedSequentialWriter(f, filename + ".metadata", false, new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector)
                 : new SequentialWriter(f, CompressionParameters.DEFAULT_CHUNK_LENGTH, false);
@@ -106,7 +107,7 @@ public class CompressedRandomAccessReaderTest
         File metadata = new File(file.getPath() + ".meta");
         metadata.deleteOnExit();
 
-        MetadataCollector sstableMetadataCollector = new MetadataCollector(BytesType.instance).replayPosition(null);
+        MetadataCollector sstableMetadataCollector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance)).replayPosition(null);
         SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), false, new CompressionParameters(SnappyCompressor.instance), sstableMetadataCollector);
 
         writer.write(CONTENT.getBytes());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/IndexHelperTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexHelperTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexHelperTest.java
index 53a79f8..9d0e6cf 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexHelperTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexHelperTest.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.*;
 
 import org.junit.Test;
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.IntegerType;
 import static org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
@@ -32,34 +34,39 @@ import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
 public class IndexHelperTest
 {
+    private static CellName cn(long l)
+    {
+        return Util.cellname(l);
+    }
+
     @Test
     public void testIndexHelper()
     {
         List<IndexInfo> indexes = new ArrayList<IndexInfo>();
-        indexes.add(new IndexInfo(bytes(0L), bytes(5L), 0, 0));
-        indexes.add(new IndexInfo(bytes(10L), bytes(15L), 0, 0));
-        indexes.add(new IndexInfo(bytes(20L), bytes(25L), 0, 0));
+        indexes.add(new IndexInfo(cn(0L), cn(5L), 0, 0));
+        indexes.add(new IndexInfo(cn(10L), cn(15L), 0, 0));
+        indexes.add(new IndexInfo(cn(20L), cn(25L), 0, 0));
 
-        AbstractType comp = IntegerType.instance;
+        CellNameType comp = new SimpleDenseCellNameType(IntegerType.instance);
 
-        assertEquals(0, IndexHelper.indexFor(bytes(-1L), indexes, comp, false, -1));
-        assertEquals(0, IndexHelper.indexFor(bytes(5L), indexes, comp, false, -1));
-        assertEquals(1, IndexHelper.indexFor(bytes(12L), indexes, comp, false, -1));
-        assertEquals(2, IndexHelper.indexFor(bytes(17L), indexes, comp, false, -1));
-        assertEquals(3, IndexHelper.indexFor(bytes(100L), indexes, comp, false, -1));
-        assertEquals(3, IndexHelper.indexFor(bytes(100L), indexes, comp, false, 0));
-        assertEquals(3, IndexHelper.indexFor(bytes(100L), indexes, comp, false, 1));
-        assertEquals(3, IndexHelper.indexFor(bytes(100L), indexes, comp, false, 2));
-        assertEquals(-1, IndexHelper.indexFor(bytes(100L), indexes, comp, false, 3));
+        assertEquals(0, IndexHelper.indexFor(cn(-1L), indexes, comp, false, -1));
+        assertEquals(0, IndexHelper.indexFor(cn(5L), indexes, comp, false, -1));
+        assertEquals(1, IndexHelper.indexFor(cn(12L), indexes, comp, false, -1));
+        assertEquals(2, IndexHelper.indexFor(cn(17L), indexes, comp, false, -1));
+        assertEquals(3, IndexHelper.indexFor(cn(100L), indexes, comp, false, -1));
+        assertEquals(3, IndexHelper.indexFor(cn(100L), indexes, comp, false, 0));
+        assertEquals(3, IndexHelper.indexFor(cn(100L), indexes, comp, false, 1));
+        assertEquals(3, IndexHelper.indexFor(cn(100L), indexes, comp, false, 2));
+        assertEquals(-1, IndexHelper.indexFor(cn(100L), indexes, comp, false, 3));
 
-        assertEquals(-1, IndexHelper.indexFor(bytes(-1L), indexes, comp, true, -1));
-        assertEquals(0, IndexHelper.indexFor(bytes(5L), indexes, comp, true, -1));
-        assertEquals(1, IndexHelper.indexFor(bytes(17L), indexes, comp, true, -1));
-        assertEquals(2, IndexHelper.indexFor(bytes(100L), indexes, comp, true, -1));
-        assertEquals(0, IndexHelper.indexFor(bytes(100L), indexes, comp, true, 0));
-        assertEquals(1, IndexHelper.indexFor(bytes(12L), indexes, comp, true, -1));
-        assertEquals(1, IndexHelper.indexFor(bytes(100L), indexes, comp, true, 1));
-        assertEquals(2, IndexHelper.indexFor(bytes(100L), indexes, comp, true, 2));
-        assertEquals(-1, IndexHelper.indexFor(bytes(100L), indexes, comp, true, 4));
+        assertEquals(-1, IndexHelper.indexFor(cn(-1L), indexes, comp, true, -1));
+        assertEquals(0, IndexHelper.indexFor(cn(5L), indexes, comp, true, -1));
+        assertEquals(1, IndexHelper.indexFor(cn(17L), indexes, comp, true, -1));
+        assertEquals(2, IndexHelper.indexFor(cn(100L), indexes, comp, true, -1));
+        assertEquals(0, IndexHelper.indexFor(cn(100L), indexes, comp, true, 0));
+        assertEquals(1, IndexHelper.indexFor(cn(12L), indexes, comp, true, -1));
+        assertEquals(1, IndexHelper.indexFor(cn(100L), indexes, comp, true, 1));
+        assertEquals(2, IndexHelper.indexFor(cn(100L), indexes, comp, true, 2));
+        assertEquals(-1, IndexHelper.indexFor(cn(100L), indexes, comp, true, 4));
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
index 4b4c562..94cc2d4 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
@@ -78,7 +78,7 @@ public class IndexSummaryManagerTest extends SchemaLoader
             QueryFilter filter = QueryFilter.getIdentityFilter(key, cfs.getColumnFamilyName(), System.currentTimeMillis());
             ColumnFamily row = cfs.getColumnFamily(filter);
             assertNotNull(row);
-            Column column = row.getColumn(ByteBufferUtil.bytes("column"));
+            Column column = row.getColumn(Util.cellname("column"));
             assertNotNull(column);
             assertEquals(100, column.value().array().length);
         }
@@ -112,7 +112,7 @@ public class IndexSummaryManagerTest extends SchemaLoader
             {
                 DecoratedKey key = Util.dk(String.valueOf(row));
                 RowMutation rm = new RowMutation(ksname, key.key);
-                rm.add(cfname, ByteBufferUtil.bytes("column"), value, 0);
+                rm.add(cfname, Util.cellname("column"), value, 0);
                 rm.apply();
             }
             cfs.forceBlockingFlush();
@@ -246,7 +246,7 @@ public class IndexSummaryManagerTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk(String.valueOf(row));
             RowMutation rm = new RowMutation(ksname, key.key);
-            rm.add(cfname, ByteBufferUtil.bytes("column"), value, 0);
+            rm.add(cfname, Util.cellname("column"), value, 0);
             rm.apply();
         }
         cfs.forceBlockingFlush();
@@ -307,7 +307,7 @@ public class IndexSummaryManagerTest extends SchemaLoader
             {
                 DecoratedKey key = Util.dk(String.valueOf(row));
                 RowMutation rm = new RowMutation(ksname, key.key);
-                rm.add(cfname, ByteBufferUtil.bytes("column"), value, 0);
+                rm.add(cfname, Util.cellname("column"), value, 0);
                 rm.apply();
             }
             cfs.forceBlockingFlush();
@@ -328,4 +328,4 @@ public class IndexSummaryManagerTest extends SchemaLoader
                 assertTrue(entry.getValue() < 0.9);
         }
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index 0b0ecf8..bb3689c 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@ -25,8 +25,10 @@ import java.nio.ByteBuffer;
 import java.util.*;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.columniterator.SSTableNamesIterator;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.junit.BeforeClass;
@@ -97,13 +99,14 @@ public class LegacySSTableTest extends SchemaLoader
         try
         {
             SSTableReader reader = SSTableReader.open(getDescriptor(version));
+            CellNameType type = reader.metadata.comparator;
             for (String keystring : TEST_DATA)
             {
                 ByteBuffer key = ByteBufferUtil.bytes(keystring);
                 // confirm that the bloom filter does not reject any keys/names
                 DecoratedKey dk = reader.partitioner.decorateKey(key);
-                SSTableNamesIterator iter = new SSTableNamesIterator(reader, dk, FBUtilities.singleton(key));
-                assert iter.next().name().equals(key);
+                SSTableNamesIterator iter = new SSTableNamesIterator(reader, dk, FBUtilities.singleton(Util.cellname(key), type));
+                assert iter.next().name().toByteBuffer().equals(key);
             }
 
             // TODO actually test some reads

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
index 45a69e2..809934f 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
@@ -84,6 +84,6 @@ public class SSTableLoaderTest extends SchemaLoader
         List<Row> rows = Util.getRangeSlice(Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1"));
         assertEquals(1, rows.size());
         assertEquals(key, rows.get(0).key);
-        assertEquals(ByteBufferUtil.bytes(100), rows.get(0).cf.getColumn(ByteBufferUtil.bytes("col1")).value());
+        assertEquals(ByteBufferUtil.bytes(100), rows.get(0).cf.getColumn(Util.cellname("col1")).value());
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
index 6bd5f56..cf44573 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableMetadataTest.java
@@ -30,10 +30,8 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.RowMutation;
-import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.IntegerType;
@@ -55,14 +53,14 @@ public class SSTableMetadataTest extends SchemaLoader
             DecoratedKey key = Util.dk(Integer.toString(i));
             RowMutation rm = new RowMutation("Keyspace1", key.key);
             for (int j = 0; j < 10; j++)
-                rm.add("Standard1", ByteBufferUtil.bytes(Integer.toString(j)),
+                rm.add("Standard1", Util.cellname(Integer.toString(j)),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp,
                        10 + j);
             rm.apply();
         }
         RowMutation rm = new RowMutation("Keyspace1", Util.dk("longttl").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col"),
+        rm.add("Standard1", Util.cellname("col"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                10000);
@@ -78,7 +76,7 @@ public class SSTableMetadataTest extends SchemaLoader
 
         }
         rm = new RowMutation("Keyspace1", Util.dk("longttl2").key);
-        rm.add("Standard1", ByteBufferUtil.bytes("col"),
+        rm.add("Standard1", Util.cellname("col"),
                ByteBufferUtil.EMPTY_BYTE_BUFFER,
                timestamp,
                20000);
@@ -126,11 +124,11 @@ public class SSTableMetadataTest extends SchemaLoader
         DecoratedKey key = Util.dk("deletetest");
         RowMutation rm = new RowMutation("Keyspace1", key.key);
         for (int i = 0; i<5; i++)
-            rm.add("Standard2", ByteBufferUtil.bytes("deletecolumn"+i),
+            rm.add("Standard2", Util.cellname("deletecolumn"+i),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp,
                        100);
-        rm.add("Standard2", ByteBufferUtil.bytes("todelete"),
+        rm.add("Standard2", Util.cellname("todelete"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    timestamp,
                    1000);
@@ -145,7 +143,7 @@ public class SSTableMetadataTest extends SchemaLoader
             assertEquals(ttltimestamp + 1000, firstMaxDelTime, 10);
         }
         rm = new RowMutation("Keyspace1", key.key);
-        rm.delete("Standard2", ByteBufferUtil.bytes("todelete"), timestamp + 1);
+        rm.delete("Standard2", Util.cellname("todelete"), timestamp + 1);
         rm.apply();
         store.forceBlockingFlush();
         assertEquals(2,store.getSSTables().size());
@@ -179,9 +177,7 @@ public class SSTableMetadataTest extends SchemaLoader
             RowMutation rm = new RowMutation("Keyspace1", key.key);
             for (int i = 100; i<150; i++)
             {
-                rm.add("Standard3", ByteBufferUtil.bytes(j+"col"+i),
-                   ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                   System.currentTimeMillis());
+                rm.add("Standard3", Util.cellname(j+"col"+i), ByteBufferUtil.EMPTY_BYTE_BUFFER, System.currentTimeMillis());
             }
             rm.apply();
         }
@@ -196,9 +192,7 @@ public class SSTableMetadataTest extends SchemaLoader
         RowMutation rm = new RowMutation("Keyspace1", key.key);
         for (int i = 101; i<299; i++)
         {
-            rm.add("Standard3", ByteBufferUtil.bytes(9+"col"+i),
-               ByteBufferUtil.EMPTY_BYTE_BUFFER,
-               System.currentTimeMillis());
+            rm.add("Standard3", Util.cellname(9+"col"+i), ByteBufferUtil.EMPTY_BYTE_BUFFER, System.currentTimeMillis());
         }
         rm.apply();
 
@@ -229,13 +223,13 @@ public class SSTableMetadataTest extends SchemaLoader
 
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
 
-        CompositeType ct = CompositeType.getInstance(BytesType.instance, IntegerType.instance);
+        CellNameType type = cfs.getComparator();
 
         ByteBuffer key = ByteBufferUtil.bytes("k");
         for (int i = 0; i < 10; i++)
         {
             RowMutation rm = new RowMutation("Keyspace1", key);
-            ByteBuffer colName = ct.builder().add(ByteBufferUtil.bytes("a"+(9-i))).add(ByteBufferUtil.bytes(i)).build();
+            CellName colName = type.makeCellName(ByteBufferUtil.bytes("a"+(9-i)), ByteBufferUtil.bytes(i));
             rm.add("StandardComposite2", colName, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
             rm.apply();
         }
@@ -245,7 +239,7 @@ public class SSTableMetadataTest extends SchemaLoader
         for (int i = 0; i < 10; i++)
         {
             RowMutation rm = new RowMutation("Keyspace1", key);
-            ByteBuffer colName = ct.builder().add(ByteBufferUtil.bytes("b"+(9-i))).add(ByteBufferUtil.bytes(i)).build();
+            CellName colName = type.makeCellName(ByteBufferUtil.bytes("b"+(9-i)), ByteBufferUtil.bytes(i));
             rm.add("StandardComposite2", colName, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
             rm.apply();
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index 6bf17fd..cf17e5f 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -43,6 +43,7 @@ import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.ICompactionScanner;
 import org.apache.cassandra.dht.IPartitioner;
@@ -62,6 +63,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.apache.cassandra.Util.cellname;
 
 @RunWith(OrderedJUnit4ClassRunner.class)
 public class SSTableReaderTest extends SchemaLoader
@@ -85,7 +87,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Standard2", ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add("Standard2", cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -126,7 +128,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Standard1", ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add("Standard1", cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -161,7 +163,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Standard1", ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add("Standard1", cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -189,7 +191,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Standard2", ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add("Standard2", cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -218,7 +220,7 @@ public class SSTableReaderTest extends SchemaLoader
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Indexed1");
         ByteBuffer key = ByteBufferUtil.bytes(String.valueOf("k1"));
         RowMutation rm = new RowMutation("Keyspace1", key);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), System.currentTimeMillis());
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), System.currentTimeMillis());
         rm.apply();
         store.forceBlockingFlush();
 
@@ -250,7 +252,7 @@ public class SSTableReaderTest extends SchemaLoader
             if (store.metadata.getKeyValidator().compare(lastKey.key, key.key) < 0)
                 lastKey = key;
             RowMutation rm = new RowMutation(ks, key.key);
-            rm.add(cf, ByteBufferUtil.bytes("col"),
+            rm.add(cf, cellname("col"),
                    ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);
             rm.apply();
         }
@@ -274,7 +276,7 @@ public class SSTableReaderTest extends SchemaLoader
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Indexed1");
         ByteBuffer key = ByteBufferUtil.bytes(String.valueOf("k1"));
         RowMutation rm = new RowMutation("Keyspace1", key);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), System.currentTimeMillis());
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), System.currentTimeMillis());
         rm.apply();
         store.forceBlockingFlush();
 
@@ -301,7 +303,7 @@ public class SSTableReaderTest extends SchemaLoader
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
         ByteBuffer key = ByteBufferUtil.bytes(String.valueOf("k1"));
         RowMutation rm = new RowMutation("Keyspace1", key);
-        rm.add("Standard1", ByteBufferUtil.bytes("xyz"), ByteBufferUtil.bytes("abc"), 0);
+        rm.add("Standard1", cellname("xyz"), ByteBufferUtil.bytes("abc"), 0);
         rm.apply();
         store.forceBlockingFlush();
         boolean foundScanner = false;
@@ -328,7 +330,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Standard2", ByteBufferUtil.bytes("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
+            rm.add("Standard2", cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -362,7 +364,7 @@ public class SSTableReaderTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.format("%3d", j));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("StandardLowIndexInterval", ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes(String.format("%3d", j)), j);
+            rm.add("StandardLowIndexInterval", Util.cellname("0"), ByteBufferUtil.bytes(String.format("%3d", j)), j);
             rm.apply();
         }
         store.forceBlockingFlush();
@@ -383,9 +385,9 @@ public class SSTableReaderTest extends SchemaLoader
             {
                 public void run()
                 {
-                    ColumnFamily result = store.getColumnFamily(sstable.partitioner.decorateKey(key), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 100, 100);
+                    ColumnFamily result = store.getColumnFamily(sstable.partitioner.decorateKey(key), Composites.EMPTY, Composites.EMPTY, false, 100, 100);
                     assertFalse(result.isEmpty());
-                    assertEquals(0, ByteBufferUtil.compare(String.format("%3d", index).getBytes(), result.getColumn(ByteBufferUtil.bytes("0")).value()));
+                    assertEquals(0, ByteBufferUtil.compare(String.format("%3d", index).getBytes(), result.getColumn(Util.cellname("0")).value()));
                 }
             }));
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
index c17fb71..8f7f07b 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableScannerTest.java
@@ -72,7 +72,7 @@ public class SSTableScannerTest extends SchemaLoader
         long timestamp = System.currentTimeMillis();
         DecoratedKey decoratedKey = Util.dk(toKey(key));
         RowMutation rm = new RowMutation(KEYSPACE, decoratedKey.key);
-        rm.add(TABLE, ByteBufferUtil.bytes("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1000);
+        rm.add(TABLE, Util.cellname("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1000);
         rm.apply();
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
index 9e7aa16..b61dc4c 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableSimpleWriterTest.java
@@ -94,7 +94,7 @@ public class SSTableSimpleWriterTest extends SchemaLoader
         int i = 0;
         for (Column c : cf)
         {
-            assert toInt(c.name()) == i : "Column name should be " + i + ", got " + toInt(c.name());
+            assert toInt(c.name().toByteBuffer()) == i : "Column name should be " + i + ", got " + toInt(c.name().toByteBuffer());
             assert c.value().equals(bytes("v"));
             assert c.timestamp() == 1;
             ++i;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
index feeac7c..f7687c1 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
@@ -163,7 +163,7 @@ public class SSTableUtils
             for (String key : keys)
             {
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(ksname, cfname);
-                cf.addColumn(new Column(ByteBufferUtil.bytes(key), ByteBufferUtil.bytes(key), 0));
+                cf.addColumn(new Column(Util.cellname(key), ByteBufferUtil.bytes(key), 0));
                 map.put(key, cf);
             }
             return write(map);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
index e0b87cd..6793c85 100644
--- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import com.google.common.collect.Sets;
 import org.junit.Test;
 
+import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
 import org.apache.cassandra.db.commitlog.ReplayPosition;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.dht.RandomPartitioner;
@@ -51,7 +52,7 @@ public class MetadataSerializerTest
         long minTimestamp = 2162517136L;
         long maxTimestamp = 4162517136L;
 
-        MetadataCollector collector = new MetadataCollector(BytesType.instance)
+        MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance))
                                                       .estimatedRowSize(rowSizes)
                                                       .estimatedColumnCount(columnCounts)
                                                       .replayPosition(rp);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/service/AntiEntropyServiceCounterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/AntiEntropyServiceCounterTest.java b/test/unit/org/apache/cassandra/service/AntiEntropyServiceCounterTest.java
index 0950f1d..b33f208 100644
--- a/test/unit/org/apache/cassandra/service/AntiEntropyServiceCounterTest.java
+++ b/test/unit/org/apache/cassandra/service/AntiEntropyServiceCounterTest.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.LinkedList;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.db.ConsistencyLevel;
 
@@ -39,7 +40,7 @@ public class AntiEntropyServiceCounterTest extends AntiEntropyServiceTestAbstrac
     {
         List<IMutation> rms = new LinkedList<IMutation>();
         RowMutation rm = new RowMutation(keyspaceName, ByteBufferUtil.bytes("key1"));
-        rm.addCounter(cfname, ByteBufferUtil.bytes("Column1"), 42);
+        rm.addCounter(cfname, CellNames.simpleDense(ByteBufferUtil.bytes("Column1")), 42);
         rms.add(new CounterMutation(rm, ConsistencyLevel.ONE));
         return rms;
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/service/AntiEntropyServiceStandardTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/AntiEntropyServiceStandardTest.java b/test/unit/org/apache/cassandra/service/AntiEntropyServiceStandardTest.java
index 169be2d..9cfe8f0 100644
--- a/test/unit/org/apache/cassandra/service/AntiEntropyServiceStandardTest.java
+++ b/test/unit/org/apache/cassandra/service/AntiEntropyServiceStandardTest.java
@@ -23,6 +23,7 @@ package org.apache.cassandra.service;
 import java.util.List;
 import java.util.LinkedList;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -39,7 +40,7 @@ public class AntiEntropyServiceStandardTest extends AntiEntropyServiceTestAbstra
         List<IMutation> rms = new LinkedList<IMutation>();
         RowMutation rm;
         rm = new RowMutation(keyspaceName, ByteBufferUtil.bytes("key1"));
-        rm.add(cfname, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdfasdf"), 0);
+        rm.add(cfname, Util.cellname("Column1"), ByteBufferUtil.bytes("asdfasdf"), 0);
         rms.add(rm);
         return rms;
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/service/QueryPagerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/QueryPagerTest.java b/test/unit/org/apache/cassandra/service/QueryPagerTest.java
index 6239ba0..0d0efb9 100644
--- a/test/unit/org/apache/cassandra/service/QueryPagerTest.java
+++ b/test/unit/org/apache/cassandra/service/QueryPagerTest.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.Util;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.OrderedJUnit4ClassRunner;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.service.pager.*;
@@ -45,6 +46,11 @@ public class QueryPagerTest extends SchemaLoader
     private static final String KS = "Keyspace1";
     private static final String CF = "Standard1";
 
+    private static String string(CellName name)
+    {
+        return string(name.toByteBuffer());
+    }
+
     private static String string(ByteBuffer bb)
     {
         try
@@ -109,9 +115,9 @@ public class QueryPagerTest extends SchemaLoader
 
     private static ReadCommand namesQuery(String key, String... names)
     {
-        SortedSet<ByteBuffer> s = new TreeSet<ByteBuffer>(cfs().metadata.comparator);
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs().metadata.comparator);
         for (String name : names)
-            s.add(bytes(name));
+            s.add(CellNames.simpleDense(bytes(name)));
         return new SliceByNamesReadCommand(KS, bytes(key), CF, System.currentTimeMillis(), new NamesQueryFilter(s, true));
     }
 
@@ -122,22 +128,22 @@ public class QueryPagerTest extends SchemaLoader
 
     private static ReadCommand sliceQuery(String key, String start, String end, boolean reversed, int count)
     {
-        SliceQueryFilter filter = new SliceQueryFilter(bytes(start), bytes(end), reversed, count);
+        SliceQueryFilter filter = new SliceQueryFilter(CellNames.simpleDense(bytes(start)), CellNames.simpleDense(bytes(end)), reversed, count);
         // Note: for MultiQueryTest, we need the same timestamp/expireBefore for all queries, so we just use 0 as it doesn't matter here.
         return new SliceFromReadCommand(KS, bytes(key), CF, 0, filter);
     }
 
     private static RangeSliceCommand rangeNamesQuery(AbstractBounds<RowPosition> range, int count, String... names)
     {
-        SortedSet<ByteBuffer> s = new TreeSet<ByteBuffer>(cfs().metadata.comparator);
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs().metadata.comparator);
         for (String name : names)
-            s.add(bytes(name));
+            s.add(CellNames.simpleDense(bytes(name)));
         return new RangeSliceCommand(KS, CF, System.currentTimeMillis(), new NamesQueryFilter(s, true), range, count);
     }
 
     private static RangeSliceCommand rangeSliceQuery(AbstractBounds<RowPosition> range, int count, String start, String end)
     {
-        SliceQueryFilter filter = new SliceQueryFilter(bytes(start), bytes(end), false, Integer.MAX_VALUE);
+        SliceQueryFilter filter = new SliceQueryFilter(CellNames.simpleDense(bytes(start)), CellNames.simpleDense(bytes(end)), false, Integer.MAX_VALUE);
         return new RangeSliceCommand(KS, CF, System.currentTimeMillis(), filter, range, count);
     }
 

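For readers tracking the API change in this hunk: query filters now take CellName/Composite objects instead of raw ByteBuffers, ordered by the table's CellNameType comparator. A minimal sketch of the new-style construction, using only calls visible in this patch (the ColumnFamilyStore is assumed to come from the surrounding test; this is illustrative, not part of the commit):

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.filter.NamesQueryFilter;
    import org.apache.cassandra.db.filter.SliceQueryFilter;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameFilterSketch
    {
        // By-name filter: cell names are CellName instances, sorted by the table's
        // CellNameType comparator (cfs.metadata.comparator after this patch).
        static NamesQueryFilter namesFilter(ColumnFamilyStore cfs, String... names)
        {
            SortedSet<CellName> s = new TreeSet<CellName>(cfs.metadata.comparator);
            for (String name : names)
                s.add(CellNames.simpleDense(ByteBufferUtil.bytes(name)));
            return new NamesQueryFilter(s, true);
        }

        // Slice filter: the start/end bounds are composites as well.
        static SliceQueryFilter sliceFilter(String start, String end, boolean reversed, int count)
        {
            return new SliceQueryFilter(CellNames.simpleDense(ByteBufferUtil.bytes(start)),
                                        CellNames.simpleDense(ByteBufferUtil.bytes(end)),
                                        reversed,
                                        count);
        }
    }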
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
index 7bb6272..1da33fc 100644
--- a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
+++ b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
@@ -50,6 +50,7 @@ import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.CounterId;
 import org.apache.cassandra.utils.FBUtilities;
+import static org.apache.cassandra.Util.cellname;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
@@ -167,7 +168,7 @@ public class StreamingTransferTest extends SchemaLoader
             String col = "col" + offs[i];
             assert cfs.getColumnFamily(QueryFilter.getIdentityFilter(Util.dk(key), cfs.name, System.currentTimeMillis())) != null;
             assert rows.get(i).key.key.equals(ByteBufferUtil.bytes(key));
-            assert rows.get(i).cf.getColumn(ByteBufferUtil.bytes(col)) != null;
+            assert rows.get(i).cf.getColumn(cellname(col)) != null;
         }
 
         // and that the max timestamp for the file was rediscovered
@@ -228,7 +229,7 @@ public class StreamingTransferTest extends SchemaLoader
                 long val = key.hashCode();
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(keyspace.getName(), cfs.name);
                 cf.addColumn(column(col, "v", timestamp));
-                cf.addColumn(new Column(ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(val), timestamp));
+                cf.addColumn(new Column(cellname("birthdate"), ByteBufferUtil.bytes(val), timestamp));
                 RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes(key), cf);
                 logger.debug("Applying row to transfer " + rm);
                 rm.apply();
@@ -265,12 +266,12 @@ public class StreamingTransferTest extends SchemaLoader
         String key = "key1";
         RowMutation rm = new RowMutation(ks, ByteBufferUtil.bytes(key));
         // add columns of size slightly less than column_index_size to force insert column index
-        rm.add(cfname, ByteBufferUtil.bytes(1), ByteBuffer.wrap(new byte[DatabaseDescriptor.getColumnIndexSize() - 64]), 2);
-        rm.add(cfname, ByteBufferUtil.bytes(6), ByteBuffer.wrap(new byte[DatabaseDescriptor.getColumnIndexSize()]), 2);
+        rm.add(cfname, cellname(1), ByteBuffer.wrap(new byte[DatabaseDescriptor.getColumnIndexSize() - 64]), 2);
+        rm.add(cfname, cellname(6), ByteBuffer.wrap(new byte[DatabaseDescriptor.getColumnIndexSize()]), 2);
         ColumnFamily cf = rm.addOrGet(cfname);
         // add RangeTombstones
-        cf.delete(new DeletionInfo(ByteBufferUtil.bytes(2), ByteBufferUtil.bytes(3), cf.getComparator(), 1, (int) (System.currentTimeMillis() / 1000)));
-        cf.delete(new DeletionInfo(ByteBufferUtil.bytes(5), ByteBufferUtil.bytes(7), cf.getComparator(), 1, (int) (System.currentTimeMillis() / 1000)));
+        cf.delete(new DeletionInfo(cellname(2), cellname(3), cf.getComparator(), 1, (int) (System.currentTimeMillis() / 1000)));
+        cf.delete(new DeletionInfo(cellname(5), cellname(7), cf.getComparator(), 1, (int) (System.currentTimeMillis() / 1000)));
         rm.apply();
         cfs.forceBlockingFlush();
 
@@ -319,12 +320,8 @@ public class StreamingTransferTest extends SchemaLoader
                 state.writeElement(CounterId.fromInt(4), 4L, 2L);
                 state.writeElement(CounterId.fromInt(6), 3L, 3L);
                 state.writeElement(CounterId.fromInt(8), 2L, 4L);
-                cf.addColumn(new CounterColumn(ByteBufferUtil.bytes(col),
-                        state.context,
-                        timestamp));
-                cfCleaned.addColumn(new CounterColumn(ByteBufferUtil.bytes(col),
-                        cc.clearAllDelta(state.context),
-                        timestamp));
+                cf.addColumn(new CounterColumn(cellname(col), state.context, timestamp));
+                cfCleaned.addColumn(new CounterColumn(cellname(col), cc.clearAllDelta(state.context), timestamp));
 
                 entries.put(key, cf);
                 cleanedEntries.put(key, cfCleaned);
@@ -456,7 +453,7 @@ public class StreamingTransferTest extends SchemaLoader
             {
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(keyspace.getName(), cfs.name);
                 cf.addColumn(column(colName, "value", timestamp));
-                cf.addColumn(new Column(ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(new Date(timestamp).toString()), timestamp));
+                cf.addColumn(new Column(cellname("birthdate"), ByteBufferUtil.bytes(new Date(timestamp).toString()), timestamp));
                 RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes(key), cf);
                 logger.debug("Applying row to transfer " + rm);
                 rm.apply();

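The write path in this test changes the same way: column names and range-tombstone bounds passed to RowMutation/ColumnFamily are composites built with the Util.cellname test helper added by this patch. A rough sketch of the pattern (keyspace/table names, values and timestamps here are arbitrary placeholders):

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.DeletionInfo;
    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameMutationSketch
    {
        static void writeAndDelete(String ksName, String cfName)
        {
            RowMutation rm = new RowMutation(ksName, ByteBufferUtil.bytes("key1"));

            // Column names are CellName objects now, not raw ByteBuffers.
            rm.add(cfName, Util.cellname(1), ByteBufferUtil.bytes("v1"), 2);
            rm.add(cfName, Util.cellname(6), ByteBufferUtil.bytes("v6"), 2);

            // Range-tombstone bounds are composites too.
            ColumnFamily cf = rm.addOrGet(cfName);
            cf.delete(new DeletionInfo(Util.cellname(2), Util.cellname(3), cf.getComparator(),
                                       1, (int) (System.currentTimeMillis() / 1000)));
            rm.apply();
        }
    }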
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/streaming/compress/CompressedInputStreamTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/streaming/compress/CompressedInputStreamTest.java b/test/unit/org/apache/cassandra/streaming/compress/CompressedInputStreamTest.java
index efe51c6..8d9480b 100644
--- a/test/unit/org/apache/cassandra/streaming/compress/CompressedInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/streaming/compress/CompressedInputStreamTest.java
@@ -25,6 +25,7 @@ import java.util.*;
 
 import org.junit.Test;
 
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.io.compress.CompressedSequentialWriter;
 import org.apache.cassandra.io.compress.CompressionMetadata;
@@ -60,7 +61,7 @@ public class CompressedInputStreamTest
         // write compressed data file of longs
         File tmp = new File(File.createTempFile("cassandra", "unittest").getParent(), "ks-cf-ib-1-Data.db");
         Descriptor desc = Descriptor.fromFilename(tmp.getAbsolutePath());
-        MetadataCollector collector = new MetadataCollector(BytesType.instance);
+        MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
         CompressionParameters param = new CompressionParameters(SnappyCompressor.instance, 32, Collections.EMPTY_MAP);
         CompressedSequentialWriter writer = new CompressedSequentialWriter(tmp, desc.filenameFor(Component.COMPRESSION_INFO), false, param, collector);
         Map<Long, Long> index = new HashMap<Long, Long>();

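Here the sstable metadata layer picks up the same abstraction: MetadataCollector is now constructed from a CellNameType rather than a bare AbstractType. A minimal sketch of the wrapping shown above:

    import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.io.sstable.metadata.MetadataCollector;

    public class MetadataCollectorSketch
    {
        static MetadataCollector forBytesComparator()
        {
            // Before: new MetadataCollector(BytesType.instance)
            // After:  wrap the AbstractType in a dense CellNameType first.
            return new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance));
        }
    }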
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
index 566e2c8..c2de740 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableExportTest.java
@@ -33,12 +33,7 @@ import java.io.PrintStream;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.db.Column;
-import org.apache.cassandra.db.ColumnFamily;
-import org.apache.cassandra.db.CounterColumn;
-import org.apache.cassandra.db.DeletionInfo;
-import org.apache.cassandra.db.ExpiringColumn;
-import org.apache.cassandra.db.TreeMapBackedSortedColumns;
+import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.io.sstable.Descriptor;
@@ -65,11 +60,11 @@ public class SSTableExportTest extends SchemaLoader
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(ByteBufferUtil.bytes("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
         
-        cfamily.addColumn(ByteBufferUtil.bytes("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
         writer.append(Util.dk("rowB"), cfamily);
         cfamily.clear();
         
@@ -91,12 +86,12 @@ public class SSTableExportTest extends SchemaLoader
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(ByteBufferUtil.bytes("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
 
         // Add rowB
-        cfamily.addColumn(ByteBufferUtil.bytes("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
         writer.append(Util.dk("rowB"), cfamily);
         cfamily.clear();
 
@@ -127,18 +122,18 @@ public class SSTableExportTest extends SchemaLoader
 
         int nowInSec = (int)(System.currentTimeMillis() / 1000) + 42; //live for 42 seconds
         // Add rowA
-        cfamily.addColumn(ByteBufferUtil.bytes("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
-        cfamily.addColumn(new ExpiringColumn(ByteBufferUtil.bytes("colExp"), ByteBufferUtil.bytes("valExp"), System.currentTimeMillis(), 42, nowInSec));
+        cfamily.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
+        cfamily.addColumn(new ExpiringColumn(Util.cellname("colExp"), ByteBufferUtil.bytes("valExp"), System.currentTimeMillis(), 42, nowInSec));
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
 
         // Add rowB
-        cfamily.addColumn(ByteBufferUtil.bytes("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
         writer.append(Util.dk("rowB"), cfamily);
         cfamily.clear();
 
         // Add rowExclude
-        cfamily.addColumn(ByteBufferUtil.bytes("colX"), ByteBufferUtil.bytes("valX"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colX"), ByteBufferUtil.bytes("valX"), System.currentTimeMillis());
         writer.append(Util.dk("rowExclude"), cfamily);
         cfamily.clear();
 
@@ -176,17 +171,18 @@ public class SSTableExportTest extends SchemaLoader
     @Test
     public void testRoundTripStandardCf() throws IOException
     {
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
         File tempSS = tempSSTableFile("Keyspace1", "Standard1");
         ColumnFamily cfamily = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Standard1");
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(ByteBufferUtil.bytes("name"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("name"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
 
         // Add rowExclude
-        cfamily.addColumn(ByteBufferUtil.bytes("name"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("name"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
         writer.append(Util.dk("rowExclude"), cfamily);
         cfamily.clear();
 
@@ -201,13 +197,13 @@ public class SSTableExportTest extends SchemaLoader
         new SSTableImport().importJson(tempJson.getPath(), "Keyspace1", "Standard1", tempSS2.getPath());
 
         reader = SSTableReader.open(Descriptor.fromFilename(tempSS2.getPath()));
-        QueryFilter qf = QueryFilter.getNamesFilter(Util.dk("rowA"), "Standard1", ByteBufferUtil.bytes("name"), System.currentTimeMillis());
+        QueryFilter qf = Util.namesQueryFilter(cfs, Util.dk("rowA"), "name");
         ColumnFamily cf = qf.getSSTableColumnIterator(reader).getColumnFamily();
         qf.collateOnDiskAtom(cf, qf.getSSTableColumnIterator(reader), Integer.MIN_VALUE);
         assertNotNull(cf);
-        assertEquals(hexToBytes("76616c"), cf.getColumn(ByteBufferUtil.bytes("name")).value());
+        assertEquals(hexToBytes("76616c"), cf.getColumn(Util.cellname("name")).value());
 
-        qf = QueryFilter.getNamesFilter(Util.dk("rowExclude"), "Standard1", ByteBufferUtil.bytes("name"), System.currentTimeMillis());
+        qf = Util.namesQueryFilter(cfs, Util.dk("rowExclude"), "name");
         cf = qf.getSSTableColumnIterator(reader).getColumnFamily();
         assert cf == null;
     }
@@ -220,7 +216,7 @@ public class SSTableExportTest extends SchemaLoader
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(new CounterColumn(ByteBufferUtil.bytes("colA"), 42, System.currentTimeMillis()));
+        cfamily.addColumn(new CounterColumn(Util.cellname("colA"), 42, System.currentTimeMillis()));
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
 
@@ -251,7 +247,7 @@ public class SSTableExportTest extends SchemaLoader
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(new Column(ByteBufferUtil.bytes("data"), UTF8Type.instance.fromString("{\"foo\":\"bar\"}")));
+        cfamily.addColumn(new Column(Util.cellname("data"), UTF8Type.instance.fromString("{\"foo\":\"bar\"}")));
         writer.append(Util.dk("rowA"), cfamily);
         cfamily.clear();
 
@@ -283,8 +279,8 @@ public class SSTableExportTest extends SchemaLoader
         SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
 
         // Add rowA
-        cfamily.addColumn(ByteBufferUtil.bytes("colName"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
-        cfamily.addColumn(ByteBufferUtil.bytes("colName1"), ByteBufferUtil.bytes("val1"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colName"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
+        cfamily.addColumn(Util.cellname("colName1"), ByteBufferUtil.bytes("val1"), System.currentTimeMillis());
         cfamily.delete(new DeletionInfo(0, 0));
         writer.append(Util.dk("rowA"), cfamily);
 

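For the read-back assertions, the old QueryFilter.getNamesFilter call built from a ByteBuffer gives way to a helper that resolves names through the table's comparator. A sketch of that pattern, assuming the ColumnFamilyStore and SSTableReader come from the surrounding test (Util.namesQueryFilter, Util.dk and Util.cellname are test helpers from this patch):

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.filter.QueryFilter;
    import org.apache.cassandra.io.sstable.SSTableReader;

    public class NamesFilterReadSketch
    {
        // Read back a single named cell from an sstable, the way the test above does.
        static ColumnFamily readNamedCell(ColumnFamilyStore cfs, SSTableReader reader, String rowKey, String name)
        {
            QueryFilter qf = Util.namesQueryFilter(cfs, Util.dk(rowKey), name);
            ColumnFamily cf = qf.getSSTableColumnIterator(reader).getColumnFamily();
            qf.collateOnDiskAtom(cf, qf.getSSTableColumnIterator(reader), Integer.MIN_VALUE);
            // The cell itself is then looked up with a CellName, e.g. cf.getColumn(Util.cellname(name)).
            return cf;
        }
    }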
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/tools/SSTableImportTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/tools/SSTableImportTest.java b/test/unit/org/apache/cassandra/tools/SSTableImportTest.java
index ae2ade2..fdfaa82 100644
--- a/test/unit/org/apache/cassandra/tools/SSTableImportTest.java
+++ b/test/unit/org/apache/cassandra/tools/SSTableImportTest.java
@@ -55,9 +55,9 @@ public class SSTableImportTest extends SchemaLoader
         OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
         ColumnFamily cf = cloneForAdditions(iter);
         while (iter.hasNext()) cf.addAtom(iter.next());
-        assert cf.getColumn(ByteBufferUtil.bytes("colAA")).value().equals(hexToBytes("76616c4141"));
-        assert !(cf.getColumn(ByteBufferUtil.bytes("colAA")) instanceof DeletedColumn);
-        Column expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
+        assert cf.getColumn(Util.cellname("colAA")).value().equals(hexToBytes("76616c4141"));
+        assert !(cf.getColumn(Util.cellname("colAA")) instanceof DeletedColumn);
+        Column expCol = cf.getColumn(Util.cellname("colAC"));
         assert expCol.value().equals(hexToBytes("76616c4143"));
         assert expCol instanceof ExpiringColumn;
         assert ((ExpiringColumn)expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
@@ -89,9 +89,9 @@ public class SSTableImportTest extends SchemaLoader
         OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
         ColumnFamily cf = cloneForAdditions(iter);
         while (iter.hasNext()) cf.addAtom(iter.next());
-        assert cf.getColumn(ByteBufferUtil.bytes("colAA")).value().equals(hexToBytes("76616c4141"));
-        assert !(cf.getColumn(ByteBufferUtil.bytes("colAA")) instanceof DeletedColumn);
-        Column expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
+        assert cf.getColumn(Util.cellname("colAA")).value().equals(hexToBytes("76616c4141"));
+        assert !(cf.getColumn(Util.cellname("colAA")) instanceof DeletedColumn);
+        Column expCol = cf.getColumn(Util.cellname("colAC"));
         assert expCol.value().equals(hexToBytes("76616c4143"));
         assert expCol instanceof ExpiringColumn;
         assert ((ExpiringColumn)expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
@@ -110,9 +110,9 @@ public class SSTableImportTest extends SchemaLoader
         ColumnFamily cf = cloneForAdditions(qf.getSSTableColumnIterator(reader));
         qf.collateOnDiskAtom(cf, qf.getSSTableColumnIterator(reader), Integer.MIN_VALUE);
 
-        DeletionTime delTime = cf.deletionInfo().rangeCovering(CompositeType.build(ByteBufferUtil.bytes("superA")));
+        DeletionTime delTime = cf.deletionInfo().rangeCovering(cf.getComparator().make(ByteBufferUtil.bytes("superA")));
         assertEquals("supercolumn deletion time did not match the expected time", new DeletionInfo(0, 0), new DeletionInfo(delTime));
-        Column subColumn = cf.getColumn(CompositeType.build(ByteBufferUtil.bytes("superA"), ByteBufferUtil.bytes("636f6c4141")));
+        Column subColumn = cf.getColumn(Util.cellname("superA", "636f6c4141"));
         assert subColumn.value().equals(hexToBytes("76616c75654141"));
     }
 
@@ -140,9 +140,9 @@ public class SSTableImportTest extends SchemaLoader
         ColumnFamily cf = cloneForAdditions(iter);
         while (iter.hasNext())
             cf.addAtom(iter.next());
-        assert cf.getColumn(ByteBufferUtil.bytes("colAA")).value().equals(hexToBytes("76616c4141"));
-        assert !(cf.getColumn(ByteBufferUtil.bytes("colAA")) instanceof DeletedColumn);
-        Column expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
+        assert cf.getColumn(Util.cellname("colAA")).value().equals(hexToBytes("76616c4141"));
+        assert !(cf.getColumn(Util.cellname("colAA")) instanceof DeletedColumn);
+        Column expCol = cf.getColumn(Util.cellname("colAC"));
         assert expCol.value().equals(hexToBytes("76616c4143"));
         assert expCol instanceof ExpiringColumn;
         assert ((ExpiringColumn) expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
@@ -164,9 +164,9 @@ public class SSTableImportTest extends SchemaLoader
         assertEquals(cf.deletionInfo(), new DeletionInfo(0, 0));
         while (iter.hasNext())
             cf.addAtom(iter.next());
-        assert cf.getColumn(ByteBufferUtil.bytes("colAA")).value().equals(hexToBytes("76616c4141"));
-        assert !(cf.getColumn(ByteBufferUtil.bytes("colAA")) instanceof DeletedColumn);
-        Column expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
+        assert cf.getColumn(Util.cellname("colAA")).value().equals(hexToBytes("76616c4141"));
+        assert !(cf.getColumn(Util.cellname("colAA")) instanceof DeletedColumn);
+        Column expCol = cf.getColumn(Util.cellname("colAC"));
         assert expCol.value().equals(hexToBytes("76616c4143"));
         assert expCol instanceof ExpiringColumn;
         assert ((ExpiringColumn) expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
@@ -186,7 +186,7 @@ public class SSTableImportTest extends SchemaLoader
         OnDiskAtomIterator iter = qf.getSSTableColumnIterator(reader);
         ColumnFamily cf = cloneForAdditions(iter);
         while (iter.hasNext()) cf.addAtom(iter.next());
-        Column c = cf.getColumn(ByteBufferUtil.bytes("colAA"));
+        Column c = cf.getColumn(Util.cellname("colAA"));
         assert c instanceof CounterColumn: c;
         assert ((CounterColumn) c).total() == 42;
     }

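Two ways of naming cells appear in the hunk above: a composite prefix built from the table's comparator (for range-tombstone lookups) and a full cell name built with Util.cellname (for direct column lookups). A brief sketch, assuming the ColumnFamily comes from the surrounding test:

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.DeletionTime;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CompositeLookupSketch
    {
        static void lookups(ColumnFamily cf)
        {
            // Prefix composite built by the comparator, e.g. to find a covering range tombstone.
            DeletionTime delTime = cf.deletionInfo().rangeCovering(cf.getComparator().make(ByteBufferUtil.bytes("superA")));

            // Full cell name for a direct column lookup.
            Column subColumn = cf.getColumn(Util.cellname("superA", "636f6c4141"));
        }
    }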
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/utils/IntervalTreeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/IntervalTreeTest.java b/test/unit/org/apache/cassandra/utils/IntervalTreeTest.java
index 8113703..55e047a 100644
--- a/test/unit/org/apache/cassandra/utils/IntervalTreeTest.java
+++ b/test/unit/org/apache/cassandra/utils/IntervalTreeTest.java
@@ -146,13 +146,13 @@ public class IntervalTreeTest
             {
                 public void serialize(Integer i, DataOutput out) throws IOException { out.writeInt(i); }
                 public Integer deserialize(DataInput in) throws IOException { return in.readInt(); }
-                public long serializedSize(Integer i, TypeSizes ts) { return 4; }
+                public long serializedSize(Integer i, TypeSizes s) { return 4; }
             },
             new ISerializer<String>()
             {
                 public void serialize(String v, DataOutput out) throws IOException { out.writeUTF(v); }
                 public String deserialize(DataInput in) throws IOException { return in.readUTF(); }
-                public long serializedSize(String v, TypeSizes ts) { return v.length(); }
+                public long serializedSize(String v, TypeSizes s) { return v.length(); }
             },
             Interval.class.getConstructor(Object.class, Object.class, Object.class)
         );


[13/13] git commit: Push composites support in the storage engine

Posted by sl...@apache.org.
Push composites support in the storage engine

patch by slebresne; reviewed by benedict for CASSANDRA-5417


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/362cc053
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/362cc053
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/362cc053

Branch: refs/heads/trunk
Commit: 362cc05352ec67e707e0ac790732e96a15e63f6b
Parents: f943433
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Tue Oct 29 11:03:52 2013 +0100
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Tue Dec 17 17:03:21 2013 +0100

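In API terms, the patch replaces raw ByteBuffer cell names and AbstractType-based comparators with dedicated CellName/Composite types and a per-table CellNameType comparator (see the new db/composites package and the CFMetaData hunks below). A minimal sketch of two entry points visible in this commit, not a complete inventory of the new API:

    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CompositesOverviewSketch
    {
        static void sketch()
        {
            // A CellNameType (the new per-table comparator) is derived from an AbstractType;
            // the boolean flags whether the table is "dense" (compact/thrift-style).
            CellNameType type = CellNames.fromAbstractType(BytesType.instance, true);

            // Individual cell names are CellName objects rather than ByteBuffers...
            CellName name = CellNames.simpleDense(ByteBufferUtil.bytes("col"));

            // ...and can still be serialized back to a ByteBuffer when needed.
            java.nio.ByteBuffer raw = name.toByteBuffer();
        }
    }

The overall direction, per the commit title and CASSANDRA-5417, is to move composite handling out of CompositeType/AbstractType call sites and into these dedicated storage-engine types; the test diffs above and the src/ diffs below are largely mechanical applications of that substitution.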
----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 src/java/org/apache/cassandra/auth/Auth.java    |   2 +-
 .../cassandra/auth/CassandraAuthorizer.java     |   2 +-
 .../cassandra/auth/PasswordAuthenticator.java   |   2 +-
 .../apache/cassandra/cache/AutoSavingCache.java |   5 +-
 .../org/apache/cassandra/config/CFMetaData.java | 510 ++++++++-----------
 .../cassandra/config/ColumnDefinition.java      |  77 +--
 .../org/apache/cassandra/config/KSMetaData.java |  18 +-
 .../cassandra/config/TriggerDefinition.java     |  17 +-
 .../org/apache/cassandra/config/UTMetaData.java |  13 +-
 .../cassandra/cql/AlterTableStatement.java      |   2 +-
 .../cql/CreateColumnFamilyStatement.java        |  17 +-
 .../apache/cassandra/cql/DeleteStatement.java   |   4 +-
 .../apache/cassandra/cql/QueryProcessor.java    |  84 +--
 .../apache/cassandra/cql/SelectStatement.java   |   3 -
 .../apache/cassandra/cql/UpdateStatement.java   |   9 +-
 src/java/org/apache/cassandra/cql3/CQL3Row.java |  36 ++
 .../apache/cassandra/cql3/ColumnIdentifier.java |  20 +-
 .../cassandra/cql3/ColumnNameBuilder.java       |  90 ----
 .../org/apache/cassandra/cql3/Constants.java    |  44 +-
 src/java/org/apache/cassandra/cql3/Lists.java   |  65 ++-
 src/java/org/apache/cassandra/cql3/Maps.java    |  35 +-
 .../org/apache/cassandra/cql3/Operation.java    |  57 ++-
 .../apache/cassandra/cql3/QueryProcessor.java   |  28 +-
 src/java/org/apache/cassandra/cql3/Sets.java    |  31 +-
 .../apache/cassandra/cql3/UntypedResultSet.java |  97 +++-
 .../apache/cassandra/cql3/UpdateParameters.java |  34 +-
 .../cassandra/cql3/functions/TimeuuidFcts.java  |   1 +
 .../cassandra/cql3/functions/TokenFct.java      |   6 +-
 .../cql3/statements/AlterTableStatement.java    |  39 +-
 .../cql3/statements/AlterTypeStatement.java     |   3 +-
 .../cql3/statements/ColumnGroupMap.java         | 171 -------
 .../cql3/statements/CreateIndexStatement.java   |   8 +-
 .../cql3/statements/CreateTableStatement.java   |  57 +--
 .../cql3/statements/DeleteStatement.java        |  50 +-
 .../cql3/statements/ModificationStatement.java  |  70 ++-
 .../cql3/statements/SelectStatement.java        | 471 ++++++++---------
 .../cassandra/cql3/statements/Selection.java    |  14 -
 .../cql3/statements/UpdateStatement.java        |  27 +-
 .../cassandra/db/ArrayBackedSortedColumns.java  |  31 +-
 .../apache/cassandra/db/AtomDeserializer.java   | 110 ++++
 .../cassandra/db/AtomicSortedColumns.java       |  26 +-
 .../apache/cassandra/db/BatchlogManager.java    |  12 +-
 .../org/apache/cassandra/db/CFRowAdder.java     |  81 +++
 .../cassandra/db/CollationController.java       |   8 +-
 src/java/org/apache/cassandra/db/Column.java    | 105 +---
 .../org/apache/cassandra/db/ColumnFamily.java   |  28 +-
 .../cassandra/db/ColumnFamilySerializer.java    |  13 +-
 .../apache/cassandra/db/ColumnFamilyStore.java  |  50 +-
 .../org/apache/cassandra/db/ColumnIndex.java    |  10 +-
 .../apache/cassandra/db/ColumnSerializer.java   |  37 +-
 .../org/apache/cassandra/db/CounterColumn.java  |  27 +-
 .../apache/cassandra/db/CounterMutation.java    |   4 +-
 .../cassandra/db/CounterUpdateColumn.java       |   9 +-
 src/java/org/apache/cassandra/db/DataRange.java |  23 +-
 .../org/apache/cassandra/db/DefsTables.java     |  13 -
 .../org/apache/cassandra/db/DeletedColumn.java  |  18 +-
 .../org/apache/cassandra/db/DeletionInfo.java   |  53 +-
 .../org/apache/cassandra/db/DeletionTime.java   |  10 +-
 .../org/apache/cassandra/db/EmptyColumns.java   |   6 +-
 .../org/apache/cassandra/db/ExpiringColumn.java |  27 +-
 .../cassandra/db/HintedHandOffManager.java      |  27 +-
 src/java/org/apache/cassandra/db/Memtable.java  |   3 +-
 .../org/apache/cassandra/db/OnDiskAtom.java     |  42 +-
 .../apache/cassandra/db/PagedRangeCommand.java  |  42 +-
 .../apache/cassandra/db/RangeSliceCommand.java  |  10 +-
 .../org/apache/cassandra/db/RangeTombstone.java |  77 +--
 .../apache/cassandra/db/RangeTombstoneList.java |  74 ++-
 .../org/apache/cassandra/db/RowIndexEntry.java  |  40 +-
 .../org/apache/cassandra/db/RowMutation.java    |  14 +-
 .../cassandra/db/SliceByNamesReadCommand.java   |  10 +-
 .../cassandra/db/SliceFromReadCommand.java      |  12 +-
 .../org/apache/cassandra/db/SuperColumns.java   | 228 +++++----
 .../org/apache/cassandra/db/SystemKeyspace.java |  23 +-
 .../db/TreeMapBackedSortedColumns.java          |  22 +-
 .../apache/cassandra/db/UnsortedColumns.java    |  10 +-
 .../db/columniterator/IdentityQueryFilter.java  |   4 +-
 .../db/columniterator/IndexedSliceReader.java   | 135 +++--
 .../db/columniterator/SSTableNamesIterator.java |  46 +-
 .../db/columniterator/SSTableSliceIterator.java |   2 +-
 .../db/columniterator/SimpleSliceReader.java    |  12 +-
 .../db/compaction/LazilyCompactedRow.java       |   2 +-
 .../cassandra/db/compaction/Scrubber.java       |   4 +-
 .../cassandra/db/composites/AbstractCType.java  | 336 ++++++++++++
 .../db/composites/AbstractCellNameType.java     | 356 +++++++++++++
 .../db/composites/AbstractComposite.java        | 132 +++++
 .../AbstractCompoundCellNameType.java           | 264 ++++++++++
 .../composites/AbstractSimpleCellNameType.java  | 141 +++++
 .../db/composites/BoundedComposite.java         |  95 ++++
 .../cassandra/db/composites/CBuilder.java       |  34 ++
 .../apache/cassandra/db/composites/CType.java   | 142 ++++++
 .../cassandra/db/composites/CellName.java       |  74 +++
 .../cassandra/db/composites/CellNameType.java   | 202 ++++++++
 .../cassandra/db/composites/CellNames.java      |  91 ++++
 .../cassandra/db/composites/Composite.java      |  76 +++
 .../cassandra/db/composites/Composites.java     | 110 ++++
 .../cassandra/db/composites/CompoundCType.java  | 158 ++++++
 .../db/composites/CompoundComposite.java        |  70 +++
 .../db/composites/CompoundDenseCellName.java    |  70 +++
 .../composites/CompoundDenseCellNameType.java   |  86 ++++
 .../db/composites/CompoundSparseCellName.java   | 164 ++++++
 .../composites/CompoundSparseCellNameType.java  | 238 +++++++++
 .../cassandra/db/composites/SimpleCType.java    | 133 +++++
 .../db/composites/SimpleComposite.java          |  76 +++
 .../db/composites/SimpleDenseCellName.java      |  73 +++
 .../db/composites/SimpleDenseCellNameType.java  |  78 +++
 .../db/composites/SimpleSparseCellName.java     |  99 ++++
 .../db/composites/SimpleSparseCellNameType.java |  98 ++++
 .../cassandra/db/filter/ColumnCounter.java      |  18 +-
 .../apache/cassandra/db/filter/ColumnSlice.java | 160 ++++--
 .../cassandra/db/filter/ExtendedFilter.java     |  63 +--
 .../cassandra/db/filter/IDiskAtomFilter.java    |  40 +-
 .../cassandra/db/filter/NamesQueryFilter.java   |  71 ++-
 .../apache/cassandra/db/filter/QueryFilter.java |  17 +-
 .../cassandra/db/filter/SliceQueryFilter.java   |  69 ++-
 .../AbstractSimplePerColumnSecondaryIndex.java  |  13 +-
 .../cassandra/db/index/SecondaryIndex.java      |  20 +-
 .../db/index/SecondaryIndexManager.java         |  21 +-
 .../db/index/composites/CompositesIndex.java    |  55 +-
 .../CompositesIndexOnClusteringKey.java         |  45 +-
 .../CompositesIndexOnCollectionKey.java         |  48 +-
 .../CompositesIndexOnCollectionValue.java       |  48 +-
 .../CompositesIndexOnPartitionKey.java          |  33 +-
 .../composites/CompositesIndexOnRegular.java    |  38 +-
 .../db/index/composites/CompositesSearcher.java |  35 +-
 .../cassandra/db/index/keys/KeysIndex.java      |  17 +-
 .../cassandra/db/index/keys/KeysSearcher.java   |  24 +-
 .../db/marshal/AbstractCommutativeType.java     |   3 +-
 .../db/marshal/AbstractCompositeType.java       |  20 +-
 .../cassandra/db/marshal/AbstractType.java      | 108 ----
 .../cassandra/db/marshal/CollectionType.java    |   4 +-
 .../cassandra/db/marshal/CompositeType.java     |  31 +-
 .../cassandra/db/marshal/CounterColumnType.java |   3 +-
 .../apache/cassandra/db/marshal/ListType.java   |   8 +-
 .../apache/cassandra/db/marshal/MapType.java    |  12 +-
 .../apache/cassandra/db/marshal/SetType.java    |   9 +-
 .../hadoop/ColumnFamilyInputFormat.java         |   7 +-
 .../hadoop/ColumnFamilyRecordReader.java        |  61 +--
 .../hadoop/pig/AbstractCassandraStorage.java    |  11 +-
 .../apache/cassandra/hadoop/pig/CqlStorage.java |   5 +-
 .../io/sstable/AbstractSSTableSimpleWriter.java |   9 +-
 .../cassandra/io/sstable/CQLSSTableWriter.java  |  25 +-
 .../cassandra/io/sstable/ColumnNameHelper.java  | 234 ++++-----
 .../cassandra/io/sstable/IndexHelper.java       |  75 +--
 .../cassandra/io/sstable/KeyIterator.java       |   2 +-
 .../apache/cassandra/io/sstable/SSTable.java    |   2 +-
 .../cassandra/io/sstable/SSTableReader.java     |  10 +-
 .../cassandra/io/sstable/SSTableScanner.java    |   6 +-
 .../io/sstable/SSTableSimpleUnsortedWriter.java |   2 +-
 .../io/sstable/SSTableSimpleWriter.java         |   3 +-
 .../cassandra/io/sstable/SSTableWriter.java     |   2 +-
 .../io/sstable/metadata/MetadataCollector.java  |   8 +-
 .../cassandra/io/util/MappedFileDataInput.java  |   5 +-
 .../apache/cassandra/service/CacheService.java  |   9 +-
 .../apache/cassandra/service/StorageProxy.java  |  11 +-
 .../service/pager/AbstractQueryPager.java       |   6 +-
 .../service/pager/RangeSliceQueryPager.java     |  11 +-
 .../service/pager/SliceQueryPager.java          |   8 +-
 .../cassandra/thrift/CassandraServer.java       | 103 ++--
 .../cassandra/thrift/ThriftValidation.java      |  70 +--
 .../org/apache/cassandra/tools/BulkLoader.java  |  19 +-
 .../apache/cassandra/tools/SSTableExport.java   |  16 +-
 .../apache/cassandra/tools/SSTableImport.java   |  38 +-
 .../org/apache/cassandra/tracing/Tracing.java   |  29 +-
 .../cassandra/triggers/TriggerExecutor.java     |   3 +-
 .../org/apache/cassandra/utils/FBUtilities.java |   6 +-
 .../org/apache/cassandra/utils/ObjectSizes.java |  13 +
 test/data/serialization/2.0/db.RowMutation.bin  | Bin 3599 -> 0 bytes
 .../apache/cassandra/db/LongKeyspaceTest.java   |   5 +-
 .../apache/cassandra/db/MeteredFlusherTest.java |   6 +-
 .../cassandra/db/commitlog/ComitLogStress.java  |   5 +-
 .../db/compaction/LongCompactionsTest.java      |   2 +-
 .../LongLeveledCompactionStrategyTest.java      |   2 +-
 .../cassandra/AbstractSerializationsTester.java |   1 +
 .../unit/org/apache/cassandra/SchemaLoader.java |  76 +--
 test/unit/org/apache/cassandra/Util.java        | 102 +++-
 .../apache/cassandra/config/CFMetaDataTest.java |  30 +-
 .../cassandra/config/ColumnDefinitionTest.java  |   2 +-
 .../org/apache/cassandra/config/DefsTest.java   |  53 +-
 .../db/ArrayBackedSortedColumnsTest.java        |  42 +-
 .../org/apache/cassandra/db/CleanupTest.java    |   2 +-
 .../cassandra/db/CollationControllerTest.java   |  14 +-
 .../cassandra/db/ColumnFamilyStoreTest.java     | 358 ++++++-------
 .../apache/cassandra/db/ColumnFamilyTest.java   |  31 +-
 .../org/apache/cassandra/db/CommitLogTest.java  |  11 +-
 .../apache/cassandra/db/CounterColumnTest.java  |  89 ++--
 .../cassandra/db/CounterMutationTest.java       |  12 +-
 .../apache/cassandra/db/HintedHandOffTest.java  |   3 +-
 .../org/apache/cassandra/db/KeyCacheTest.java   |  21 +-
 .../apache/cassandra/db/KeyCollisionTest.java   |   3 +-
 .../org/apache/cassandra/db/KeyspaceTest.java   | 162 +++---
 .../org/apache/cassandra/db/NameSortTest.java   |   4 +-
 .../cassandra/db/RangeTombstoneListTest.java    |  13 +-
 .../apache/cassandra/db/RangeTombstoneTest.java |  21 +-
 .../apache/cassandra/db/ReadMessageTest.java    |  34 +-
 .../cassandra/db/RecoveryManagerTest.java       |   5 +-
 .../db/RecoveryManagerTruncateTest.java         |   7 +-
 .../cassandra/db/RemoveColumnFamilyTest.java    |   4 +-
 .../db/RemoveColumnFamilyWithFlush1Test.java    |   6 +-
 .../db/RemoveColumnFamilyWithFlush2Test.java    |   4 +-
 .../apache/cassandra/db/RemoveColumnTest.java   |  22 +-
 .../cassandra/db/RemoveSubColumnTest.java       |   5 +-
 .../org/apache/cassandra/db/RowCacheTest.java   |  33 +-
 .../apache/cassandra/db/RowIterationTest.java   |   7 +-
 test/unit/org/apache/cassandra/db/RowTest.java  |   7 +-
 .../db/SecondaryIndexColumnSizeTest.java        |  13 +-
 .../apache/cassandra/db/SerializationsTest.java |  62 ++-
 .../org/apache/cassandra/db/TimeSortTest.java   |  38 +-
 .../compaction/BlacklistingCompactionsTest.java |   5 +-
 .../db/compaction/CompactionsPurgeTest.java     |  39 +-
 .../db/compaction/CompactionsTest.java          |  14 +-
 .../LeveledCompactionStrategyTest.java          |   6 +-
 .../db/compaction/OneCompactionTest.java        |   2 +-
 .../SizeTieredCompactionStrategyTest.java       |   6 +-
 .../cassandra/db/compaction/TTLExpiryTest.java  |  20 +-
 .../db/index/PerRowSecondaryIndexTest.java      |  18 +-
 .../cassandra/db/marshal/CompositeTypeTest.java |  13 +-
 .../db/marshal/DynamicCompositeTypeTest.java    |  13 +-
 .../CompressedRandomAccessReaderTest.java       |   5 +-
 .../cassandra/io/sstable/IndexHelperTest.java   |  51 +-
 .../io/sstable/IndexSummaryManagerTest.java     |  10 +-
 .../cassandra/io/sstable/LegacySSTableTest.java |   7 +-
 .../cassandra/io/sstable/SSTableLoaderTest.java |   2 +-
 .../io/sstable/SSTableMetadataTest.java         |  32 +-
 .../cassandra/io/sstable/SSTableReaderTest.java |  26 +-
 .../io/sstable/SSTableScannerTest.java          |   2 +-
 .../io/sstable/SSTableSimpleWriterTest.java     |   2 +-
 .../cassandra/io/sstable/SSTableUtils.java      |   2 +-
 .../metadata/MetadataSerializerTest.java        |   3 +-
 .../service/AntiEntropyServiceCounterTest.java  |   3 +-
 .../service/AntiEntropyServiceStandardTest.java |   3 +-
 .../cassandra/service/QueryPagerTest.java       |  18 +-
 .../streaming/StreamingTransferTest.java        |  23 +-
 .../compress/CompressedInputStreamTest.java     |   3 +-
 .../cassandra/tools/SSTableExportTest.java      |  42 +-
 .../cassandra/tools/SSTableImportTest.java      |  30 +-
 .../cassandra/utils/IntervalTreeTest.java       |   4 +-
 237 files changed, 7001 insertions(+), 3718 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index c0d0f0d..dcc7e33 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -16,6 +16,7 @@
  * Batch read from OTC's queue and cleanup (CASSANDRA-1632)
  * Secondary index support for collections (CASSANDRA-4511)
  * SSTable metadata(Stats.db) format change (CASSANDRA-6356)
+ * Push composites support in the storage engine (CASSANDRA-5417)
 
 
 2.0.4

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/auth/Auth.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/Auth.java b/src/java/org/apache/cassandra/auth/Auth.java
index 5c5dfe7..36e55bf 100644
--- a/src/java/org/apache/cassandra/auth/Auth.java
+++ b/src/java/org/apache/cassandra/auth/Auth.java
@@ -246,7 +246,7 @@ public class Auth
             ResultMessage.Rows rows = selectUserStatement.execute(QueryState.forInternalCalls(),
                                                                   new QueryOptions(consistencyForUser(username),
                                                                                    Lists.newArrayList(ByteBufferUtil.bytes(username))));
-            return new UntypedResultSet(rows.result);
+            return UntypedResultSet.create(rows.result);
         }
         catch (RequestValidationException e)
         {

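A small related change repeated across the auth classes: callers obtain an UntypedResultSet through a create(...) factory instead of the constructor. A sketch of the pattern, assuming a ResultMessage.Rows named rows as in the code above:

    import org.apache.cassandra.cql3.UntypedResultSet;
    import org.apache.cassandra.transport.messages.ResultMessage;

    public class UntypedResultSetSketch
    {
        static UntypedResultSet fromRows(ResultMessage.Rows rows)
        {
            // Before this patch: new UntypedResultSet(rows.result)
            return UntypedResultSet.create(rows.result);
        }
    }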
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
index deecfdb..8f257db 100644
--- a/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
+++ b/src/java/org/apache/cassandra/auth/CassandraAuthorizer.java
@@ -76,7 +76,7 @@ public class CassandraAuthorizer implements IAuthorizer
                                                                  new QueryOptions(ConsistencyLevel.ONE,
                                                                                   Lists.newArrayList(ByteBufferUtil.bytes(user.getName()),
                                                                                                      ByteBufferUtil.bytes(resource.getName()))));
-            result = new UntypedResultSet(rows.result);
+            result = UntypedResultSet.create(rows.result);
         }
         catch (RequestValidationException e)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
index 3d9ba98..cd5bdc3 100644
--- a/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/PasswordAuthenticator.java
@@ -110,7 +110,7 @@ public class PasswordAuthenticator implements ISaslAwareAuthenticator
             ResultMessage.Rows rows = authenticateStatement.execute(QueryState.forInternalCalls(),
                                                                     new QueryOptions(consistencyForUser(username),
                                                                                      Lists.newArrayList(ByteBufferUtil.bytes(username))));
-            result = new UntypedResultSet(rows.result);
+            result = UntypedResultSet.create(rows.result);
         }
         catch (RequestValidationException e)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cache/AutoSavingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 6554eb3..cbadefc 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.cache;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledFuture;
@@ -30,7 +29,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.ColumnFamilyType;
 import org.apache.cassandra.db.compaction.CompactionInfo;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.OperationType;
@@ -43,7 +41,6 @@ import org.apache.cassandra.io.util.LengthAvailableInputStream;
 import org.apache.cassandra.io.util.SequentialWriter;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
 
 public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K, V>
@@ -171,7 +168,7 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
             else
                 type = OperationType.UNKNOWN;
 
-            info = new CompactionInfo(new CFMetaData(Keyspace.SYSTEM_KS, cacheType.toString(), ColumnFamilyType.Standard, BytesType.instance, null),
+            info = new CompactionInfo(CFMetaData.denseCFMetaData(Keyspace.SYSTEM_KS, cacheType.toString(), BytesType.instance),
                                       type,
                                       0,
                                       keys.size(),

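The AutoSavingCache hunk shows the new CFMetaData factories replacing the old (type, comparator, subcomparator) constructor; their definitions appear in the CFMetaData.java diff below. A minimal sketch of the two variants (keyspace/table names and comparators here are placeholders; roughly, "dense" corresponds to compact/thrift-style layouts and "sparse" to CQL3 tables with declared columns):

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.db.marshal.UTF8Type;

    public class CFMetaDataFactorySketch
    {
        static CFMetaData dense(String ks, String cf)
        {
            // Replaces constructions like: new CFMetaData(ks, cf, ColumnFamilyType.Standard, comparator, null)
            return CFMetaData.denseCFMetaData(ks, cf, BytesType.instance);
        }

        static CFMetaData sparse(String ks, String cf)
        {
            return CFMetaData.sparseCFMetaData(ks, cf, UTF8Type.instance);
        }
    }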
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/config/CFMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java
index 0a33c20..e56c1dd 100644
--- a/src/java/org/apache/cassandra/config/CFMetaData.java
+++ b/src/java/org/apache/cassandra/config/CFMetaData.java
@@ -22,7 +22,6 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
 import java.util.*;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -30,6 +29,7 @@ import com.google.common.base.Objects;
 import com.google.common.collect.AbstractIterator;
 import com.google.common.collect.MapDifference;
 import com.google.common.collect.Maps;
+import org.apache.cassandra.db.composites.*;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.builder.EqualsBuilder;
@@ -55,6 +55,7 @@ import org.apache.cassandra.io.compress.LZ4Compressor;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.thrift.CqlRow;
+import org.apache.cassandra.thrift.CqlResult;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -384,7 +385,7 @@ public final class CFMetaData
     public final String ksName;                       // name of keyspace
     public final String cfName;                       // name of this column family
     public final ColumnFamilyType cfType;             // standard, super
-    public volatile AbstractType<?> comparator;       // bytes, long, timeuuid, utf8, etc.
+    public volatile CellNameType comparator;          // bytes, long, timeuuid, utf8, etc.
 
     //OPTIONAL
     private volatile String comment = "";
@@ -438,7 +439,6 @@ public final class CFMetaData
     public CFMetaData keyValidator(AbstractType<?> prop) {keyValidator = prop; return this;}
     public CFMetaData minCompactionThreshold(int prop) {minCompactionThreshold = prop; return this;}
     public CFMetaData maxCompactionThreshold(int prop) {maxCompactionThreshold = prop; return this;}
-    public CFMetaData columnMetadata(Map<ByteBuffer,ColumnDefinition> prop) {columnMetadata = prop; return this;}
     public CFMetaData compactionStrategyClass(Class<? extends AbstractCompactionStrategy> prop) {compactionStrategyClass = prop; return this;}
     public CFMetaData compactionStrategyOptions(Map<String, String> prop) {compactionStrategyOptions = prop; return this;}
     public CFMetaData compressionParameters(CompressionParameters prop) {compressionParameters = prop; return this;}
@@ -452,18 +452,13 @@ public final class CFMetaData
     public CFMetaData droppedColumns(Map<ColumnIdentifier, Long> cols) {droppedColumns = cols; return this;}
     public CFMetaData triggers(Map<String, TriggerDefinition> prop) {triggers = prop; return this;}
 
-    public CFMetaData(String keyspace, String name, ColumnFamilyType type, AbstractType<?> comp, AbstractType<?> subcc)
-    {
-        this(keyspace, name, type, makeComparator(type, comp, subcc));
-    }
-
-    public CFMetaData(String keyspace, String name, ColumnFamilyType type, AbstractType<?> comp)
+    public CFMetaData(String keyspace, String name, ColumnFamilyType type, CellNameType comp)
     {
         this(keyspace, name, type, comp, getId(keyspace, name));
     }
 
     @VisibleForTesting
-    CFMetaData(String keyspace, String name, ColumnFamilyType type, AbstractType<?> comp,  UUID id)
+    CFMetaData(String keyspace, String name, ColumnFamilyType type, CellNameType comp,  UUID id)
     {
         // (subcc may be null for non-supercolumns)
         // (comp may also be null for custom indexes, which is kind of broken if you ask me)
@@ -475,6 +470,28 @@ public final class CFMetaData
         cfId = id;
     }
 
+    public static CFMetaData denseCFMetaData(String keyspace, String name, AbstractType<?> comp, AbstractType<?> subcc)
+    {
+        CellNameType cellNameType = CellNames.fromAbstractType(makeRawAbstractType(comp, subcc), true);
+        return new CFMetaData(keyspace, name, subcc == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, cellNameType);
+    }
+
+    public static CFMetaData sparseCFMetaData(String keyspace, String name, AbstractType<?> comp)
+    {
+        CellNameType cellNameType = CellNames.fromAbstractType(comp, false);
+        return new CFMetaData(keyspace, name, ColumnFamilyType.Standard, cellNameType);
+    }
+
+    public static CFMetaData denseCFMetaData(String keyspace, String name, AbstractType<?> comp)
+    {
+        return denseCFMetaData(keyspace, name, comp, null);
+    }
+
+    private static AbstractType<?> makeRawAbstractType(AbstractType<?> comparator, AbstractType<?> subComparator)
+    {
+        return subComparator == null ? comparator : CompositeType.getInstance(Arrays.asList(comparator, subComparator));
+    }
+
     public Map<String, TriggerDefinition> getTriggers()
     {
         return triggers;
@@ -491,7 +508,7 @@ public final class CFMetaData
         try
         {
             CreateTableStatement statement = (CreateTableStatement) QueryProcessor.parseStatement(cql).prepare().statement;
-            CFMetaData cfm = newSystemMetadata(keyspace, statement.columnFamily(), "", statement.comparator, null);
+            CFMetaData cfm = newSystemMetadata(keyspace, statement.columnFamily(), "", statement.comparator);
             statement.applyPropertiesTo(cfm);
             return cfm.rebuild();
         }
@@ -501,13 +518,6 @@ public final class CFMetaData
         }
     }
 
-    private static AbstractType<?> makeComparator(ColumnFamilyType cftype, AbstractType<?> comp, AbstractType<?> subcc)
-    {
-        return cftype == ColumnFamilyType.Super
-             ? CompositeType.getInstance(comp, subcc == null ? BytesType.instance : subcc)
-             : comp;
-    }
-
     private static String enforceCommentNotNull (CharSequence comment)
     {
         return (comment == null) ? "" : comment.toString();
@@ -518,11 +528,9 @@ public final class CFMetaData
         return UUID.nameUUIDFromBytes(ArrayUtils.addAll(ksName.getBytes(), cfName.getBytes()));
     }
 
-    private static CFMetaData newSystemMetadata(String keyspace, String cfName, String comment, AbstractType<?> comparator, AbstractType<?> subcc)
+    private static CFMetaData newSystemMetadata(String keyspace, String cfName, String comment, CellNameType comparator)
     {
-        ColumnFamilyType type = subcc == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super;
-        CFMetaData newCFMD = new CFMetaData(keyspace, cfName, type, comparator,  subcc);
-
+        CFMetaData newCFMD = new CFMetaData(keyspace, cfName, ColumnFamilyType.Standard, comparator);
         return newCFMD.comment(comment)
                 .readRepairChance(0)
                 .dcLocalReadRepairChance(0)
@@ -530,7 +538,7 @@ public final class CFMetaData
                 .memtableFlushPeriod(3600 * 1000);
     }
 
-    public static CFMetaData newIndexMetadata(CFMetaData parent, ColumnDefinition info, AbstractType<?> columnComparator)
+    public static CFMetaData newIndexMetadata(CFMetaData parent, ColumnDefinition info, CellNameType indexComparator)
     {
         // Depends on parent's cache setting, turn on its index CF's cache.
         // Row caching is never enabled; see CASSANDRA-5732
@@ -538,7 +546,7 @@ public final class CFMetaData
                              ? Caching.KEYS_ONLY
                              : Caching.NONE;
 
-        return new CFMetaData(parent.ksName, parent.indexColumnFamilyName(info), ColumnFamilyType.Standard, columnComparator, (AbstractType)null)
+        return new CFMetaData(parent.ksName, parent.indexColumnFamilyName(info), ColumnFamilyType.Standard, indexComparator)
                              .keyValidator(info.type)
                              .readRepairChance(0.0)
                              .dcLocalReadRepairChance(0.0)
@@ -574,14 +582,12 @@ public final class CFMetaData
 
     static CFMetaData copyOpts(CFMetaData newCFMD, CFMetaData oldCFMD)
     {
-        Map<ByteBuffer, ColumnDefinition> clonedColumns = new HashMap<>();
+        List<ColumnDefinition> clonedColumns = new ArrayList<>(oldCFMD.allColumns().size());
         for (ColumnDefinition cd : oldCFMD.allColumns())
-        {
-            ColumnDefinition cloned = cd.copy();
-            clonedColumns.put(cloned.name.bytes, cloned);
-        }
+            clonedColumns.add(cd.copy());
 
-        return newCFMD.comment(oldCFMD.comment)
+        return newCFMD.addAllColumnDefinitions(clonedColumns)
+                      .comment(oldCFMD.comment)
                       .readRepairChance(oldCFMD.readRepairChance)
                       .dcLocalReadRepairChance(oldCFMD.dcLocalReadRepairChance)
                       .replicateOnWrite(oldCFMD.replicateOnWrite)
@@ -590,7 +596,6 @@ public final class CFMetaData
                       .keyValidator(oldCFMD.keyValidator)
                       .minCompactionThreshold(oldCFMD.minCompactionThreshold)
                       .maxCompactionThreshold(oldCFMD.maxCompactionThreshold)
-                      .columnMetadata(clonedColumns)
                       .compactionStrategyClass(oldCFMD.compactionStrategyClass)
                       .compactionStrategyOptions(new HashMap<>(oldCFMD.compactionStrategyOptions))
                       .compressionParameters(oldCFMD.compressionParameters.copy())
@@ -763,18 +768,13 @@ public final class CFMetaData
         return compactValueColumn;
     }
 
-    public ColumnNameBuilder getKeyNameBuilder()
+    // TODO: we could use CType for key validation too to make this unnecessary but
+    // it's unclear it would be a win overall
+    public CType getKeyValidatorAsCType()
     {
         return keyValidator instanceof CompositeType
-             ? new CompositeType.Builder((CompositeType)keyValidator)
-             : new NonCompositeBuilder(keyValidator);
-    }
-
-    public ColumnNameBuilder getColumnNameBuilder()
-    {
-        return comparator instanceof CompositeType
-             ? new CompositeType.Builder((CompositeType)comparator)
-             : new NonCompositeBuilder(comparator);
+             ? new CompoundCType(((CompositeType) keyValidator).types)
+             : new SimpleCType(keyValidator);
     }
 
     public double getBloomFilterFpChance()
@@ -891,19 +891,23 @@ public final class CFMetaData
             .toHashCode();
     }
 
-    public AbstractType<?> getValueValidatorFromCellName(ByteBuffer cellName)
+    public AbstractType<?> getValueValidator(ColumnIdentifier column)
     {
-        // If this is a CQL table, we need to pull out the CQL column name to look up the correct column type.
-        if (!hasCompositeComparator() || isDense())
-            return getValueValidator(new ColumnIdentifier(cellName, comparator));
+        return getValueValidator(getColumnDefinition(column));
+    }
 
-        ByteBuffer name = ((CompositeType)comparator).extractLastComponent(cellName);
-        return getValueValidator(new ColumnIdentifier(name, UTF8Type.instance));
+    public AbstractType<?> getValueValidator(CellName name)
+    {
+        return getValueValidator(getColumnDefinition(name));
     }
 
-    public AbstractType<?> getValueValidator(ColumnIdentifier column)
+    public AbstractType<?> getValueValidatorForFullCellName(ByteBuffer name)
     {
-        return getValueValidator(getColumnDefinition(column));
+        // If this is a CQL table, we need to pull out the CQL column name to look up the correct column type.
+        if (!isCQL3Table())
+            return getValueValidator(getColumnDefinition(name));
+
+        return getValueValidator(getColumnDefinition(comparator.cellFromByteBuffer(name)));
     }
 
     public AbstractType<?> getValueValidator(ColumnDefinition columnDefinition)
@@ -954,11 +958,19 @@ public final class CFMetaData
 
         try
         {
+            AbstractType<?> rawComparator = TypeParser.parse(cf_def.comparator_type);
+            AbstractType<?> subComparator = cfType == ColumnFamilyType.Standard
+                                          ? null
+                                          : cf_def.subcomparator_type == null ? BytesType.instance : TypeParser.parse(cf_def.subcomparator_type);
+
+            // Dense for thrift is simplified as all column metadata are REGULAR
+            boolean isDense = (cf_def.column_metadata == null || cf_def.column_metadata.isEmpty()) && !isCQL3OnlyPKComparator(rawComparator);
+            CellNameType comparator = CellNames.fromAbstractType(makeRawAbstractType(rawComparator, subComparator), isDense);
+
             CFMetaData newCFMD = new CFMetaData(cf_def.keyspace,
                                                 cf_def.name,
                                                 cfType,
-                                                TypeParser.parse(cf_def.comparator_type),
-                                                cf_def.subcomparator_type == null ? null : TypeParser.parse(cf_def.subcomparator_type));
+                                                comparator);
 
             if (cf_def.isSetGc_grace_seconds()) { newCFMD.gcGraceSeconds(cf_def.gc_grace_seconds); }
             if (cf_def.isSetMin_compaction_threshold()) { newCFMD.minCompactionThreshold(cf_def.min_compaction_threshold); }
@@ -992,15 +1004,12 @@ public final class CFMetaData
 
             if (cf_def.isSetKey_validation_class()) { newCFMD.keyValidator(TypeParser.parse(cf_def.key_validation_class)); }
             if (cf_def.isSetKey_alias() && !(newCFMD.keyValidator instanceof CompositeType))
-            {
-                newCFMD.columnMetadata.put(cf_def.key_alias,
-                                           ColumnDefinition.partitionKeyDef(newCFMD, cf_def.key_alias, newCFMD.keyValidator, null));
-            }
+                newCFMD.addOrReplaceColumnDefinition(ColumnDefinition.partitionKeyDef(newCFMD, cf_def.key_alias, newCFMD.keyValidator, null));
 
-            return newCFMD.comment(cf_def.comment)
+            return newCFMD.addAllColumnDefinitions(ColumnDefinition.fromThrift(newCFMD, cf_def.column_metadata))
+                          .comment(cf_def.comment)
                           .replicateOnWrite(cf_def.replicate_on_write)
                           .defaultValidator(TypeParser.parse(cf_def.default_validation_class))
-                          .columnMetadata(ColumnDefinition.fromThrift(newCFMD, cf_def.column_metadata))
                           .compressionParameters(cp)
                           .rebuild();
         }
@@ -1016,19 +1025,24 @@ public final class CFMetaData
      * @param row CqlRow containing columns from schema_columnfamilies.
      * @return CFMetaData derived from CqlRow
      */
-    public static CFMetaData fromThriftCqlRow(CqlRow row)
+    public static CFMetaData fromThriftCqlRow(CqlRow cf, CqlResult columnsRes)
     {
-        Map<String, ByteBuffer> columns = new HashMap<>();
-        try
-        {
-            for (org.apache.cassandra.thrift.Column column : row.getColumns())
-                columns.put(ByteBufferUtil.string(column.bufferForName()), column.value);
-        }
-        catch (CharacterCodingException ignore)
-        {
-        }
-        UntypedResultSet.Row cql3row = new UntypedResultSet.Row(columns);
-        return fromSchemaNoColumnsNoTriggers(cql3row);
+        UntypedResultSet.Row cfRow = new UntypedResultSet.Row(convertThriftCqlRow(cf));
+
+        List<Map<String, ByteBuffer>> cols = new ArrayList<>(columnsRes.rows.size());
+        for (CqlRow row : columnsRes.rows)
+            cols.add(convertThriftCqlRow(row));
+        UntypedResultSet colsRow = UntypedResultSet.create(cols);
+
+        return fromSchemaNoTriggers(cfRow, colsRow);
+    }
+
+    private static Map<String, ByteBuffer> convertThriftCqlRow(CqlRow row)
+    {
+        Map<String, ByteBuffer> m = new HashMap<>();
+        for (org.apache.cassandra.thrift.Column column : row.getColumns())
+            m.put(UTF8Type.instance.getString(column.bufferForName()), column.value);
+        return m;
     }
 
     public void reload()
@@ -1051,11 +1065,11 @@ public final class CFMetaData
     /**
      * Updates CFMetaData in-place to match cf_def
      *
-     * *Note*: This method left public only for DefsTest, don't use directly!
+     * *Note*: This method is left package-private only for DefsTest, don't use it directly!
      *
      * @throws ConfigurationException if ks/cf names or cf ids didn't match
      */
-    public void apply(CFMetaData cfm) throws ConfigurationException
+    void apply(CFMetaData cfm) throws ConfigurationException
     {
         logger.debug("applying {} to {}", cfm, this);
 
@@ -1201,9 +1215,8 @@ public final class CFMetaData
 
         if (isSuper())
         {
-            CompositeType ct = (CompositeType)comparator;
-            def.setComparator_type(ct.types.get(0).toString());
-            def.setSubcomparator_type(ct.types.get(1).toString());
+            def.setComparator_type(comparator.subtype(0).toString());
+            def.setSubcomparator_type(comparator.subtype(1).toString());
         }
         else
         {
@@ -1257,37 +1270,14 @@ public final class CFMetaData
     }
 
     /**
-     * Returns a ColumnDefinition given a full (internal) column name.
+     * Returns a ColumnDefinition given a cell name.
      */
-    public ColumnDefinition getColumnDefinitionFromCellName(ByteBuffer cellName)
+    public ColumnDefinition getColumnDefinition(CellName cellName)
     {
-        if (!isSuper() && hasCompositeComparator())
-        {
-            CompositeType composite = (CompositeType)comparator;
-            ByteBuffer[] components = composite.split(cellName);
-            for (ColumnDefinition def : allColumns())
-            {
-                ByteBuffer toCompare;
-                if (def.isOnAllComponents())
-                {
-                    toCompare = cellName;
-                }
-                else
-                {
-                    if (def.position() >= components.length)
-                        break;
-
-                    toCompare = components[def.position()];
-                }
-                if (def.name.bytes.equals(toCompare))
-                    return def;
-            }
-            return null;
-        }
-        else
-        {
-            return columnMetadata.get(cellName);
-        }
+        ColumnIdentifier id = cellName.cql3ColumnName();
+        return id == null
+             ? getColumnDefinition(cellName.toByteBuffer())  // Means a dense layout, try the full column name
+             : getColumnDefinition(id);
     }
 
     public ColumnDefinition getColumnDefinitionForIndex(String indexName)
@@ -1356,8 +1346,13 @@ public final class CFMetaData
     public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, int count, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version)
     {
         if (version.hasSuperColumns && cfType == ColumnFamilyType.Super)
-            return SuperColumns.onDiskIterator(in, count, flag, expireBefore);
-        return Column.onDiskIterator(in, count, flag, expireBefore, version);
+            return SuperColumns.onDiskIterator(in, count, flag, expireBefore, comparator);
+        return Column.onDiskIterator(in, count, flag, expireBefore, version, comparator);
+    }
+
+    public AtomDeserializer getOnDiskDeserializer(DataInput in, Descriptor.Version version)
+    {
+        return new AtomDeserializer(comparator, in, ColumnSerializer.Flag.LOCAL, Integer.MIN_VALUE, version);
     }
 
     public static boolean isNameValid(String name)
@@ -1382,8 +1377,11 @@ public final class CFMetaData
         if (cfType == null)
             throw new ConfigurationException(String.format("Invalid column family type for %s", cfName));
 
-        if (comparator instanceof CounterColumnType)
-            throw new ConfigurationException("CounterColumnType is not a valid comparator");
+        for (int i = 0; i < comparator.size(); i++)
+        {
+            if (comparator.subtype(i) instanceof CounterColumnType)
+                throw new ConfigurationException("CounterColumnType is not a valid comparator");
+        }
         if (keyValidator instanceof CounterColumnType)
             throw new ConfigurationException("CounterColumnType is not a valid key validator");
 
@@ -1534,9 +1532,8 @@ public final class CFMetaData
         ColumnFamily cf = rm.addOrGet(SchemaColumnFamiliesCf);
         int ldt = (int) (System.currentTimeMillis() / 1000);
 
-        ColumnNameBuilder builder = SchemaColumnFamiliesCf.getColumnNameBuilder();
-        builder.add(ByteBufferUtil.bytes(cfName));
-        cf.addAtom(new RangeTombstone(builder.build(), builder.buildAsEndOfRange(), timestamp, ldt));
+        Composite prefix = SchemaColumnFamiliesCf.comparator.make(cfName);
+        cf.addAtom(new RangeTombstone(prefix, prefix.end(), timestamp, ldt));
 
         for (ColumnDefinition cd : allColumns())
             cd.deleteFromSchema(rm, timestamp);
@@ -1570,67 +1567,80 @@ public final class CFMetaData
         // For properties that can be null (and can be changed), we insert tombstones to make sure
         // we don't keep a property the user has removed
         ColumnFamily cf = rm.addOrGet(SchemaColumnFamiliesCf);
-        int ldt = (int) (System.currentTimeMillis() / 1000);
+        Composite prefix = SchemaColumnFamiliesCf.comparator.make(cfName);
+        CFRowAdder adder = new CFRowAdder(cf, prefix, timestamp);
 
-        cf.addColumn(Column.create("", timestamp, cfName, ""));
-        cf.addColumn(Column.create(cfType.toString(), timestamp, cfName, "type"));
+        adder.add("type", cfType.toString());
 
         if (isSuper())
         {
             // We need to continue saving the comparator and subcomparator separately, otherwise
             // we won't know at deserialization if the subcomparator should be taken into account
             // TODO: we should implement an on-start migration if we want to get rid of that.
-            CompositeType ct = (CompositeType)comparator;
-            cf.addColumn(Column.create(ct.types.get(0).toString(), timestamp, cfName, "comparator"));
-            cf.addColumn(Column.create(ct.types.get(1).toString(), timestamp, cfName, "subcomparator"));
+            adder.add("comparator", comparator.subtype(0).toString());
+            adder.add("subcomparator", comparator.subtype(1).toString());
         }
         else
         {
-            cf.addColumn(Column.create(comparator.toString(), timestamp, cfName, "comparator"));
-        }
-
-        cf.addColumn(comment == null ? DeletedColumn.create(ldt, timestamp, cfName, "comment")
-                                     : Column.create(comment, timestamp, cfName, "comment"));
-        cf.addColumn(Column.create(readRepairChance, timestamp, cfName, "read_repair_chance"));
-        cf.addColumn(Column.create(dcLocalReadRepairChance, timestamp, cfName, "local_read_repair_chance"));
-        cf.addColumn(Column.create(replicateOnWrite, timestamp, cfName, "replicate_on_write"));
-        cf.addColumn(Column.create(populateIoCacheOnFlush, timestamp, cfName, "populate_io_cache_on_flush"));
-        cf.addColumn(Column.create(gcGraceSeconds, timestamp, cfName, "gc_grace_seconds"));
-        cf.addColumn(Column.create(defaultValidator.toString(), timestamp, cfName, "default_validator"));
-        cf.addColumn(Column.create(keyValidator.toString(), timestamp, cfName, "key_validator"));
-        cf.addColumn(Column.create(minCompactionThreshold, timestamp, cfName, "min_compaction_threshold"));
-        cf.addColumn(Column.create(maxCompactionThreshold, timestamp, cfName, "max_compaction_threshold"));
-        cf.addColumn(bloomFilterFpChance == null ? DeletedColumn.create(ldt, timestamp, cfName, "bloomFilterFpChance")
-                                                 : Column.create(bloomFilterFpChance, timestamp, cfName, "bloom_filter_fp_chance"));
-        cf.addColumn(Column.create(memtableFlushPeriod, timestamp, cfName, "memtable_flush_period_in_ms"));
-        cf.addColumn(Column.create(caching.toString(), timestamp, cfName, "caching"));
-        cf.addColumn(Column.create(defaultTimeToLive, timestamp, cfName, "default_time_to_live"));
-        cf.addColumn(Column.create(compactionStrategyClass.getName(), timestamp, cfName, "compaction_strategy_class"));
-        cf.addColumn(Column.create(json(compressionParameters.asThriftOptions()), timestamp, cfName, "compression_parameters"));
-        cf.addColumn(Column.create(json(compactionStrategyOptions), timestamp, cfName, "compaction_strategy_options"));
-        cf.addColumn(Column.create(indexInterval, timestamp, cfName, "index_interval"));
-        cf.addColumn(Column.create(speculativeRetry.toString(), timestamp, cfName, "speculative_retry"));
+            adder.add("comparator", comparator.toString());
+        }
+
+        adder.add("comment", comment);
+        adder.add("read_repair_chance", readRepairChance);
+        adder.add("local_read_repair_chance", dcLocalReadRepairChance);
+        adder.add("replicate_on_write", replicateOnWrite);
+        adder.add("populate_io_cache_on_flush", populateIoCacheOnFlush);
+        adder.add("gc_grace_seconds", gcGraceSeconds);
+        adder.add("default_validator", defaultValidator.toString());
+        adder.add("key_validator", keyValidator.toString());
+        adder.add("min_compaction_threshold", minCompactionThreshold);
+        adder.add("max_compaction_threshold", maxCompactionThreshold);
+        adder.add("bloom_filter_fp_chance", bloomFilterFpChance);
+
+        adder.add("memtable_flush_period_in_ms", memtableFlushPeriod);
+        adder.add("caching", caching.toString());
+        adder.add("default_time_to_live", defaultTimeToLive);
+        adder.add("compaction_strategy_class", compactionStrategyClass.getName());
+        adder.add("compression_parameters", json(compressionParameters.asThriftOptions()));
+        adder.add("compaction_strategy_options", json(compactionStrategyOptions));
+        adder.add("index_interval", indexInterval);
+        adder.add("speculative_retry", speculativeRetry.toString());
 
         for (Map.Entry<ColumnIdentifier, Long> entry : droppedColumns.entrySet())
-            cf.addColumn(new Column(makeDroppedColumnName(entry.getKey()), LongType.instance.decompose(entry.getValue()), timestamp));
+            adder.addMapEntry("dropped_columns", entry.getKey().toString(), entry.getValue());
 
         // Save the CQL3 metadata "the old way" for compatibility's sake
-        cf.addColumn(Column.create(aliasesToJson(partitionKeyColumns), timestamp, cfName, "key_aliases"));
-        cf.addColumn(Column.create(aliasesToJson(clusteringColumns), timestamp, cfName, "column_aliases"));
-        cf.addColumn(compactValueColumn == null ? DeletedColumn.create(ldt, timestamp, cfName, "value_alias")
-                                                : Column.create(compactValueColumn.name.bytes, timestamp, cfName, "value_alias"));
+        adder.add("key_aliases", aliasesToJson(partitionKeyColumns));
+        adder.add("column_aliases", aliasesToJson(clusteringColumns));
+        adder.add("value_alias", compactValueColumn == null ? null : compactValueColumn.name.toString());
     }
 
     // Package protected for use by tests
-    static CFMetaData fromSchemaNoColumnsNoTriggers(UntypedResultSet.Row result)
+    static CFMetaData fromSchemaNoTriggers(UntypedResultSet.Row result, UntypedResultSet serializedColumnDefinitions)
     {
         try
         {
-            CFMetaData cfm = new CFMetaData(result.getString("keyspace_name"),
-                                            result.getString("columnfamily_name"),
-                                            ColumnFamilyType.valueOf(result.getString("type")),
-                                            TypeParser.parse(result.getString("comparator")),
-                                            result.has("subcomparator") ? TypeParser.parse(result.getString("subcomparator")) : null);
+            String ksName = result.getString("keyspace_name");
+            String cfName = result.getString("columnfamily_name");
+
+            AbstractType<?> rawComparator = TypeParser.parse(result.getString("comparator"));
+            AbstractType<?> subComparator = result.has("subcomparator") ? TypeParser.parse(result.getString("subcomparator")) : null;
+            ColumnFamilyType cfType = ColumnFamilyType.valueOf(result.getString("type"));
+
+            AbstractType<?> fullRawComparator = makeRawAbstractType(rawComparator, subComparator);
+
+            List<ColumnDefinition> columnDefs = ColumnDefinition.fromSchema(serializedColumnDefinitions,
+                                                                            ksName,
+                                                                            cfName,
+                                                                            fullRawComparator,
+                                                                            cfType == ColumnFamilyType.Super);
+
+            CellNameType comparator = CellNames.fromAbstractType(fullRawComparator, isDense(fullRawComparator, columnDefs));
+
+            CFMetaData cfm = new CFMetaData(ksName,
+                                            cfName,
+                                            cfType,
+                                            comparator);
 
             cfm.readRepairChance(result.getDouble("read_repair_chance"));
             cfm.dcLocalReadRepairChance(result.getDouble("local_read_repair_chance"));
@@ -1675,11 +1685,11 @@ public final class CFMetaData
              * However, for upgrade's sake we still need to be able to read those old values. Moreover, we cannot easily
              * remove those old columns once "converted" to columnMetadata because that would screw up nodes that may
              * not have upgraded. So for now we keep both sets of info in sync, even though that's redundant.
-             * In other words, the ColumnDefinition the following lines add may be replaced later when ColumnDefinition.fromSchema
-             * is called but that's ok.
              */
-            cfm.addColumnMetadataFromAliases(aliasesFromStrings(fromJsonList(result.getString("key_aliases"))), cfm.keyValidator, ColumnDefinition.Kind.PARTITION_KEY);
-            cfm.addColumnMetadataFromAliases(aliasesFromStrings(fromJsonList(result.getString("column_aliases"))), cfm.comparator, ColumnDefinition.Kind.CLUSTERING_COLUMN);
+            if (result.has("key_aliases"))
+                cfm.addColumnMetadataFromAliases(aliasesFromStrings(fromJsonList(result.getString("key_aliases"))), cfm.keyValidator, ColumnDefinition.Kind.PARTITION_KEY);
+            if (result.has("column_aliases"))
+                cfm.addColumnMetadataFromAliases(aliasesFromStrings(fromJsonList(result.getString("column_aliases"))), cfm.comparator.asAbstractType(), ColumnDefinition.Kind.CLUSTERING_COLUMN);
 
             if (result.has("value_alias"))
                 cfm.addColumnMetadataFromAliases(Collections.<ByteBuffer>singletonList(result.getBytes("value_alias")), cfm.defaultValidator, ColumnDefinition.Kind.COMPACT_VALUE);
@@ -1687,7 +1697,10 @@ public final class CFMetaData
             if (result.has("dropped_columns"))
                 cfm.droppedColumns(convertDroppedColumns(result.getMap("dropped_columns", UTF8Type.instance, LongType.instance)));
 
-            return cfm;
+            for (ColumnDefinition cd : columnDefs)
+                cfm.addOrReplaceColumnDefinition(cd);
+
+            return cfm.rebuild();
         }
         catch (SyntaxException | ConfigurationException e)
         {
@@ -1723,13 +1736,16 @@ public final class CFMetaData
      */
     public static CFMetaData fromSchema(UntypedResultSet.Row result)
     {
-        CFMetaData cfDef = fromSchemaNoColumnsNoTriggers(result);
+        String ksName = result.getString("keyspace_name");
+        String cfName = result.getString("columnfamily_name");
+
+        Row serializedColumns = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNS_CF, ksName, cfName);
+        CFMetaData cfm = fromSchemaNoTriggers(result, ColumnDefinition.resultify(serializedColumns));
 
-        Row serializedTriggers = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_TRIGGERS_CF, cfDef.ksName, cfDef.cfName);
-        addTriggerDefinitionsFromSchema(cfDef, serializedTriggers);
+        Row serializedTriggers = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_TRIGGERS_CF, ksName, cfName);
+        addTriggerDefinitionsFromSchema(cfm, serializedTriggers);
 
-        Row serializedColumns = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNS_CF, cfDef.ksName, cfDef.cfName);
-        return addColumnDefinitionsFromSchema(cfDef, serializedColumns);
+        return cfm;
     }
 
     private static CFMetaData fromSchema(Row row)
@@ -1740,6 +1756,9 @@ public final class CFMetaData
 
     private String aliasesToJson(List<ColumnDefinition> rawAliases)
     {
+        if (rawAliases == null)
+            return null;
+
         List<String> aliases = new ArrayList<String>(rawAliases.size());
         for (ColumnDefinition rawAlias : rawAliases)
             aliases.add(rawAlias.name.toString());
@@ -1762,14 +1781,6 @@ public final class CFMetaData
         return converted;
     }
 
-    private ByteBuffer makeDroppedColumnName(ColumnIdentifier column)
-    {
-        ColumnNameBuilder builder = SchemaColumnFamiliesCf.getColumnNameBuilder();
-        builder.add(UTF8Type.instance.decompose(cfName));
-        builder.add(UTF8Type.instance.decompose("dropped_columns"));
-        return builder.add(column).build();
-    }
-
     /**
      * Convert current metadata into schema mutation
      *
@@ -1798,33 +1809,23 @@ public final class CFMetaData
         switch (kind)
         {
             case REGULAR:
-                AbstractType<?> cfComparator = cfType == ColumnFamilyType.Super ? ((CompositeType)comparator).types.get(1) : comparator;
-                if (cfComparator instanceof CompositeType)
-                {
-                    if (componentIndex == null)
-                        return cfComparator;
+                if (componentIndex == null)
+                    return comparator.asAbstractType();
 
-                    List<AbstractType<?>> types = ((CompositeType)cfComparator).types;
-                    AbstractType<?> t = types.get(componentIndex);
-                    assert t != null : "Non-sensical component index";
-                    return t;
-                }
-                else
-                {
-                    return cfComparator;
-                }
+                AbstractType<?> t = comparator.subtype(componentIndex);
+                assert t != null : "Non-sensical component index";
+                return t;
             default:
                 // CQL3 column names are UTF8
                 return UTF8Type.instance;
         }
     }
 
-    // Package protected for use by tests
-    static CFMetaData addColumnDefinitionsFromSchema(CFMetaData cfm, Row serializedColumnDefinitions)
+    public CFMetaData addAllColumnDefinitions(Collection<ColumnDefinition> defs)
     {
-        for (ColumnDefinition cd : ColumnDefinition.fromSchema(serializedColumnDefinitions, cfm))
-            cfm.addOrReplaceColumnDefinition(cd);
-        return cfm.rebuild();
+        for (ColumnDefinition def : defs)
+            addOrReplaceColumnDefinition(def);
+        return this;
     }
 
     public CFMetaData addColumnDefinition(ColumnDefinition def) throws ConfigurationException
@@ -1839,12 +1840,16 @@ public final class CFMetaData
     // know this cannot happen.
     public CFMetaData addOrReplaceColumnDefinition(ColumnDefinition def)
     {
+        if (def.kind == ColumnDefinition.Kind.REGULAR)
+            comparator.addCQL3Column(def.name);
         columnMetadata.put(def.name.bytes, def);
         return this;
     }
 
     public boolean removeColumnDefinition(ColumnDefinition def)
     {
+        if (def.kind == ColumnDefinition.Kind.REGULAR)
+            comparator.removeCQL3Column(def.name);
         return columnMetadata.remove(def.name.bytes) != null;
     }
 
@@ -1900,13 +1905,9 @@ public final class CFMetaData
     public CFMetaData rebuild()
     {
         List<ColumnDefinition> pkCols = nullInitializedList(keyValidator.componentsCount());
-        boolean isDense = isDense(comparator, allColumns());
-        int nbCkCols = isDense
-                     ? comparator.componentsCount()
-                     : comparator.componentsCount() - (hasCollections() ? 2 : 1);
-        List<ColumnDefinition> ckCols = nullInitializedList(nbCkCols);
+        List<ColumnDefinition> ckCols = nullInitializedList(comparator.clusteringPrefixSize());
         // We keep things sorted to get consistent/predictable order in select queries
-        SortedSet<ColumnDefinition> regCols = new TreeSet<ColumnDefinition>(regularColumnComparator);
+        SortedSet<ColumnDefinition> regCols = new TreeSet<>(regularColumnComparator);
         ColumnDefinition compactCol = null;
 
         for (ColumnDefinition def : allColumns())
@@ -1918,7 +1919,7 @@ public final class CFMetaData
                     pkCols.set(def.position(), def);
                     break;
                 case CLUSTERING_COLUMN:
-                    assert !(def.isOnAllComponents() && comparator instanceof CompositeType);
+                    assert !(def.isOnAllComponents() && comparator.isCompound());
                     ckCols.set(def.position(), def);
                     break;
                 case REGULAR:
@@ -1935,7 +1936,7 @@ public final class CFMetaData
         partitionKeyColumns = addDefaultKeyAliases(pkCols);
         clusteringColumns = addDefaultColumnAliases(ckCols);
         regularColumns = regCols;
-        compactValueColumn = addDefaultValueAlias(compactCol, isDense);
+        compactValueColumn = addDefaultValueAlias(compactCol, comparator.isDense());
         return this;
     }
 
@@ -1969,12 +1970,17 @@ public final class CFMetaData
         {
             if (ckCols.get(i) == null)
             {
-                Integer idx = null;
-                AbstractType<?> type = comparator;
-                if (hasCompositeComparator())
+                Integer idx;
+                AbstractType<?> type;
+                if (comparator.isCompound())
                 {
                     idx = i;
-                    type = ((CompositeType)comparator).types.get(i);
+                    type = comparator.subtype(i);
+                }
+                else
+                {
+                    idx = null;
+                    type = comparator.asAbstractType();
                 }
                 ByteBuffer name = ByteBufferUtil.bytes(DEFAULT_COLUMN_ALIAS + (i + 1));
                 ColumnDefinition newDef = ColumnDefinition.clusteringKeyDef(this, name, type, idx);
@@ -2003,26 +2009,6 @@ public final class CFMetaData
         }
     }
 
-    public boolean hasCollections()
-    {
-        return getCollectionType() != null;
-    }
-
-    public boolean hasCompositeComparator()
-    {
-        return comparator instanceof CompositeType;
-    }
-
-    public ColumnToCollectionType getCollectionType()
-    {
-        if (isSuper() || !hasCompositeComparator())
-            return null;
-
-        CompositeType composite = (CompositeType)comparator;
-        AbstractType<?> last = composite.types.get(composite.types.size() - 1);
-        return last instanceof ColumnToCollectionType ? (ColumnToCollectionType)last : null;
-    }
-
     /*
      * We call a CF "dense" when every component of the comparator is a clustering column, i.e. no
      * component is used to store regular column names. In other words, non-composite static "thrift"
@@ -2073,12 +2059,6 @@ public final class CFMetaData
 
     }
 
-    // See above.
-    public boolean isDense()
-    {
-        return clusteringColumns.size() == comparator.componentsCount();
-    }
-
     private static boolean isCQL3OnlyPKComparator(AbstractType<?> comparator)
     {
         if (!(comparator instanceof CompositeType))
@@ -2088,6 +2068,11 @@ public final class CFMetaData
         return ct.types.size() == 1 && ct.types.get(0) instanceof UTF8Type;
     }
 
+    public boolean isCQL3Table()
+    {
+        return !isSuper() && !comparator.isDense() && comparator.isCompound();
+    }
+
     private static <T> List<T> nullInitializedList(int size)
     {
         List<T> l = new ArrayList<>(size);
@@ -2155,77 +2140,4 @@ public final class CFMetaData
             .append("triggers", triggers)
             .toString();
     }
-
-    private static class NonCompositeBuilder implements ColumnNameBuilder
-    {
-        private final AbstractType<?> type;
-        private ByteBuffer columnName;
-
-        private NonCompositeBuilder(AbstractType<?> type)
-        {
-            this.type = type;
-        }
-
-        public NonCompositeBuilder add(ByteBuffer bb)
-        {
-            if (columnName != null)
-                throw new IllegalStateException("Column name is already constructed");
-
-            columnName = bb;
-            return this;
-        }
-
-        public NonCompositeBuilder add(ColumnIdentifier name)
-        {
-            return add(name.bytes);
-        }
-
-        public NonCompositeBuilder add(ByteBuffer bb, Relation.Type op)
-        {
-            return add(bb);
-        }
-
-        public int componentCount()
-        {
-            return columnName == null ? 0 : 1;
-        }
-
-        public int remainingCount()
-        {
-            return columnName == null ? 1 : 0;
-        }
-
-        public ByteBuffer get(int i)
-        {
-            if (i < 0 || i >= (columnName == null ? 0 : 1))
-                throw new IllegalArgumentException();
-
-            return columnName;
-        }
-
-        public ByteBuffer build()
-        {
-            return columnName == null ? ByteBufferUtil.EMPTY_BYTE_BUFFER : columnName;
-        }
-
-        public ByteBuffer buildAsEndOfRange()
-        {
-            return build();
-        }
-
-        public NonCompositeBuilder copy()
-        {
-            NonCompositeBuilder newBuilder = new NonCompositeBuilder(type);
-            newBuilder.columnName = columnName;
-            return newBuilder;
-        }
-
-        public ByteBuffer getComponent(int i)
-        {
-            if (i != 0 || columnName == null)
-                throw new IllegalArgumentException();
-
-            return columnName;
-        }
-    }
 }

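Note: the CFMetaData hunks above replace hand-built ByteBuffer cell names (ColumnNameBuilder / Column.create) with the Composite and CellNameType API when schema rows are written or dropped. The following is a minimal sketch of the two recurring patterns, using only calls that appear in this patch; the class name, method names and sample values (SchemaRowSketch, writeRow, "c1", 42L) are illustrative and not part of the commit.

import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.Composite;

public class SchemaRowSketch
{
    // Writing a schema_columnfamilies row: the clustering prefix is built by the
    // table's CellNameType, and CFRowAdder appends cells under that prefix
    // (a null value becomes a tombstone, map entries get their own cells).
    static void writeRow(RowMutation rm, String cfName, long timestamp, String comment)
    {
        ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaColumnFamiliesCf);
        Composite prefix = CFMetaData.SchemaColumnFamiliesCf.comparator.make(cfName);
        CFRowAdder adder = new CFRowAdder(cf, prefix, timestamp);
        adder.add("comment", comment);
        adder.addMapEntry("dropped_columns", "c1", 42L);
    }

    // Dropping the same row: a RangeTombstone over [prefix, prefix.end()] replaces
    // the old builder.build() / builder.buildAsEndOfRange() pair.
    static void deleteRow(RowMutation rm, String cfName, long timestamp)
    {
        ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaColumnFamiliesCf);
        int ldt = (int) (System.currentTimeMillis() / 1000);
        Composite prefix = CFMetaData.SchemaColumnFamiliesCf.comparator.make(cfName);
        cf.addAtom(new RangeTombstone(prefix, prefix.end(), timestamp, ldt));
    }
}
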
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/config/ColumnDefinition.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/ColumnDefinition.java b/src/java/org/apache/cassandra/config/ColumnDefinition.java
index 05a10bc..ef6f589 100644
--- a/src/java/org/apache/cassandra/config/ColumnDefinition.java
+++ b/src/java/org/apache/cassandra/config/ColumnDefinition.java
@@ -26,6 +26,7 @@ import com.google.common.collect.Maps;
 
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.exceptions.*;
@@ -265,26 +266,22 @@ public class ColumnDefinition extends ColumnSpecification
         return cd;
     }
 
-    public static Map<ByteBuffer, ColumnDefinition> fromThrift(CFMetaData cfm, List<ColumnDef> thriftDefs) throws SyntaxException, ConfigurationException
+    public static List<ColumnDefinition> fromThrift(CFMetaData cfm, List<ColumnDef> thriftDefs) throws SyntaxException, ConfigurationException
     {
         if (thriftDefs == null)
-            return new HashMap<>();
+            return Collections.emptyList();
 
-        Map<ByteBuffer, ColumnDefinition> cds = new TreeMap<>();
+        List<ColumnDefinition> defs = new ArrayList<>(thriftDefs.size());
         for (ColumnDef thriftColumnDef : thriftDefs)
-        {
-            ColumnDefinition def = fromThrift(cfm, thriftColumnDef);
-            cds.put(def.name.bytes, def);
-        }
+            defs.add(fromThrift(cfm, thriftColumnDef));
 
-        return cds;
+        return defs;
     }
 
     /**
      * Drop the specified column from the schema using the given row mutation.
      *
      * @param rm         The schema row mutation
-     * @param cfName     The name of the parent ColumnFamily
      * @param timestamp  The timestamp to use for column modification
      */
     public void deleteFromSchema(RowMutation rm, long timestamp)
@@ -292,29 +289,24 @@ public class ColumnDefinition extends ColumnSpecification
         ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaColumnsCf);
         int ldt = (int) (System.currentTimeMillis() / 1000);
 
-        ColumnNameBuilder builder = CFMetaData.SchemaColumnsCf.getColumnNameBuilder();
-        // Note: the following is necessary for backward compatibility. For CQL3, BBU.bytes(name.toString()) == name
         ByteBuffer nameBytes = ByteBufferUtil.bytes(name.toString());
-        builder.add(ByteBufferUtil.bytes(cfName)).add(nameBytes);
-        cf.addAtom(new RangeTombstone(builder.build(), builder.buildAsEndOfRange(), timestamp, ldt));
+        // Note: we do want to use name.toString(), not name.bytes directly, for backward compatibility (for CQL3, this won't make a difference).
+        Composite prefix = CFMetaData.SchemaColumnsCf.comparator.make(cfName, name.toString());
+        cf.addAtom(new RangeTombstone(prefix, prefix.end(), timestamp, ldt));
     }
 
     public void toSchema(RowMutation rm, long timestamp)
     {
         ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaColumnsCf);
-        int ldt = (int) (System.currentTimeMillis() / 1000);
-
-        cf.addColumn(Column.create("", timestamp, cfName, name.toString(), ""));
-        cf.addColumn(Column.create(type.toString(), timestamp, cfName, name.toString(), TYPE));
-        cf.addColumn(indexType == null ? DeletedColumn.create(ldt, timestamp, cfName, name.toString(), INDEX_TYPE)
-                                       : Column.create(indexType.toString(), timestamp, cfName, name.toString(), INDEX_TYPE));
-        cf.addColumn(indexOptions == null ? DeletedColumn.create(ldt, timestamp, cfName, name.toString(), INDEX_OPTIONS)
-                                          : Column.create(json(indexOptions), timestamp, cfName, name.toString(), INDEX_OPTIONS));
-        cf.addColumn(indexName == null ? DeletedColumn.create(ldt, timestamp, cfName, name.toString(), INDEX_NAME)
-                                       : Column.create(indexName, timestamp, cfName, name.toString(), INDEX_NAME));
-        cf.addColumn(componentIndex == null ? DeletedColumn.create(ldt, timestamp, cfName, name.toString(), COMPONENT_INDEX)
-                                            : Column.create(componentIndex, timestamp, cfName, name.toString(), COMPONENT_INDEX));
-        cf.addColumn(Column.create(kind.serialize(), timestamp, cfName, name.toString(), KIND));
+        Composite prefix = CFMetaData.SchemaColumnsCf.comparator.make(cfName, name.toString());
+        CFRowAdder adder = new CFRowAdder(cf, prefix, timestamp);
+
+        adder.add(TYPE, type.toString());
+        adder.add(INDEX_TYPE, indexType == null ? null : indexType.toString());
+        adder.add(INDEX_OPTIONS, json(indexOptions));
+        adder.add(INDEX_NAME, indexName);
+        adder.add(COMPONENT_INDEX, componentIndex);
+        adder.add(KIND, kind.serialize());
     }
 
     public ColumnDefinition apply(ColumnDefinition def)  throws ConfigurationException
@@ -344,18 +336,22 @@ public class ColumnDefinition extends ColumnSpecification
                                     kind);
     }
 
+    public static UntypedResultSet resultify(Row serializedColumns)
+    {
+        String query = String.format("SELECT * FROM %s.%s", Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_COLUMNS_CF);
+        return QueryProcessor.resultify(query, serializedColumns);
+    }
+
     /**
      * Deserialize columns from storage-level representation
      *
      * @param serializedColumns storage-level partition containing the column definitions
      * @return the list of processed ColumnDefinitions
      */
-    public static List<ColumnDefinition> fromSchema(Row serializedColumns, CFMetaData cfm)
+    public static List<ColumnDefinition> fromSchema(UntypedResultSet serializedColumns, String ksName, String cfName, AbstractType<?> rawComparator, boolean isSuper)
     {
         List<ColumnDefinition> cds = new ArrayList<>();
-
-        String query = String.format("SELECT * FROM %s.%s", Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_COLUMNS_CF);
-        for (UntypedResultSet.Row row : QueryProcessor.resultify(query, serializedColumns))
+        for (UntypedResultSet.Row row : serializedColumns)
         {
             Kind kind = row.has(KIND)
                       ? Kind.deserialize(row.getString(KIND))
@@ -364,12 +360,12 @@ public class ColumnDefinition extends ColumnSpecification
             Integer componentIndex = null;
             if (row.has(COMPONENT_INDEX))
                 componentIndex = row.getInt(COMPONENT_INDEX);
-            else if (kind == Kind.CLUSTERING_COLUMN && cfm.isSuper())
+            else if (kind == Kind.CLUSTERING_COLUMN && isSuper)
                 componentIndex = 1; // A ColumnDefinition for super columns applies to the column component
 
             // Note: we save the column name as a string, but we should not assume that it is a UTF8 name, so
             // we need to use the comparator's fromString method
-            AbstractType<?> comparator = cfm.getComponentComparator(componentIndex, kind);
+            AbstractType<?> comparator = getComponentComparator(rawComparator, componentIndex, kind);
             ColumnIdentifier name = new ColumnIdentifier(comparator.fromString(row.getString(COLUMN_NAME)), comparator);
 
             AbstractType<?> validator;
@@ -394,12 +390,27 @@ public class ColumnDefinition extends ColumnSpecification
             if (row.has(INDEX_NAME))
                 indexName = row.getString(INDEX_NAME);
 
-            cds.add(new ColumnDefinition(cfm.ksName, cfm.cfName, name, validator, indexType, indexOptions, indexName, componentIndex, kind));
+            cds.add(new ColumnDefinition(ksName, cfName, name, validator, indexType, indexOptions, indexName, componentIndex, kind));
         }
 
         return cds;
     }
 
+    public static AbstractType<?> getComponentComparator(AbstractType<?> rawComparator, Integer componentIndex, ColumnDefinition.Kind kind)
+    {
+        switch (kind)
+        {
+            case REGULAR:
+                if (componentIndex == null || (componentIndex == 0 && !(rawComparator instanceof CompositeType)))
+                    return rawComparator;
+
+                return ((CompositeType)rawComparator).types.get(componentIndex);
+            default:
+                // CQL3 column names are UTF8
+                return UTF8Type.instance;
+        }
+    }
+
     public String getIndexName()
     {
         return indexName;

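Note: the ColumnDefinition changes above turn getComponentComparator into a static helper so column definitions can be deserialized from the schema table before a full CFMetaData exists (its CellNameType now depends on those very definitions, see fromSchemaNoTriggers above). A small sketch of how the name comparator is picked; the class name and the sample composite are illustrative, and the asserts assume -ea.

import java.util.Arrays;

import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.CompositeType;
import org.apache.cassandra.db.marshal.Int32Type;
import org.apache.cassandra.db.marshal.UTF8Type;

public class ComponentComparatorSketch
{
    public static void main(String[] args) throws Exception
    {
        // A two-component "raw" comparator, as a thrift table declared with
        // CompositeType(Int32Type, UTF8Type) column names would have.
        AbstractType<?> raw = CompositeType.getInstance(Arrays.<AbstractType<?>>asList(Int32Type.instance, UTF8Type.instance));

        // REGULAR definitions are named with the comparator component they occupy...
        AbstractType<?> second = ColumnDefinition.getComponentComparator(raw, 1, ColumnDefinition.Kind.REGULAR);
        assert second == UTF8Type.instance;

        // ...while partition key / clustering / compact value definitions always use
        // UTF8 names, since those come from CQL3.
        AbstractType<?> pk = ColumnDefinition.getComponentComparator(raw, null, ColumnDefinition.Kind.PARTITION_KEY);
        assert pk == UTF8Type.instance;
    }
}
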
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/config/KSMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/KSMetaData.java b/src/java/org/apache/cassandra/config/KSMetaData.java
index 71f81c7..3dfea03 100644
--- a/src/java/org/apache/cassandra/config/KSMetaData.java
+++ b/src/java/org/apache/cassandra/config/KSMetaData.java
@@ -241,10 +241,11 @@ public final class KSMetaData
     {
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, SystemKeyspace.getSchemaKSKey(name));
         ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaKeyspacesCf);
+        CFRowAdder adder = new CFRowAdder(cf, CFMetaData.SchemaKeyspacesCf.comparator.builder().build(), timestamp);
 
-        cf.addColumn(Column.create(durableWrites, timestamp, "durable_writes"));
-        cf.addColumn(Column.create(strategyClass.getName(), timestamp, "strategy_class"));
-        cf.addColumn(Column.create(json(strategyOptions), timestamp, "strategy_options"));
+        adder.add("durable_writes", durableWrites);
+        adder.add("strategy_class", strategyClass.getName());
+        adder.add("strategy_options", json(strategyOptions));
 
         for (CFMetaData cfm : cfMetaData.values())
             cfm.toSchema(rm, timestamp);
@@ -308,17 +309,6 @@ public final class KSMetaData
             CFMetaData cfm = CFMetaData.fromSchema(result);
             cfms.put(cfm.cfName, cfm);
         }
-
-        for (CFMetaData cfm : cfms.values())
-        {
-            Row columnRow = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNS_CF, cfm.ksName, cfm.cfName);
-            // This may replace some existing definition coming from the old key, column and
-            // value aliases. But that's what we want (see CFMetaData.fromSchemaNoColumnsNoTriggers).
-            for (ColumnDefinition cd : ColumnDefinition.fromSchema(columnRow, cfm))
-                cfm.addOrReplaceColumnDefinition(cd);
-            cfm.rebuild();
-        }
-
         return cfms;
     }
 }

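Note: for schema_keyspaces, shown above, there is no table-level value to put in the clustering prefix, so the row's cells hang off an empty Composite obtained from the comparator's builder rather than from make(...). A minimal sketch under the same caveats as before (class and method names are illustrative):

import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.Composite;

public class KeyspaceRowSketch
{
    // schema_keyspaces rows have no clustering columns, so an empty prefix is used.
    static void writeKeyspaceRow(RowMutation rm, long timestamp, boolean durableWrites)
    {
        ColumnFamily cf = rm.addOrGet(CFMetaData.SchemaKeyspacesCf);
        Composite prefix = CFMetaData.SchemaKeyspacesCf.comparator.builder().build();
        CFRowAdder adder = new CFRowAdder(cf, prefix, timestamp);
        adder.add("durable_writes", durableWrites);
    }
}
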
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/config/TriggerDefinition.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/TriggerDefinition.java b/src/java/org/apache/cassandra/config/TriggerDefinition.java
index e08f97c..e1809eb 100644
--- a/src/java/org/apache/cassandra/config/TriggerDefinition.java
+++ b/src/java/org/apache/cassandra/config/TriggerDefinition.java
@@ -22,15 +22,13 @@ import java.util.*;
 
 import com.google.common.base.Objects;
 
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.thrift.TriggerDef;
 
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-
 public class TriggerDefinition
 {
     private static final String TRIGGER_NAME = "trigger_name";
@@ -84,11 +82,11 @@ public class TriggerDefinition
     {
         ColumnFamily cf = rm.addOrGet(SystemKeyspace.SCHEMA_TRIGGERS_CF);
 
-        ColumnNameBuilder builder = CFMetaData.SchemaTriggersCf.getColumnNameBuilder();
-        builder.add(bytes(cfName)).add(bytes(name));
+        CFMetaData cfm = CFMetaData.SchemaTriggersCf;
+        Composite prefix = cfm.comparator.make(cfName, name);
+        CFRowAdder adder = new CFRowAdder(cf, prefix, timestamp);
 
-        cf.addColumn(builder.copy().add(bytes("")).build(), bytes(""), timestamp); // the row marker
-        cf.addColumn(builder.copy().add(bytes(TRIGGER_OPTIONS)).add(bytes(CLASS)).build(), bytes(classOption), timestamp);
+        adder.addMapEntry(TRIGGER_OPTIONS, CLASS, classOption);
     }
 
     /**
@@ -103,9 +101,8 @@ public class TriggerDefinition
         ColumnFamily cf = rm.addOrGet(SystemKeyspace.SCHEMA_TRIGGERS_CF);
         int ldt = (int) (System.currentTimeMillis() / 1000);
 
-        ColumnNameBuilder builder = CFMetaData.SchemaTriggersCf.getColumnNameBuilder();
-        builder.add(bytes(cfName)).add(bytes(name));
-        cf.addAtom(new RangeTombstone(builder.build(), builder.buildAsEndOfRange(), timestamp, ldt));
+        Composite prefix = CFMetaData.SchemaTriggersCf.comparator.make(cfName, name);
+        cf.addAtom(new RangeTombstone(prefix, prefix.end(), timestamp, ldt));
     }
 
     public static TriggerDefinition fromThrift(TriggerDef thriftDef)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/config/UTMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/UTMetaData.java b/src/java/org/apache/cassandra/config/UTMetaData.java
index 108e106..f78e645 100644
--- a/src/java/org/apache/cassandra/config/UTMetaData.java
+++ b/src/java/org/apache/cassandra/config/UTMetaData.java
@@ -21,6 +21,7 @@ import java.nio.ByteBuffer;
 import java.util.*;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.cql3.*;
@@ -34,6 +35,9 @@ import org.apache.cassandra.utils.ByteBufferUtil;
  */
 public final class UTMetaData
 {
+    private static final ColumnIdentifier COLUMN_NAMES = new ColumnIdentifier("column_names", false);
+    private static final ColumnIdentifier COLUMN_TYPES = new ColumnIdentifier("column_types", false);
+
     private final Map<ByteBuffer, UserType> userTypes = new HashMap<>();
 
     // Only for Schema. You should generally not create instance of this, but rather use
@@ -84,8 +88,9 @@ public final class UTMetaData
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, newType.name);
         ColumnFamily cf = rm.addOrGet(SystemKeyspace.SCHEMA_USER_TYPES_CF);
 
-        ColumnNameBuilder builder = CFMetaData.SchemaUserTypesCf.getColumnNameBuilder();
-        UpdateParameters params = new UpdateParameters(CFMetaData.SchemaUserTypesCf, Collections.<ByteBuffer>emptyList(), timestamp, 0, null);
+        CFMetaData cfm = CFMetaData.SchemaUserTypesCf;
+        UpdateParameters params = new UpdateParameters(cfm, Collections.<ByteBuffer>emptyList(), timestamp, 0, null);
+        Composite prefix = cfm.comparator.builder().build();
 
         List<ByteBuffer> columnTypes = new ArrayList<>(newType.types.size());
         for (AbstractType<?> type : newType.types)
@@ -93,8 +98,8 @@ public final class UTMetaData
 
         try
         {
-            new Lists.Setter(new ColumnIdentifier("column_names", false), new Lists.Value(newType.columnNames)).execute(newType.name, cf, builder.copy(), params);
-            new Lists.Setter(new ColumnIdentifier("column_types", false), new Lists.Value(columnTypes)).execute(newType.name, cf, builder, params);
+            new Lists.Setter(cfm.getColumnDefinition(COLUMN_NAMES), new Lists.Value(newType.columnNames)).execute(newType.name, cf, prefix, params);
+            new Lists.Setter(cfm.getColumnDefinition(COLUMN_TYPES), new Lists.Value(columnTypes)).execute(newType.name, cf, prefix, params);
         }
         catch (RequestValidationException e)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/AlterTableStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/AlterTableStatement.java b/src/java/org/apache/cassandra/cql/AlterTableStatement.java
index 479f830..0d767b2 100644
--- a/src/java/org/apache/cassandra/cql/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql/AlterTableStatement.java
@@ -69,7 +69,7 @@ public class AlterTableStatement
         CFMetaData cfm = meta.clone();
 
         ByteBuffer columnName = this.oType == OperationType.OPTS ? null
-                                                                 : meta.comparator.fromStringCQL2(this.columnName);
+                                                                 : meta.comparator.subtype(0).fromStringCQL2(this.columnName);
 
         switch (oType)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/CreateColumnFamilyStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/CreateColumnFamilyStatement.java b/src/java/org/apache/cassandra/cql/CreateColumnFamilyStatement.java
index a140d02..a71707c 100644
--- a/src/java/org/apache/cassandra/cql/CreateColumnFamilyStatement.java
+++ b/src/java/org/apache/cassandra/cql/CreateColumnFamilyStatement.java
@@ -26,7 +26,7 @@ import java.util.Set;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
 import org.apache.cassandra.db.ColumnFamilyType;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.TypeParser;
@@ -123,20 +123,20 @@ public class CreateColumnFamilyStatement
     }
 
     // Column definitions
-    private Map<ByteBuffer, ColumnDefinition> getColumns(CFMetaData cfm) throws InvalidRequestException
+    private List<ColumnDefinition> getColumns(CFMetaData cfm) throws InvalidRequestException
     {
-        Map<ByteBuffer, ColumnDefinition> columnDefs = new HashMap<ByteBuffer, ColumnDefinition>();
+        List<ColumnDefinition> columnDefs = new ArrayList<>(columns.size());
 
         for (Map.Entry<Term, String> col : columns.entrySet())
         {
             try
             {
-                ByteBuffer columnName = cfm.comparator.fromStringCQL2(col.getKey().getText());
+                ByteBuffer columnName = cfm.comparator.asAbstractType().fromStringCQL2(col.getKey().getText());
                 String validatorClassName = CFPropDefs.comparators.containsKey(col.getValue())
                                           ? CFPropDefs.comparators.get(col.getValue())
                                           : col.getValue();
                 AbstractType<?> validator = TypeParser.parse(validatorClassName);
-                columnDefs.put(columnName, ColumnDefinition.regularDef(cfm, columnName, validator, null));
+                columnDefs.add(ColumnDefinition.regularDef(cfm, columnName, validator, null));
             }
             catch (ConfigurationException e)
             {
@@ -175,8 +175,7 @@ public class CreateColumnFamilyStatement
             newCFMD = new CFMetaData(keyspace,
                                      name,
                                      ColumnFamilyType.Standard,
-                                     comparator,
-                                     null);
+                                     new SimpleDenseCellNameType(comparator));
 
             if (CFMetaData.DEFAULT_COMPRESSOR != null && cfProps.compressionParameters.isEmpty())
                 cfProps.compressionParameters.put(CompressionParameters.SSTABLE_COMPRESSION, CFMetaData.DEFAULT_COMPRESSOR);
@@ -185,7 +184,8 @@ public class CreateColumnFamilyStatement
             if (minCompactionThreshold <= 0 || maxCompactionThreshold <= 0)
                 throw new ConfigurationException("Disabling compaction by setting compaction thresholds to 0 has been deprecated, set the compaction option 'enabled' to false instead.");
 
-            newCFMD.comment(cfProps.getProperty(CFPropDefs.KW_COMMENT))
+            newCFMD.addAllColumnDefinitions(getColumns(newCFMD))
+                   .comment(cfProps.getProperty(CFPropDefs.KW_COMMENT))
                    .readRepairChance(getPropertyDouble(CFPropDefs.KW_READREPAIRCHANCE, CFMetaData.DEFAULT_READ_REPAIR_CHANCE))
                    .dcLocalReadRepairChance(getPropertyDouble(CFPropDefs.KW_DCLOCALREADREPAIRCHANCE, CFMetaData.DEFAULT_DCLOCAL_READ_REPAIR_CHANCE))
                    .replicateOnWrite(getPropertyBoolean(CFPropDefs.KW_REPLICATEONWRITE, CFMetaData.DEFAULT_REPLICATE_ON_WRITE))
@@ -193,7 +193,6 @@ public class CreateColumnFamilyStatement
                    .defaultValidator(cfProps.getValidator())
                    .minCompactionThreshold(minCompactionThreshold)
                    .maxCompactionThreshold(maxCompactionThreshold)
-                   .columnMetadata(getColumns(newCFMD))
                    .keyValidator(TypeParser.parse(CFPropDefs.comparators.get(getKeyType())))
                    .compactionStrategyClass(cfProps.compactionStrategyClass)
                    .compactionStrategyOptions(cfProps.compactionStrategyOptions)

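Note: the CQL2 CREATE COLUMNFAMILY path above now hands CFMetaData an explicit CellNameType (SimpleDenseCellNameType) instead of a bare AbstractType plus a null subcomparator. A short sketch of the two ways cell name types get built in this patch; the class name and asserts are illustrative, and the asserts assume -ea.

import org.apache.cassandra.db.composites.CellNameType;
import org.apache.cassandra.db.composites.CellNames;
import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
import org.apache.cassandra.db.marshal.UTF8Type;

public class CellNameTypeSketch
{
    public static void main(String[] args)
    {
        // CQL2/thrift "dense" tables: every comparator component is a clustering
        // column, so cell names carry no CQL3 column-name component.
        CellNameType dense = new SimpleDenseCellNameType(UTF8Type.instance);

        // The factory used when deserializing thrift or schema definitions picks the
        // dense or sparse flavour from the isDense flag computed over the column metadata.
        CellNameType fromFactory = CellNames.fromAbstractType(UTF8Type.instance, true);

        assert dense.isDense() && fromFactory.isDense();
        assert !dense.isCompound(); // a single, non-composite component
    }
}
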
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/DeleteStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/DeleteStatement.java b/src/java/org/apache/cassandra/cql/DeleteStatement.java
index 0a1f90c..bcc63e1 100644
--- a/src/java/org/apache/cassandra/cql/DeleteStatement.java
+++ b/src/java/org/apache/cassandra/cql/DeleteStatement.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.IMutation;
 import org.apache.cassandra.db.RowMutation;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -100,9 +101,10 @@ public class DeleteStatement extends AbstractModification
         else
         {
             // Delete specific columns
+            AbstractType<?> at = metadata.comparator.asAbstractType();
             for (Term column : columns)
             {
-                ByteBuffer columnName = column.getByteBuffer(metadata.comparator, variables);
+                CellName columnName = metadata.comparator.cellFromByteBuffer(column.getByteBuffer(at, variables));
                 validateColumnName(columnName);
                 rm.delete(columnFamily, columnName, (timestamp == null) ? getTimestamp(clientState) : timestamp);
             }


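Note: as in DeleteStatement above, the CQL2 code keeps working with raw ByteBuffer names and only wraps them into CellNames at the storage-engine boundary via the table's comparator. A tiny sketch of that conversion; the helper name and the UTF8 encoding of the sample name are illustrative only.

import java.nio.ByteBuffer;

import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.utils.ByteBufferUtil;

public class CellFromByteBufferSketch
{
    // Wrap an already-validated raw name into a CellName right before it is handed
    // to the storage engine (e.g. RowMutation.delete in the hunk above).
    static CellName toCellName(CFMetaData metadata, String name)
    {
        ByteBuffer raw = ByteBufferUtil.bytes(name);
        return metadata.comparator.cellFromByteBuffer(raw);
    }
}
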
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java b/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java
index 357ad65..27f0dd3 100644
--- a/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java
+++ b/src/java/org/apache/cassandra/db/index/SecondaryIndexManager.java
@@ -27,9 +27,9 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.IndexType;
-import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.ExtendedFilter;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.ReducingKeyIterator;
@@ -144,12 +144,12 @@ public class SecondaryIndexManager
         logger.info("Index build of {} complete", idxNames);
     }
 
-    public boolean indexes(ByteBuffer name, Collection<SecondaryIndex> indexes)
+    public boolean indexes(CellName name, Collection<SecondaryIndex> indexes)
     {
         return !indexFor(name, indexes).isEmpty();
     }
 
-    public List<SecondaryIndex> indexFor(ByteBuffer name, Collection<SecondaryIndex> indexes)
+    public List<SecondaryIndex> indexFor(CellName name, Collection<SecondaryIndex> indexes)
     {
         List<SecondaryIndex> matching = null;
         for (SecondaryIndex index : indexes)
@@ -169,12 +169,12 @@ public class SecondaryIndexManager
         return indexes(column.name());
     }
 
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
         return indexes(name, indexesByColumn.values());
     }
 
-    public List<SecondaryIndex> indexFor(ByteBuffer name)
+    public List<SecondaryIndex> indexFor(CellName name)
     {
         return indexFor(name, indexesByColumn.values());
     }
@@ -437,7 +437,8 @@ public class SecondaryIndexManager
 
         for (Column column : indexedColumnsInRow)
         {
-            SecondaryIndex index = indexesByColumn.get(column.name());
+            // TODO: this is probably incorrect, we should pull all indexes
+            SecondaryIndex index = indexesByColumn.get(column.name().toByteBuffer());
             if (index == null)
                 continue;
 
@@ -559,8 +560,12 @@ public class SecondaryIndexManager
 
     public boolean validate(Column column)
     {
-        SecondaryIndex index = getIndexForColumn(column.name());
-        return index == null || index.validate(column);
+        for (SecondaryIndex index : indexFor(column.name()))
+        {
+            if (!index.validate(column))
+                return false;
+        }
+        return true;
     }
 
     public static interface Updater

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
index 6d137ca..95314cf 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndex.java
@@ -24,12 +24,15 @@ import java.util.Set;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.index.AbstractSimplePerColumnSecondaryIndex;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
-import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 
 /**
@@ -37,9 +40,9 @@ import org.apache.cassandra.exceptions.ConfigurationException;
  */
 public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIndex
 {
-    private volatile CompositeType indexComparator;
+    private volatile CellNameType indexComparator;
 
-    protected CompositeType getIndexComparator()
+    protected CellNameType getIndexComparator()
     {
         // Yes, this is racy, but doing this more than once is not a big deal; we just want to avoid doing it every time.
         // More seriously, we should fix the whole SecondaryIndex API so this can be final and avoid all that nonsense.
@@ -81,7 +84,7 @@ public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIn
     }
 
     // Check SecondaryIndex.getIndexComparator if you want to know why this is static
-    public static CompositeType getIndexComparator(CFMetaData baseMetadata, ColumnDefinition cfDef)
+    public static CellNameType getIndexComparator(CFMetaData baseMetadata, ColumnDefinition cfDef)
     {
         if (cfDef.type.isCollection())
         {
@@ -110,12 +113,12 @@ public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIn
         throw new AssertionError();
     }
 
-    protected ByteBuffer makeIndexColumnName(ByteBuffer rowKey, Column column)
+    protected CellName makeIndexColumnName(ByteBuffer rowKey, Column column)
     {
-        return makeIndexColumnNameBuilder(rowKey, column.name()).build();
+        return getIndexComparator().create(makeIndexColumnPrefix(rowKey, column.name()), null);
     }
 
-    protected abstract ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName);
+    protected abstract Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite columnName);
 
     public abstract IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry);
 
@@ -132,17 +135,11 @@ public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIn
 
     }
 
-    protected AbstractType getExpressionComparator()
+    protected AbstractType<?> getExpressionComparator()
     {
         return baseCfs.metadata.getColumnDefinitionComparator(columnDef);
     }
 
-    protected CompositeType getBaseComparator()
-    {
-        assert baseCfs.getComparator() instanceof CompositeType;
-        return (CompositeType)baseCfs.getComparator();
-    }
-
     public SecondaryIndexSearcher createSecondaryIndexSearcher(Set<ByteBuffer> columns)
     {
         return new CompositesSearcher(baseCfs.indexManager, columns);
@@ -166,45 +163,31 @@ public abstract class CompositesIndex extends AbstractSimplePerColumnSecondaryIn
     public static class IndexedEntry
     {
         public final DecoratedKey indexValue;
-        public final ByteBuffer indexEntry;
+        public final CellName indexEntry;
         public final long timestamp;
 
         public final ByteBuffer indexedKey;
-        public final ColumnNameBuilder indexedEntryNameBuilder;
+        public final Composite indexedEntryPrefix;
         public final ByteBuffer indexedEntryCollectionKey; // may be null
 
-        public IndexedEntry(DecoratedKey indexValue,
-                            ByteBuffer indexEntry,
-                            long timestamp,
-                            ByteBuffer indexedKey,
-                            ColumnNameBuilder indexedEntryNameBuilder)
+        public IndexedEntry(DecoratedKey indexValue, CellName indexEntry, long timestamp, ByteBuffer indexedKey, Composite indexedEntryPrefix)
         {
-            this(indexValue, indexEntry, timestamp, indexedKey, indexedEntryNameBuilder, null);
+            this(indexValue, indexEntry, timestamp, indexedKey, indexedEntryPrefix, null);
         }
 
         public IndexedEntry(DecoratedKey indexValue,
-                            ByteBuffer indexEntry,
+                            CellName indexEntry,
                             long timestamp,
                             ByteBuffer indexedKey,
-                            ColumnNameBuilder indexedEntryNameBuilder,
+                            Composite indexedEntryPrefix,
                             ByteBuffer indexedEntryCollectionKey)
         {
             this.indexValue = indexValue;
             this.indexEntry = indexEntry;
             this.timestamp = timestamp;
             this.indexedKey = indexedKey;
-            this.indexedEntryNameBuilder = indexedEntryNameBuilder;
+            this.indexedEntryPrefix = indexedEntryPrefix;
             this.indexedEntryCollectionKey = indexedEntryCollectionKey;
         }
-
-        public ByteBuffer indexedEntryStart()
-        {
-            return indexedEntryNameBuilder.build();
-        }
-
-        public ByteBuffer indexedEntryEnd()
-        {
-            return indexedEntryNameBuilder.buildAsEndOfRange();
-        }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnClusteringKey.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnClusteringKey.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnClusteringKey.java
index 63889ee..38c55fd 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnClusteringKey.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnClusteringKey.java
@@ -23,8 +23,8 @@ import java.util.List;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
 
@@ -47,62 +47,55 @@ import org.apache.cassandra.db.marshal.*;
  */
 public class CompositesIndexOnClusteringKey extends CompositesIndex
 {
-    public static CompositeType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
+    public static CellNameType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
     {
         // Index cell names are rk ck_0 ... ck_{i-1} ck_{i+1} ck_n, so n
         // components total (where n is the number of clustering keys)
         int ckCount = baseMetadata.clusteringColumns().size();
         List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(ckCount);
-        List<AbstractType<?>> ckTypes = baseMetadata.comparator.getComponents();
         types.add(SecondaryIndex.keyComparator);
         for (int i = 0; i < columnDef.position(); i++)
-            types.add(ckTypes.get(i));
+            types.add(baseMetadata.clusteringColumns().get(i).type);
         for (int i = columnDef.position() + 1; i < ckCount; i++)
-            types.add(ckTypes.get(i));
-        return CompositeType.getInstance(types);
+            types.add(baseMetadata.clusteringColumns().get(i).type);
+        return new CompoundDenseCellNameType(types);
     }
 
     protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
     {
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(column.name());
-        return components[columnDef.position()];
+        return column.name().get(columnDef.position());
     }
 
-    protected ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName)
+    protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite columnName)
     {
-        int ckCount = baseCfs.metadata.clusteringColumns().size();
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(columnName);
-        CompositeType.Builder builder = getIndexComparator().builder();
+        int count = Math.min(baseCfs.metadata.clusteringColumns().size(), columnName.size());
+        CBuilder builder = getIndexComparator().prefixBuilder();
         builder.add(rowKey);
-
-        for (int i = 0; i < Math.min(components.length, columnDef.position()); i++)
-            builder.add(components[i]);
-        for (int i = columnDef.position() + 1; i < Math.min(components.length, ckCount); i++)
-            builder.add(components[i]);
-        return builder;
+        for (int i = 0; i < Math.min(columnDef.position(), count); i++)
+            builder.add(columnName.get(i));
+        for (int i = columnDef.position() + 1; i < count; i++)
+            builder.add(columnName.get(i));
+        return builder.build();
     }
 
     public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
     {
         int ckCount = baseCfs.metadata.clusteringColumns().size();
-        ByteBuffer[] components = getIndexComparator().split(indexEntry.name());
 
-        ColumnNameBuilder builder = getBaseComparator().builder();
+        CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < columnDef.position(); i++)
-            builder.add(components[i + 1]);
+            builder.add(indexEntry.name().get(i + 1));
 
         builder.add(indexedValue.key);
 
         for (int i = columnDef.position() + 1; i < ckCount; i++)
-            builder.add(components[i]);
+            builder.add(indexEntry.name().get(i));
 
-        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), components[0], builder);
+        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), indexEntry.name().get(0), builder.build());
     }
 
     @Override
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
         // For now, assume this is only used in CQL3 when we know the name has enough components.
         return true;

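The component shuffling in makeIndexColumnPrefix and decodeEntry above is easier to follow on plain lists. A minimal sketch of the same reordering (illustrative only; String components stand in for the ByteBuffer components of a CellName):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ClusteringKeyIndexLayoutSketch
    {
        // Base cell name -> index cell name: prepend the partition key and drop the
        // indexed clustering component at position pos.
        static List<String> toIndexName(String partitionKey, List<String> clustering, int pos)
        {
            List<String> out = new ArrayList<>();
            out.add(partitionKey);
            for (int i = 0; i < clustering.size(); i++)
                if (i != pos)
                    out.add(clustering.get(i));
            return out;
        }

        // Index cell name plus the indexed value -> original clustering prefix,
        // mirroring decodeEntry above: component 0 of the index name is the partition key.
        static List<String> toBaseClustering(List<String> indexName, String indexedValue, int pos)
        {
            List<String> out = new ArrayList<>();
            for (int i = 0; i < pos; i++)
                out.add(indexName.get(i + 1));
            out.add(indexedValue);
            for (int i = pos + 1; i < indexName.size(); i++)
                out.add(indexName.get(i));
            return out;
        }

        public static void main(String[] args)
        {
            List<String> ck = Arrays.asList("ck0", "ck1", "ck2");
            List<String> idx = toIndexName("rk", ck, 1);
            System.out.println(idx);                             // [rk, ck0, ck2]
            System.out.println(toBaseClustering(idx, "ck1", 1)); // [ck0, ck1, ck2]
        }
    }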
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionKey.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionKey.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionKey.java
index c2acfc9..f3daaf2 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionKey.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionKey.java
@@ -23,11 +23,14 @@ import java.util.List;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CBuilder;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.CompoundDenseCellNameType;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
-import org.apache.cassandra.dht.LocalToken;
 
 /**
  * Index on the collection element of the cell name of a collection.
@@ -45,15 +48,14 @@ import org.apache.cassandra.dht.LocalToken;
  */
 public class CompositesIndexOnCollectionKey extends CompositesIndex
 {
-    public static CompositeType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
+    public static CellNameType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
     {
         int count = 1 + baseMetadata.clusteringColumns().size(); // row key + clustering prefix
         List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(count);
-        List<AbstractType<?>> ckTypes = baseMetadata.comparator.getComponents();
         types.add(SecondaryIndex.keyComparator);
         for (int i = 0; i < count - 1; i++)
-            types.add(ckTypes.get(i));
-        return CompositeType.getInstance(types);
+            types.add(baseMetadata.comparator.subtype(i));
+        return new CompoundDenseCellNameType(types);
     }
 
     @Override
@@ -64,49 +66,41 @@ public class CompositesIndexOnCollectionKey extends CompositesIndex
 
     protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
     {
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(column.name());
-        return components[columnDef.position() + 1];
+        return column.name().get(columnDef.position() + 1);
     }
 
-    protected ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName)
+    protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite cellName)
     {
         int count = 1 + baseCfs.metadata.clusteringColumns().size();
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(columnName);
-        CompositeType.Builder builder = getIndexComparator().builder();
+        CBuilder builder = getIndexComparator().builder();
         builder.add(rowKey);
         for (int i = 0; i < count - 1; i++)
-            builder.add(components[i]);
-        return builder;
+            builder.add(cellName.get(i));
+        return builder.build();
     }
 
     public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
     {
         int count = 1 + baseCfs.metadata.clusteringColumns().size();
-        ByteBuffer[] components = getIndexComparator().split(indexEntry.name());
-
-        ColumnNameBuilder builder = getBaseComparator().builder();
+        CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < count - 1; i++)
-            builder.add(components[i + 1]);
-
-        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), components[0], builder);
+            builder.add(indexEntry.name().get(i + 1));
+        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), indexEntry.name().get(0), builder.build());
     }
 
     @Override
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
         // We index if the CQL3 column name is that of the collection we index
-        ByteBuffer[] components = getBaseComparator().split(name);
         AbstractType<?> comp = baseCfs.metadata.getColumnDefinitionComparator(columnDef);
-        return components.length > columnDef.position()
-            && comp.compare(components[columnDef.position()], columnDef.name.bytes) == 0;
+        return name.size() > columnDef.position()
+            && comp.compare(name.get(columnDef.position()), columnDef.name.bytes) == 0;
     }
 
     public boolean isStale(IndexedEntry entry, ColumnFamily data, long now)
     {
-        ByteBuffer bb = entry.indexedEntryNameBuilder.copy().add(columnDef.name).add(entry.indexValue.key).build();
-        Column liveColumn = data.getColumn(bb);
+        CellName name = data.getComparator().create(entry.indexedEntryPrefix, columnDef.name, entry.indexValue.key);
+        Column liveColumn = data.getColumn(name);
         return (liveColumn == null || liveColumn.isMarkedForDelete(now));
     }
 }

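For a collection column, base cell names carry the clustering prefix, then the collection column's name, then the collection key; the indexes() check above tests the component at the column's position against that name. A minimal sketch of that predicate (illustrative; equals() stands in for the comparator used in the real code):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import java.util.List;

    public class CollectionKeyIndexSketch
    {
        static ByteBuffer bytes(String s)
        {
            return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
        }

        // Does this cell belong to the indexed collection column? Mirrors the
        // indexes(CellName) check above: the component at the column's position
        // must be the collection column's name.
        static boolean indexes(List<ByteBuffer> cellName, int position, ByteBuffer collectionColumnName)
        {
            return cellName.size() > position
                && cellName.get(position).equals(collectionColumnName);
        }

        public static void main(String[] args)
        {
            // Cell of a map column "m" in a table with one clustering column:
            // (clustering value, column name, map key)
            List<ByteBuffer> cell = Arrays.asList(bytes("ck0"), bytes("m"), bytes("key1"));
            System.out.println(indexes(cell, 1, bytes("m")));     // true
            System.out.println(indexes(cell, 1, bytes("other"))); // false
        }
    }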
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
index f416d0e..9bf297b 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
@@ -23,11 +23,14 @@ import java.util.List;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CBuilder;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.CompoundDenseCellNameType;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
-import org.apache.cassandra.dht.LocalToken;
 
 /**
  * Index the value of a collection cell.
@@ -42,15 +45,15 @@ import org.apache.cassandra.dht.LocalToken;
  */
 public class CompositesIndexOnCollectionValue extends CompositesIndex
 {
-    public static CompositeType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
+    public static CellNameType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
     {
         int prefixSize = columnDef.position();
         List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(prefixSize + 2);
         types.add(SecondaryIndex.keyComparator);
         for (int i = 0; i < prefixSize; i++)
-            types.add(((CompositeType)baseMetadata.comparator).types.get(i));
+            types.add(baseMetadata.comparator.subtype(i));
         types.add(((CollectionType)columnDef.type).nameComparator()); // collection key
-        return CompositeType.getInstance(types);
+        return new CompoundDenseCellNameType(types);
     }
 
     @Override
@@ -64,43 +67,38 @@ public class CompositesIndexOnCollectionValue extends CompositesIndex
         return column.value();
     }
 
-    protected ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName)
+    protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite cellName)
     {
-        int prefixSize = columnDef.position();
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(columnName);
-        assert components.length == baseComparator.types.size();
-        CompositeType.Builder builder = getIndexComparator().builder();
+        CBuilder builder = getIndexComparator().prefixBuilder();
         builder.add(rowKey);
-        for (int i = 0; i < prefixSize; i++)
-            builder.add(components[i]);
-        builder.add(components[prefixSize + 1]);
-        return builder;
+        for (int i = 0; i < Math.min(columnDef.position(), cellName.size()); i++)
+            builder.add(cellName.get(i));
+        builder.add(cellName.get(columnDef.position() + 1));
+        return builder.build();
     }
 
     public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
     {
         int prefixSize = columnDef.position();
-        ByteBuffer[] components = getIndexComparator().split(indexEntry.name());
-        CompositeType.Builder builder = getBaseComparator().builder();
+        CellName name = indexEntry.name();
+        CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < prefixSize; i++)
-            builder.add(components[i + 1]);
-        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), components[0], builder, components[prefixSize + 1]);
+            builder.add(name.get(i + 1));
+        return new IndexedEntry(indexedValue, name, indexEntry.timestamp(), name.get(0), builder.build(), name.get(prefixSize + 1));
     }
 
     @Override
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
-        ByteBuffer[] components = getBaseComparator().split(name);
         AbstractType<?> comp = baseCfs.metadata.getColumnDefinitionComparator(columnDef);
-        return components.length > columnDef.position()
-            && comp.compare(components[columnDef.position()], columnDef.name.bytes) == 0;
+        return name.size() > columnDef.position()
+            && comp.compare(name.get(columnDef.position()), columnDef.name.bytes) == 0;
     }
 
     public boolean isStale(IndexedEntry entry, ColumnFamily data, long now)
     {
-        ByteBuffer bb = entry.indexedEntryNameBuilder.copy().add(columnDef.name).add(entry.indexedEntryCollectionKey).build();
-        Column liveColumn = data.getColumn(bb);
+        CellName name = data.getComparator().create(entry.indexedEntryPrefix, columnDef.name, entry.indexedEntryCollectionKey);
+        Column liveColumn = data.getColumn(name);
         if (liveColumn == null || liveColumn.isMarkedForDelete(now))
             return true;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
index d097899..6df1e8d 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
@@ -23,8 +23,8 @@ import java.util.List;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
 
@@ -48,13 +48,14 @@ import org.apache.cassandra.db.marshal.*;
  */
 public class CompositesIndexOnPartitionKey extends CompositesIndex
 {
-    public static CompositeType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
+    public static CellNameType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
     {
         int ckCount = baseMetadata.clusteringColumns().size();
         List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(ckCount + 1);
         types.add(SecondaryIndex.keyComparator);
-        types.addAll(baseMetadata.comparator.getComponents());
-        return CompositeType.getInstance(types);
+        for (int i = 0; i < ckCount; i++)
+            types.add(baseMetadata.comparator.subtype(i));
+        return new CompoundDenseCellNameType(types);
     }
 
     protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
@@ -64,32 +65,28 @@ public class CompositesIndexOnPartitionKey extends CompositesIndex
         return components[columnDef.position()];
     }
 
-    protected ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName)
+    protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite columnName)
     {
-        int ckCount = baseCfs.metadata.clusteringColumns().size();
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(columnName);
-        CompositeType.Builder builder = getIndexComparator().builder();
+        int count = Math.min(baseCfs.metadata.clusteringColumns().size(), columnName.size());
+        CBuilder builder = getIndexComparator().prefixBuilder();
         builder.add(rowKey);
-        for (int i = 0; i < ckCount; i++)
-            builder.add(components[i]);
-        return builder;
+        for (int i = 0; i < count; i++)
+            builder.add(columnName.get(i));
+        return builder.build();
     }
 
     public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
     {
         int ckCount = baseCfs.metadata.clusteringColumns().size();
-        ByteBuffer[] components = getIndexComparator().split(indexEntry.name());
-
-        ColumnNameBuilder builder = getBaseComparator().builder();
+        CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < ckCount; i++)
-            builder.add(components[i + 1]);
+            builder.add(indexEntry.name().get(i + 1));
 
-        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), components[0], builder);
+        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), indexEntry.name().get(0), builder.build());
     }
 
     @Override
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
         // Since a partition key is always full, we always index it
         return true;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
index 55a0f88..6903b77 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
@@ -23,8 +23,8 @@ import java.util.List;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
 
@@ -47,14 +47,14 @@ import org.apache.cassandra.db.marshal.*;
  */
 public class CompositesIndexOnRegular extends CompositesIndex
 {
-    public static CompositeType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
+    public static CellNameType buildIndexComparator(CFMetaData baseMetadata, ColumnDefinition columnDef)
     {
         int prefixSize = columnDef.position();
         List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(prefixSize + 1);
         types.add(SecondaryIndex.keyComparator);
         for (int i = 0; i < prefixSize; i++)
-            types.add(((CompositeType)baseMetadata.comparator).types.get(i));
-        return CompositeType.getInstance(types);
+            types.add(baseMetadata.comparator.subtype(i));
+        return new CompoundDenseCellNameType(types);
     }
 
     protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
@@ -62,39 +62,35 @@ public class CompositesIndexOnRegular extends CompositesIndex
         return column.value();
     }
 
-    protected ColumnNameBuilder makeIndexColumnNameBuilder(ByteBuffer rowKey, ByteBuffer columnName)
+    protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite cellName)
     {
-        CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        ByteBuffer[] components = baseComparator.split(columnName);
-        CompositeType.Builder builder = getIndexComparator().builder();
+        CBuilder builder = getIndexComparator().prefixBuilder();
         builder.add(rowKey);
-        for (int i = 0; i < Math.min(columnDef.position(), components.length); i++)
-            builder.add(components[i]);
-        return builder;
+        for (int i = 0; i < Math.min(columnDef.position(), cellName.size()); i++)
+            builder.add(cellName.get(i));
+        return builder.build();
     }
 
     public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
     {
-        ByteBuffer[] components = getIndexComparator().split(indexEntry.name());
-        CompositeType.Builder builder = getBaseComparator().builder();
+        CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < columnDef.position(); i++)
-            builder.add(components[i + 1]);
-        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), components[0], builder);
+            builder.add(indexEntry.name().get(i + 1));
+        return new IndexedEntry(indexedValue, indexEntry.name(), indexEntry.timestamp(), indexEntry.name().get(0), builder.build());
     }
 
     @Override
-    public boolean indexes(ByteBuffer name)
+    public boolean indexes(CellName name)
     {
-        ByteBuffer[] components = getBaseComparator().split(name);
         AbstractType<?> comp = baseCfs.metadata.getColumnDefinitionComparator(columnDef);
-        return components.length > columnDef.position()
-            && comp.compare(components[columnDef.position()], columnDef.name.bytes) == 0;
+        return name.size() > columnDef.position()
+            && comp.compare(name.get(columnDef.position()), columnDef.name.bytes) == 0;
     }
 
     public boolean isStale(IndexedEntry entry, ColumnFamily data, long now)
     {
-        ByteBuffer bb = entry.indexedEntryNameBuilder.copy().add(columnDef.name).build();
-        Column liveColumn = data.getColumn(bb);
+        CellName name = data.getComparator().create(entry.indexedEntryPrefix, columnDef.name);
+        Column liveColumn = data.getColumn(name);
         if (liveColumn == null || liveColumn.isMarkedForDelete(now))
             return true;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
index bcb0dd2..97602af 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
@@ -24,15 +24,16 @@ import java.util.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.db.filter.ExtendedFilter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -52,23 +53,23 @@ public class CompositesSearcher extends SecondaryIndexSearcher
         return baseCfs.filter(getIndexedIterator(filter), filter);
     }
 
-    private ByteBuffer makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
+    private Composite makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
     {
         if (key.remaining() == 0)
-            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
+            return Composites.EMPTY;
 
-        ColumnNameBuilder builder;
+        Composite prefix;
         IDiskAtomFilter columnFilter = filter.columnFilter(key);
         if (columnFilter instanceof SliceQueryFilter)
         {
             SliceQueryFilter sqf = (SliceQueryFilter)columnFilter;
-            builder = index.makeIndexColumnNameBuilder(key, isStart ? sqf.start() : sqf.finish());
+            prefix = index.makeIndexColumnPrefix(key, isStart ? sqf.start() : sqf.finish());
         }
         else
         {
-            builder = index.getIndexComparator().builder().add(key);
+            prefix = index.getIndexComparator().make(key);
         }
-        return isStart ? builder.build() : builder.buildAsEndOfRange();
+        return isStart ? prefix.start() : prefix.end();
     }
 
     private ColumnFamilyStore.AbstractScanIterator getIndexedIterator(final ExtendedFilter filter)
@@ -94,15 +95,15 @@ public class CompositesSearcher extends SecondaryIndexSearcher
         ByteBuffer startKey = range.left instanceof DecoratedKey ? ((DecoratedKey)range.left).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;
         ByteBuffer endKey = range.right instanceof DecoratedKey ? ((DecoratedKey)range.right).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
-        final CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
-        final CompositeType indexComparator = (CompositeType)index.getIndexCfs().getComparator();
+        final CellNameType baseComparator = baseCfs.getComparator();
+        final CellNameType indexComparator = index.getIndexCfs().getComparator();
 
-        final ByteBuffer startPrefix = makePrefix(index, startKey, filter, true);
-        final ByteBuffer endPrefix = makePrefix(index, endKey, filter, false);
+        final Composite startPrefix = makePrefix(index, startKey, filter, true);
+        final Composite endPrefix = makePrefix(index, endKey, filter, false);
 
         return new ColumnFamilyStore.AbstractScanIterator()
         {
-            private ByteBuffer lastSeenPrefix = startPrefix;
+            private Composite lastSeenPrefix = startPrefix;
             private Deque<Column> indexColumns;
             private int columnsRead = Integer.MAX_VALUE;
             private int limit = filter.currentLimit();
@@ -135,7 +136,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                  */
                 DecoratedKey currentKey = null;
                 ColumnFamily data = null;
-                ByteBuffer previousPrefix = null;
+                Composite previousPrefix = null;
 
                 while (true)
                 {
@@ -229,7 +230,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                         }
 
                         // Check if this entry cannot be a hit due to the original column filter
-                        ByteBuffer start = entry.indexedEntryStart();
+                        Composite start = entry.indexedEntryPrefix;
                         if (!filter.columnFilter(dk.key).maySelectPrefix(baseComparator, start))
                             continue;
 
@@ -248,7 +249,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                         // We always query the whole CQL3 row. In the case where the original filter was a name filter, this might be
                         // slightly wasteful, but it probably doesn't matter in practice and it simplifies things.
                         SliceQueryFilter dataFilter = new SliceQueryFilter(start,
-                                                                           entry.indexedEntryEnd(),
+                                                                           entry.indexedEntryPrefix.end(),
                                                                            false,
                                                                            Integer.MAX_VALUE,
                                                                            baseCfs.metadata.clusteringColumns().size());
@@ -267,7 +268,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                         if (entry.indexedEntryCollectionKey != null)
                             previousPrefix = start;
 
-                        if (!filter.isSatisfiedBy(dk, newData, entry.indexedEntryNameBuilder, entry.indexedEntryCollectionKey))
+                        if (!filter.isSatisfiedBy(dk, newData, entry.indexedEntryPrefix, entry.indexedEntryCollectionKey))
                             continue;
 
                         if (data == null)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java b/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
index 7d98c24..ee56c36 100644
--- a/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
+++ b/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
@@ -20,6 +20,8 @@ package org.apache.cassandra.db.index.keys;
 import java.nio.ByteBuffer;
 import java.util.Set;
 
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.index.AbstractSimplePerColumnSecondaryIndex;
@@ -38,9 +40,9 @@ public class KeysIndex extends AbstractSimplePerColumnSecondaryIndex
         return column.value();
     }
 
-    protected ByteBuffer makeIndexColumnName(ByteBuffer rowKey, Column column)
+    protected CellName makeIndexColumnName(ByteBuffer rowKey, Column column)
     {
-        return rowKey;
+        return CellNames.simpleDense(rowKey);
     }
 
     public SecondaryIndexSearcher createSecondaryIndexSearcher(Set<ByteBuffer> columns)
@@ -50,7 +52,7 @@ public class KeysIndex extends AbstractSimplePerColumnSecondaryIndex
 
     public boolean isIndexEntryStale(ByteBuffer indexedValue, ColumnFamily data, long now)
     {
-        Column liveColumn = data.getColumn(columnDef.name.bytes);
+        Column liveColumn = data.getColumn(data.getComparator().makeCellName(columnDef.name.bytes));
         if (liveColumn == null || liveColumn.isMarkedForDelete(now))
             return true;
 
@@ -63,8 +65,15 @@ public class KeysIndex extends AbstractSimplePerColumnSecondaryIndex
         // no options used
     }
 
+    public boolean indexes(CellName name)
+    {
+        // This considers the full cell name directly
+        AbstractType<?> comparator = baseCfs.metadata.getColumnDefinitionComparator(columnDef);
+        return comparator.compare(columnDef.name.bytes, name.toByteBuffer()) == 0;
+    }
+
     protected AbstractType getExpressionComparator()
     {
-        return baseCfs.getComparator();
+        return baseCfs.getComparator().asAbstractType();
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
index 3740e24..0101a0b 100644
--- a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
@@ -28,13 +28,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.db.filter.ExtendedFilter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.index.*;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.dht.Range;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.HeapAllocator;
 
 public class KeysSearcher extends SecondaryIndexSearcher
@@ -75,12 +78,15 @@ public class KeysSearcher extends SecondaryIndexSearcher
          * indexed row.
          */
         final AbstractBounds<RowPosition> range = filter.dataRange.keyRange();
-        final ByteBuffer startKey = range.left instanceof DecoratedKey ? ((DecoratedKey)range.left).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;
-        final ByteBuffer endKey = range.right instanceof DecoratedKey ? ((DecoratedKey)range.right).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;
+        CellNameType type = index.getIndexCfs().getComparator();
+        final Composite startKey = range.left instanceof DecoratedKey ? type.make(((DecoratedKey)range.left).key) : Composites.EMPTY;
+        final Composite endKey = range.right instanceof DecoratedKey ? type.make(((DecoratedKey)range.right).key) : Composites.EMPTY;
+
+        final CellName primaryColumn = baseCfs.getComparator().cellFromByteBuffer(primary.column);
 
         return new ColumnFamilyStore.AbstractScanIterator()
         {
-            private ByteBuffer lastSeenKey = startKey;
+            private Composite lastSeenKey = startKey;
             private Iterator<Column> indexColumns;
             private int columnsRead = Integer.MAX_VALUE;
 
@@ -101,7 +107,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
 
                         if (logger.isTraceEnabled() && (index instanceof AbstractSimplePerColumnSecondaryIndex))
                             logger.trace("Scanning index {} starting with {}",
-                                         ((AbstractSimplePerColumnSecondaryIndex)index).expressionString(primary), index.getBaseCfs().metadata.getKeyValidator().getString(startKey));
+                                         ((AbstractSimplePerColumnSecondaryIndex)index).expressionString(primary), index.getBaseCfs().metadata.getKeyValidator().getString(startKey.toByteBuffer()));
 
                         QueryFilter indexFilter = QueryFilter.getSliceFilter(indexKey,
                                                                              index.getIndexCfs().name,
@@ -128,7 +134,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
                         {
                             // skip the row we already saw w/ the last page of results
                             indexColumns.next();
-                            logger.trace("Skipping {}", baseCfs.metadata.getKeyValidator().getString(firstColumn.name()));
+                            logger.trace("Skipping {}", baseCfs.metadata.getKeyValidator().getString(firstColumn.name().toByteBuffer()));
                         }
                         else if (range instanceof Range && indexColumns.hasNext() && firstColumn.name().equals(startKey))
                         {
@@ -148,7 +154,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
                             continue;
                         }
 
-                        DecoratedKey dk = baseCfs.partitioner.decorateKey(lastSeenKey);
+                        DecoratedKey dk = baseCfs.partitioner.decorateKey(lastSeenKey.toByteBuffer());
                         if (!range.right.isMinimum(baseCfs.partitioner) && range.right.compareTo(dk) < 0)
                         {
                             logger.trace("Reached end of assigned scan range");
@@ -161,7 +167,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
                         }
 
                         logger.trace("Returning index hit for {}", dk);
-                        ColumnFamily data = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, filter.columnFilter(lastSeenKey), filter.timestamp));
+                        ColumnFamily data = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, filter.columnFilter(lastSeenKey.toByteBuffer()), filter.timestamp));
                         // While the column family we'll get in the end should contain the primary clause column, the initialFilter may not have found it and can thus be null
                         if (data == null)
                             data = TreeMapBackedSortedColumns.factory.create(baseCfs.metadata);
@@ -179,7 +185,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
                         if (((KeysIndex)index).isIndexEntryStale(indexKey.key, data, filter.timestamp))
                         {
                             // delete the index entry w/ its own timestamp
-                            Column dummyColumn = new Column(primary.column, indexKey.key, column.timestamp());
+                            Column dummyColumn = new Column(primaryColumn, indexKey.key, column.timestamp());
                             ((PerColumnSecondaryIndex)index).delete(dk.key, dummyColumn);
                             continue;
                         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java b/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
index 01db148..a7162ae 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.db.marshal;
 
 import java.nio.ByteBuffer;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -44,5 +45,5 @@ public abstract class AbstractCommutativeType extends AbstractType<Long>
     /**
      * create commutative column
      */
-    public abstract Column createColumn(ByteBuffer name, ByteBuffer value, long timestamp);
+    public abstract Column createColumn(CellName name, ByteBuffer value, long timestamp);
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/AbstractCompositeType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractCompositeType.java b/src/java/org/apache/cassandra/db/marshal/AbstractCompositeType.java
index d002aa7..be66d21 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractCompositeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractCompositeType.java
@@ -35,21 +35,21 @@ import java.util.List;
 public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
 {
     // changes bb position
-    protected static int getShortLength(ByteBuffer bb)
+    public static int getShortLength(ByteBuffer bb)
     {
         int length = (bb.get() & 0xFF) << 8;
         return length | (bb.get() & 0xFF);
     }
 
     // changes bb position
-    protected static void putShortLength(ByteBuffer bb, int length)
+    public static void putShortLength(ByteBuffer bb, int length)
     {
         bb.put((byte) ((length >> 8) & 0xFF));
         bb.put((byte) (length & 0xFF));
     }
 
     // changes bb position
-    protected static ByteBuffer getBytes(ByteBuffer bb, int length)
+    public static ByteBuffer getBytes(ByteBuffer bb, int length)
     {
         ByteBuffer copy = bb.duplicate();
         copy.limit(copy.position() + length);
@@ -58,7 +58,7 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
     }
 
     // changes bb position
-    protected static ByteBuffer getWithShortLength(ByteBuffer bb)
+    public static ByteBuffer getWithShortLength(ByteBuffer bb)
     {
         int length = getShortLength(bb);
         return getBytes(bb, length);
@@ -169,7 +169,7 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
      * Escapes all occurrences of the ':' character from the input, replacing them with "\:".
      * Furthermore, if the last character is '\' or '!', a '!' is appended.
      */
-    static String escape(String input)
+    public static String escape(String input)
     {
         if (input.isEmpty())
             return input;
@@ -234,7 +234,7 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
             byte b = bb.get();
             if (b != 0)
             {
-                sb.append(":!");
+                sb.append(b < 0 ? ":_" : ":!");
                 break;
             }
             ++i;
@@ -249,6 +249,7 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
         List<ParsedComparator> comparators = new ArrayList<ParsedComparator>(parts.size());
         int totalLength = 0, i = 0;
         boolean lastByteIsOne = false;
+        boolean lastByteIsMinusOne = false;
 
         for (String part : parts)
         {
@@ -257,6 +258,11 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
                 lastByteIsOne = true;
                 break;
             }
+            else if (part.equals("_"))
+            {
+                lastByteIsMinusOne = true;
+                break;
+            }
 
             ParsedComparator p = parseComparator(i, part);
             AbstractType<?> type = p.getAbstractType();
@@ -281,6 +287,8 @@ public abstract class AbstractCompositeType extends AbstractType<ByteBuffer>
         }
         if (lastByteIsOne)
             bb.put(bb.limit() - 1, (byte)1);
+        else if (lastByteIsMinusOne)
+            bb.put(bb.limit() - 1, (byte)-1);
 
         bb.rewind();
         return bb;

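The helpers made public above (getShortLength, putShortLength, getWithShortLength) read and write the per-component length field of the composite layout: each component is serialized as a two-byte big-endian length, the component bytes, and a single end-of-component byte. That last byte is 0 for a normal name, while 1 or -1 on the final component produces a bound sorting after or before every name sharing the prefix; these correspond to the ":!" and new ":_" markers handled in the string conversion code above. A minimal standalone sketch of this layout, assuming UTF-8 string components purely for readability:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class CompositeEncodingSketch
    {
        // Serialize components in the layout read by getWithShortLength above:
        // two-byte big-endian length, component bytes, one end-of-component byte.
        // eocOfLast is 0 for an exact name, -1/1 for a bound that sorts before/after
        // every name sharing the prefix.
        static ByteBuffer build(byte eocOfLast, String... components)
        {
            int size = 0;
            for (String c : components)
                size += 2 + c.getBytes(StandardCharsets.UTF_8).length + 1;
            ByteBuffer bb = ByteBuffer.allocate(size);
            for (int i = 0; i < components.length; i++)
            {
                byte[] b = components[i].getBytes(StandardCharsets.UTF_8);
                bb.put((byte) ((b.length >> 8) & 0xFF));   // putShortLength
                bb.put((byte) (b.length & 0xFF));
                bb.put(b);
                bb.put(i == components.length - 1 ? eocOfLast : 0);
            }
            bb.flip();
            return bb;
        }

        public static void main(String[] args)
        {
            ByteBuffer exact = build((byte) 0, "a", "b");
            ByteBuffer start = build((byte) -1, "a");  // sorts before every ("a", ...) name
            ByteBuffer end   = build((byte) 1, "a");   // sorts after every ("a", ...) name
            System.out.printf("exact=%d bytes, start=%d bytes, end=%d bytes%n",
                              exact.remaining(), start.remaining(), end.remaining());
        }
    }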
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/AbstractType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractType.java b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
index ffba918..cefa465 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
@@ -25,17 +25,9 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cassandra.cql3.CQL3Type;
-import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.exceptions.SyntaxException;
-import org.apache.cassandra.db.Column;
-import org.apache.cassandra.db.OnDiskAtom;
-import org.apache.cassandra.db.RangeTombstone;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.utils.ByteBufferUtil;
-
-import static org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
 
 /**
  * Specifies a Comparator for a specific type of ByteBuffer.
@@ -47,78 +39,10 @@ import static org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
  */
 public abstract class AbstractType<T> implements Comparator<ByteBuffer>
 {
-    public final Comparator<IndexInfo> indexComparator;
-    public final Comparator<IndexInfo> indexReverseComparator;
-    public final Comparator<Column> columnComparator;
-    public final Comparator<Column> columnReverseComparator;
-    public final Comparator<OnDiskAtom> onDiskAtomComparator;
     public final Comparator<ByteBuffer> reverseComparator;
 
     protected AbstractType()
     {
-        indexComparator = new Comparator<IndexInfo>()
-        {
-            public int compare(IndexInfo o1, IndexInfo o2)
-            {
-                return AbstractType.this.compare(o1.lastName, o2.lastName);
-            }
-        };
-        indexReverseComparator = new Comparator<IndexInfo>()
-        {
-            public int compare(IndexInfo o1, IndexInfo o2)
-            {
-                return AbstractType.this.compare(o1.firstName, o2.firstName);
-            }
-        };
-        columnComparator = new Comparator<Column>()
-        {
-            public int compare(Column c1, Column c2)
-            {
-                return AbstractType.this.compare(c1.name(), c2.name());
-            }
-        };
-        columnReverseComparator = new Comparator<Column>()
-        {
-            public int compare(Column c1, Column c2)
-            {
-                return AbstractType.this.compare(c2.name(), c1.name());
-            }
-        };
-        onDiskAtomComparator = new Comparator<OnDiskAtom>()
-        {
-            public int compare(OnDiskAtom c1, OnDiskAtom c2)
-            {
-                int comp = AbstractType.this.compare(c1.name(), c2.name());
-                if (comp != 0)
-                    return comp;
-
-                if (c1 instanceof RangeTombstone)
-                {
-                    if (c2 instanceof RangeTombstone)
-                    {
-                        RangeTombstone t1 = (RangeTombstone)c1;
-                        RangeTombstone t2 = (RangeTombstone)c2;
-                        int comp2 = AbstractType.this.compare(t1.max, t2.max);
-                        if (comp2 == 0)
-                            return t1.data.compareTo(t2.data);
-                        else
-                            return comp2;
-                    }
-                    else
-                    {
-                        return -1;
-                    }
-                }
-                else if (c2 instanceof RangeTombstone)
-                {
-                    return 1;
-                }
-                else
-                {
-                    return 0;
-                }
-            }
-        };
         reverseComparator = new Comparator<ByteBuffer>()
         {
             public int compare(ByteBuffer o1, ByteBuffer o2)
@@ -197,17 +121,6 @@ public abstract class AbstractType<T> implements Comparator<ByteBuffer>
         return builder.toString();
     }
 
-    /* convenience method */
-    public String getColumnsString(Iterable<Column> columns)
-    {
-        StringBuilder builder = new StringBuilder();
-        for (Column column : columns)
-        {
-            builder.append(column.getString(this)).append(",");
-        }
-        return builder.toString();
-    }
-
     public boolean isCommutative()
     {
         return false;
@@ -312,25 +225,4 @@ public abstract class AbstractType<T> implements Comparator<ByteBuffer>
     {
         return getClass().getName();
     }
-
-    protected boolean intersects(ByteBuffer minColName, ByteBuffer maxColName, ByteBuffer sliceStart, ByteBuffer sliceEnd)
-    {
-        return (sliceStart.equals(ByteBufferUtil.EMPTY_BYTE_BUFFER) || compare(maxColName, sliceStart) >= 0)
-               && (sliceEnd.equals(ByteBufferUtil.EMPTY_BYTE_BUFFER) || compare(sliceEnd, minColName) >= 0);
-    }
-
-    public boolean intersects(List<ByteBuffer> minColumnNames, List<ByteBuffer> maxColumnNames, SliceQueryFilter filter)
-    {
-        assert minColumnNames.size() == 1;
-
-        for (ColumnSlice slice : filter.slices)
-        {
-            ByteBuffer start = filter.isReversed() ? slice.finish : slice.start;
-            ByteBuffer finish = filter.isReversed() ? slice.start : slice.finish;
-
-            if (intersects(minColumnNames.get(0), maxColumnNames.get(0), start, finish))
-                return true;
-        }
-        return false;
-    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/CollectionType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/CollectionType.java b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
index 07c86e0..0f3f564 100644
--- a/src/java/org/apache/cassandra/db/marshal/CollectionType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
@@ -58,7 +58,7 @@ public abstract class CollectionType<T> extends AbstractType<T>
 
     protected abstract void appendToStringBuilder(StringBuilder sb);
 
-    public abstract ByteBuffer serialize(List<Pair<ByteBuffer, Column>> columns);
+    public abstract ByteBuffer serialize(List<Column> columns);
 
     @Override
     public String toString()
@@ -113,7 +113,7 @@ public abstract class CollectionType<T> extends AbstractType<T>
         return (ByteBuffer)result.flip();
     }
 
-    protected List<Pair<ByteBuffer, Column>> enforceLimit(List<Pair<ByteBuffer, Column>> columns)
+    protected List<Column> enforceLimit(List<Column> columns)
     {
         if (columns.size() <= MAX_ELEMENTS)
             return columns;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/CompositeType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/CompositeType.java b/src/java/org/apache/cassandra/db/marshal/CompositeType.java
index e7a5fee..36249bf 100644
--- a/src/java/org/apache/cassandra/db/marshal/CompositeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CompositeType.java
@@ -27,11 +27,8 @@ import java.util.Map;
 
 import com.google.common.collect.ImmutableList;
 
-import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.Relation;
 import org.apache.cassandra.io.util.DataOutputBuffer;
@@ -234,30 +231,6 @@ public class CompositeType extends AbstractCompositeType
         return true;
     }
 
-    @Override
-    public boolean intersects(List<ByteBuffer> minColumnNames, List<ByteBuffer> maxColumnNames, SliceQueryFilter filter)
-    {
-        assert minColumnNames.size() == maxColumnNames.size();
-        outer:
-        for (ColumnSlice slice : filter.slices)
-        {
-            // This slices intersects if all component intersect. And we don't intersect
-            // only if no slice intersects
-            ByteBuffer[] start = split(filter.isReversed() ? slice.finish : slice.start);
-            ByteBuffer[] finish = split(filter.isReversed() ? slice.start : slice.finish);
-            for (int i = 0; i < minColumnNames.size(); i++)
-            {
-                AbstractType<?> t = types.get(i);
-                ByteBuffer s = i < start.length ? start[i] : ByteBufferUtil.EMPTY_BYTE_BUFFER;
-                ByteBuffer f = i < finish.length ? finish[i] : ByteBufferUtil.EMPTY_BYTE_BUFFER;
-                if (!t.intersects(minColumnNames.get(i), maxColumnNames.get(i), s, f))
-                    continue outer;
-            }
-            return true;
-        }
-        return false;
-    }
-
     private static class StaticParsedComparator implements ParsedComparator
     {
         final AbstractType<?> type;
@@ -315,7 +288,7 @@ public class CompositeType extends AbstractCompositeType
         return out;
     }
 
-    public static class Builder implements ColumnNameBuilder
+    public static class Builder
     {
         private final CompositeType composite;
 
@@ -376,13 +349,11 @@ public class CompositeType extends AbstractCompositeType
             return this;
         }
 
-        @Override
         public Builder add(ByteBuffer bb)
         {
             return add(bb, Relation.Type.EQ);
         }
 
-        @Override
         public Builder add(ColumnIdentifier name)
         {
             return add(name.bytes);
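
A note on the Builder change above: the builder drops the CQL3 ColumnNameBuilder interface but keeps its ByteBuffer-oriented add() methods, so composite names can still be assembled by hand. Below is a minimal sketch of that usage; it assumes the pre-existing public Builder(CompositeType) constructor and ByteBuffer build() method, which are not visible in this hunk, and the two-component comparator is made up for illustration.

    import java.nio.ByteBuffer;

    import org.apache.cassandra.db.marshal.CompositeType;
    import org.apache.cassandra.db.marshal.Int32Type;
    import org.apache.cassandra.db.marshal.UTF8Type;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CompositeBuilderSketch
    {
        public static ByteBuffer nameFor(String category, int sequence)
        {
            // A two-component comparator: text then int.
            CompositeType type = CompositeType.getInstance(UTF8Type.instance, Int32Type.instance);

            // Each add() appends one component; build() serializes them into the
            // length-prefixed composite encoding used for the cell name.
            return new CompositeType.Builder(type)
                   .add(ByteBufferUtil.bytes(category))
                   .add(Int32Type.instance.decompose(sequence))
                   .build();
        }
    }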

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java b/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
index 6a77458..37cd59b 100644
--- a/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
@@ -21,6 +21,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.CounterSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -47,7 +48,7 @@ public class CounterColumnType extends AbstractCommutativeType
     /**
      * create commutative column
      */
-    public Column createColumn(ByteBuffer name, ByteBuffer value, long timestamp)
+    public Column createColumn(CellName name, ByteBuffer value, long timestamp)
     {
         return new CounterUpdateColumn(name, value, timestamp);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/ListType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/ListType.java b/src/java/org/apache/cassandra/db/marshal/ListType.java
index 808ba45..58ba6f1 100644
--- a/src/java/org/apache/cassandra/db/marshal/ListType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ListType.java
@@ -82,16 +82,16 @@ public class ListType<T> extends CollectionType<List<T>>
         sb.append(getClass().getName()).append(TypeParser.stringifyTypeParameters(Collections.<AbstractType<?>>singletonList(elements)));
     }
 
-    public ByteBuffer serialize(List<Pair<ByteBuffer, Column>> columns)
+    public ByteBuffer serialize(List<Column> columns)
     {
         columns = enforceLimit(columns);
 
         List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(columns.size());
         int size = 0;
-        for (Pair<ByteBuffer, Column> p : columns)
+        for (Column c : columns)
         {
-            bbs.add(p.right.value());
-            size += 2 + p.right.value().remaining();
+            bbs.add(c.value());
+            size += 2 + c.value().remaining();
         }
         return pack(bbs, columns.size(), size);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/MapType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/MapType.java b/src/java/org/apache/cassandra/db/marshal/MapType.java
index fd96da7..17bd7a7 100644
--- a/src/java/org/apache/cassandra/db/marshal/MapType.java
+++ b/src/java/org/apache/cassandra/db/marshal/MapType.java
@@ -89,17 +89,19 @@ public class MapType<K, V> extends CollectionType<Map<K, V>>
     /**
      * Creates the same output than serialize, but from the internal representation.
      */
-    public ByteBuffer serialize(List<Pair<ByteBuffer, Column>> columns)
+    public ByteBuffer serialize(List<Column> columns)
     {
         columns = enforceLimit(columns);
 
         List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(2 * columns.size());
         int size = 0;
-        for (Pair<ByteBuffer, Column> p : columns)
+        for (Column c : columns)
         {
-            bbs.add(p.left);
-            bbs.add(p.right.value());
-            size += 4 + p.left.remaining() + p.right.value().remaining();
+            ByteBuffer key = c.name().collectionElement();
+            ByteBuffer value = c.value();
+            bbs.add(key);
+            bbs.add(value);
+            size += 4 + key.remaining() + value.remaining();
         }
         return pack(bbs, columns.size(), size);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/marshal/SetType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/SetType.java b/src/java/org/apache/cassandra/db/marshal/SetType.java
index c947d26..9e45f8f 100644
--- a/src/java/org/apache/cassandra/db/marshal/SetType.java
+++ b/src/java/org/apache/cassandra/db/marshal/SetType.java
@@ -82,16 +82,17 @@ public class SetType<T> extends CollectionType<Set<T>>
         sb.append(getClass().getName()).append(TypeParser.stringifyTypeParameters(Collections.<AbstractType<?>>singletonList(elements)));
     }
 
-    public ByteBuffer serialize(List<Pair<ByteBuffer, Column>> columns)
+    public ByteBuffer serialize(List<Column> columns)
     {
         columns = enforceLimit(columns);
 
         List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(columns.size());
         int size = 0;
-        for (Pair<ByteBuffer, Column> p : columns)
+        for (Column c : columns)
         {
-            bbs.add(p.left);
-            size += 2 + p.left.remaining();
+            ByteBuffer key = c.name().collectionElement();
+            bbs.add(key);
+            size += 2 + key.remaining();
         }
         return pack(bbs, columns.size(), size);
     }
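
The ListType, MapType and SetType hunks above all make the same move: serialize() now receives plain Columns, because the collection element key travels inside the cell name and is read back with CellName.collectionElement(), while the element value stays in the cell value. A minimal sketch of decoding a map<text, text> from its internal cells under this layout; the decodeMap helper and the choice of UTF8 for both key and value are illustrative assumptions, not part of this patch.

    import java.nio.ByteBuffer;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.marshal.UTF8Type;

    public class CollectionCellSketch
    {
        // The element key is carried by the cell name, the element value by the cell value.
        public static Map<String, String> decodeMap(List<Column> columns)
        {
            Map<String, String> result = new LinkedHashMap<String, String>();
            for (Column c : columns)
            {
                ByteBuffer key = c.name().collectionElement();
                ByteBuffer value = c.value();
                result.put(UTF8Type.instance.getString(key), UTF8Type.instance.getString(value));
            }
            return result;
        }
    }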

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
index 1b5a4e2..4327aa9 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.Column;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
@@ -44,15 +45,15 @@ import org.apache.hadoop.mapreduce.*;
  *
  * The default split size is 64k rows.
  */
-public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<ByteBuffer, SortedMap<ByteBuffer, Column>>
+public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<ByteBuffer, SortedMap<CellName, Column>>
 {
     
-    public RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException
+    public RecordReader<ByteBuffer, SortedMap<CellName, Column>> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException
     {
         return new ColumnFamilyRecordReader();
     }
 
-    public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> getRecordReader(org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException
+    public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Column>> getRecordReader(org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException
     {
         TaskAttemptContext tac = new TaskAttemptContext(jobConf, TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID)))
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
index 98a294d..7bda3fb 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
@@ -29,8 +29,8 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.TypeParser;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.thrift.*;
@@ -44,8 +44,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransport;
 
-public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>>
-    implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>>
+public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<CellName, Column>>
+    implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Column>>
 {
     private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);
 
@@ -53,7 +53,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
     private ColumnFamilySplit split;
     private RowIterator iter;
-    private Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> currentRow;
+    private Pair<ByteBuffer, SortedMap<CellName, Column>> currentRow;
     private SlicePredicate predicate;
     private boolean isEmptyPredicate;
     private int totalRowCount; // total number of rows to fetch
@@ -92,7 +92,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return currentRow.left;
     }
 
-    public SortedMap<ByteBuffer, Column> getCurrentValue()
+    public SortedMap<CellName, Column> getCurrentValue()
     {
         return currentRow.right;
     }
@@ -210,12 +210,12 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return split.getLocations()[0];
     }
 
-    private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<ByteBuffer, Column>>>
+    private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>>
     {
         protected List<KeySlice> rows;
         protected int totalRead = 0;
         protected final boolean isSuper;
-        protected final AbstractType<?> comparator;
+        protected final CellNameType comparator;
         protected final AbstractType<?> subComparator;
         protected final IPartitioner partitioner;
 
@@ -253,7 +253,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
                         cfDef.column_type = ByteBufferUtil.string(type);
                 }
 
-                comparator = TypeParser.parse(cfDef.comparator_type);
+                comparator = CellNames.fromAbstractType(TypeParser.parse(cfDef.comparator_type), true);
                 subComparator = cfDef.subcomparator_type == null ? null : TypeParser.parse(cfDef.subcomparator_type);
             }
             catch (ConfigurationException e)
@@ -297,21 +297,21 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             for (org.apache.cassandra.thrift.Column column : super_column.columns)
             {
                 Column c = unthriftifySimple(column);
-                columns.add(c.withUpdatedName(CompositeType.build(super_column.name, c.name())));
+                columns.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
             }
             return columns;
         }
 
         protected Column unthriftifySimple(org.apache.cassandra.thrift.Column column)
         {
-            return new Column(column.name, column.value, column.timestamp);
+            return new Column(comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
         }
 
         private Column unthriftifyCounter(CounterColumn column)
         {
             //CounterColumns read the counterID from the System keyspace, so need the StorageService running and access
             //to cassandra.yaml. To avoid a Hadoop needing access to yaml return a regular Column.
-            return new Column(column.name, ByteBufferUtil.bytes(column.value), 0);
+            return new Column(comparator.cellFromByteBuffer(column.name), ByteBufferUtil.bytes(column.value), 0);
         }
 
         private List<Column> unthriftifySuperCounter(CounterSuperColumn super_column)
@@ -320,7 +320,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             for (CounterColumn column : super_column.columns)
             {
                 Column c = unthriftifyCounter(column);
-                columns.add(c.withUpdatedName(CompositeType.build(super_column.name, c.name())));
+                columns.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
             }
             return columns;
         }
@@ -401,7 +401,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        protected Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> computeNext()
+        protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
         {
             maybeInit();
             if (rows == null)
@@ -409,7 +409,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
             totalRead++;
             KeySlice ks = rows.get(i++);
-            SortedMap<ByteBuffer, Column> map = new TreeMap<ByteBuffer, Column>(comparator);
+            SortedMap<CellName, Column> map = new TreeMap<CellName, Column>(comparator);
             for (ColumnOrSuperColumn cosc : ks.columns)
             {
                 List<Column> columns = unthriftify(cosc);
@@ -422,8 +422,8 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
     private class WideRowIterator extends RowIterator
     {
-        private PeekingIterator<Pair<ByteBuffer, SortedMap<ByteBuffer, Column>>> wideColumns;
-        private ByteBuffer lastColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
+        private PeekingIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>> wideColumns;
+        private Composite lastColumn = Composites.EMPTY;
         private ByteBuffer lastCountedKey = ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
         private void maybeInit()
@@ -452,7 +452,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
             try
             {
-                rows = client.get_paged_slice(cfName, keyRange, lastColumn, consistencyLevel);
+                rows = client.get_paged_slice(cfName, keyRange, lastColumn.toByteBuffer(), consistencyLevel);
                 int n = 0;
                 for (KeySlice row : rows)
                     n += row.columns.size();
@@ -471,14 +471,14 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        protected Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> computeNext()
+        protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
         {
             maybeInit();
             if (rows == null)
                 return endOfData();
 
-            Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> next = wideColumns.next();
-            lastColumn = next.right.values().iterator().next().name().duplicate();
+            Pair<ByteBuffer, SortedMap<CellName, Column>> next = wideColumns.next();
+            lastColumn = next.right.values().iterator().next().name();
 
             maybeIncreaseRowCounter(next);
             return next;
@@ -489,7 +489,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
          * Increases the row counter only if we really moved to the next row.
          * @param next just fetched row slice
          */
-        private void maybeIncreaseRowCounter(Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> next)
+        private void maybeIncreaseRowCounter(Pair<ByteBuffer, SortedMap<CellName, Column>> next)
         {
             ByteBuffer currentKey = next.left;
             if (!currentKey.equals(lastCountedKey))
@@ -499,7 +499,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        private class WideColumnIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<ByteBuffer, Column>>>
+        private class WideColumnIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>>
         {
             private final Iterator<KeySlice> rows;
             private Iterator<ColumnOrSuperColumn> columns;
@@ -520,14 +520,17 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
                 columns = currentRow.columns.iterator();
             }
 
-            protected Pair<ByteBuffer, SortedMap<ByteBuffer, Column>> computeNext()
+            protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
             {
+                CellNameType cellType = subComparator == null
+                                      ? comparator
+                                      : new CompoundDenseCellNameType(Arrays.asList(comparator.asAbstractType(), subComparator));
                 while (true)
                 {
                     if (columns.hasNext())
                     {
                         ColumnOrSuperColumn cosc = columns.next();
-                        SortedMap<ByteBuffer, Column> map;
+                        SortedMap<CellName, Column> map;
                         List<Column> columns = unthriftify(cosc);
                         if (columns.size() == 1)
                         {
@@ -536,11 +539,11 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
                         else
                         {
                             assert isSuper;
-                            map = new TreeMap<ByteBuffer, Column>(CompositeType.getInstance(comparator, subComparator));
+                            map = new TreeMap<CellName, Column>(cellType);
                             for (Column column : columns)
                                 map.put(column.name(), column);
                         }
-                        return Pair.<ByteBuffer, SortedMap<ByteBuffer, Column>>create(currentRow.key, map);
+                        return Pair.<ByteBuffer, SortedMap<CellName, Column>>create(currentRow.key, map);
                     }
 
                     if (!rows.hasNext())
@@ -557,7 +560,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
     // to the old. Thus, expect a small performance hit.
     // And obviously this wouldn't work for wide rows. But since ColumnFamilyInputFormat
     // and ColumnFamilyRecordReader don't support them, it should be fine for now.
-    public boolean next(ByteBuffer key, SortedMap<ByteBuffer, Column> value) throws IOException
+    public boolean next(ByteBuffer key, SortedMap<CellName, Column> value) throws IOException
     {
         if (this.nextKeyValue())
         {
@@ -578,9 +581,9 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return ByteBuffer.wrap(new byte[this.keyBufferSize]);
     }
 
-    public SortedMap<ByteBuffer, Column> createValue()
+    public SortedMap<CellName, Column> createValue()
     {
-        return new TreeMap<ByteBuffer, Column>();
+        return new TreeMap<CellName, Column>();
     }
 
     public long getPos() throws IOException
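
The reader changes above boil down to one conversion: the Thrift comparator string is parsed into an AbstractType as before, then wrapped into a CellNameType via CellNames.fromAbstractType(), and that CellNameType both orders the per-row SortedMap<CellName, Column> and turns raw name ByteBuffers into CellNames. A minimal standalone sketch of that conversion follows; the comparator class string and the "col" cell are made-up inputs.

    import java.util.SortedMap;
    import java.util.TreeMap;

    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.marshal.TypeParser;
    import org.apache.cassandra.exceptions.ConfigurationException;
    import org.apache.cassandra.exceptions.SyntaxException;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameTypeSketch
    {
        public static SortedMap<CellName, Column> sampleRow(String comparatorClass)
                throws ConfigurationException, SyntaxException
        {
            // Same dance as the record reader: parse the Thrift comparator, then
            // wrap it as a dense CellNameType so it can compare CellName keys.
            CellNameType comparator = CellNames.fromAbstractType(TypeParser.parse(comparatorClass), true);
            SortedMap<CellName, Column> row = new TreeMap<CellName, Column>(comparator);

            // Names coming off the wire are plain ByteBuffers; the CellNameType
            // turns them into CellName instances.
            CellName name = comparator.cellFromByteBuffer(ByteBufferUtil.bytes("col"));
            row.put(name, new Column(name, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0));
            return row;
        }
    }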

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java b/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
index b0b7fe9..e5b8bb1 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
@@ -123,22 +123,21 @@ public abstract class AbstractCassandraStorage extends LoadFunc implements Store
         CfDef cfDef = cfInfo.cfDef;
         Tuple pair = TupleFactory.getInstance().newTuple(2);
 
+        ByteBuffer colName = col.name().toByteBuffer();
+
         // name
         if(comparator instanceof AbstractCompositeType)
-            setTupleValue(pair, 0, composeComposite((AbstractCompositeType)comparator,col.name()));
+            setTupleValue(pair, 0, composeComposite((AbstractCompositeType)comparator,colName));
         else
-            setTupleValue(pair, 0, cassandraToObj(comparator, col.name()));
+            setTupleValue(pair, 0, cassandraToObj(comparator, col.name().toByteBuffer()));
 
         // value
         Map<ByteBuffer,AbstractType> validators = getValidatorMap(cfDef);
-        ByteBuffer colName;
         if (cfInfo.cql3Table && !cfInfo.compactCqlTable)
         {
-            ByteBuffer[] names = ((AbstractCompositeType) parseType(cfDef.comparator_type)).split(col.name());
+            ByteBuffer[] names = ((AbstractCompositeType) parseType(cfDef.comparator_type)).split(colName);
             colName = names[names.length-1];
         }
-        else
-            colName = col.name();
         if (validators.get(colName) == null)
         {
             Map<MarshallerType, AbstractType> marshallers = getDefaultMarshallers(cfDef);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java b/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
index 89c1944..7ce78de 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
@@ -24,6 +24,7 @@ import java.util.*;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -111,8 +112,8 @@ public class CqlStorage extends AbstractCassandraStorage
                 ByteBuffer columnValue = columns.get(ByteBufferUtil.string(cdef.name.duplicate()));
                 if (columnValue != null)
                 {
-                    Column column = new Column(cdef.name, columnValue);
-                    AbstractType<?> validator = getValidatorMap(cfDef).get(column.name());
+                    Column column = new Column(CellNames.simpleDense(cdef.name), columnValue);
+                    AbstractType<?> validator = getValidatorMap(cfDef).get(cdef.name);
                     setTupleValue(tuple, i, cqlColumnToObj(column, cfDef), validator);
                 }
                 else

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
index 6018369..27f1c12 100644
--- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
@@ -28,7 +28,6 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.context.CounterContext;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.utils.CounterId;
@@ -117,7 +116,7 @@ public abstract class AbstractSSTableSimpleWriter
             if (currentSuperColumn == null)
                 throw new IllegalStateException("Trying to add a column to a super column family, but no super column has been started.");
 
-            column = column.withUpdatedName(CompositeType.build(currentSuperColumn, column.name()));
+            column = column.withUpdatedName(columnFamily.getComparator().makeCellName(currentSuperColumn, column.name().toByteBuffer()));
         }
         columnFamily.addColumn(column);
     }
@@ -130,7 +129,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addColumn(ByteBuffer name, ByteBuffer value, long timestamp)
     {
-        addColumn(new Column(name, value, timestamp));
+        addColumn(new Column(metadata.comparator.cellFromByteBuffer(name), value, timestamp));
     }
 
     /**
@@ -145,7 +144,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addExpiringColumn(ByteBuffer name, ByteBuffer value, long timestamp, int ttl, long expirationTimestampMS)
     {
-        addColumn(new ExpiringColumn(name, value, timestamp, ttl, (int)(expirationTimestampMS / 1000)));
+        addColumn(new ExpiringColumn(metadata.comparator.cellFromByteBuffer(name), value, timestamp, ttl, (int)(expirationTimestampMS / 1000)));
     }
 
     /**
@@ -155,7 +154,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addCounterColumn(ByteBuffer name, long value)
     {
-        addColumn(new CounterColumn(name, CounterContext.instance().create(counterid, 1L, value, false), System.currentTimeMillis()));
+        addColumn(new CounterColumn(metadata.comparator.cellFromByteBuffer(name), CounterContext.instance().create(counterid, 1L, value, false), System.currentTimeMillis()));
     }
 
     /**


[07/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundSparseCellName.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundSparseCellName.java b/src/java/org/apache/cassandra/db/composites/CompoundSparseCellName.java
new file mode 100644
index 0000000..77b311e
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundSparseCellName.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+public class CompoundSparseCellName extends CompoundComposite implements CellName
+{
+    private static final ByteBuffer[] EMPTY_PREFIX = new ByteBuffer[0];
+
+    protected final ColumnIdentifier columnName;
+
+    // Not meant to be used directly, you should use the CellNameType method instead
+    CompoundSparseCellName(ColumnIdentifier columnName)
+    {
+        this(EMPTY_PREFIX, columnName);
+    }
+
+    CompoundSparseCellName(ByteBuffer[] elements, ColumnIdentifier columnName)
+    {
+        this(elements, elements.length, columnName);
+    }
+
+    CompoundSparseCellName(ByteBuffer[] elements, int size, ColumnIdentifier columnName)
+    {
+        super(elements, size);
+        this.columnName = columnName;
+    }
+
+    public int size()
+    {
+        return size + 1;
+    }
+
+    public ByteBuffer get(int i)
+    {
+        return i == size ? columnName.bytes : elements[i];
+    }
+
+    public int clusteringSize()
+    {
+        return size;
+    }
+
+    public ColumnIdentifier cql3ColumnName()
+    {
+        return columnName;
+    }
+
+    public ByteBuffer collectionElement()
+    {
+        return null;
+    }
+
+    public boolean isCollectionCell()
+    {
+        return false;
+    }
+
+    public boolean isSameCQL3RowAs(CellName other)
+    {
+        if (clusteringSize() != other.clusteringSize())
+            return false;
+
+        for (int i = 0; i < clusteringSize(); i++)
+        {
+            if (!elements[i].equals(other.get(i)))
+                return false;
+        }
+        return true;
+    }
+
+    public CellName copy(Allocator allocator)
+    {
+        if (elements.length == 0)
+            return this;
+
+        // We don't copy columnName because it's interned in SparseCellNameType
+        return new CompoundSparseCellName(elementsCopy(allocator), columnName);
+    }
+
+    @Override
+    public long memorySize()
+    {
+        return ObjectSizes.getSuperClassFieldSize(super.memorySize())
+             + ObjectSizes.getFieldSize(ObjectSizes.getReferenceSize()) + columnName.memorySize();
+    }
+
+    public static class WithCollection extends CompoundSparseCellName
+    {
+        private final ByteBuffer collectionElement;
+
+        WithCollection(ColumnIdentifier columnName, ByteBuffer collectionElement)
+        {
+            this(EMPTY_PREFIX, columnName, collectionElement);
+        }
+
+        WithCollection(ByteBuffer[] elements, ColumnIdentifier columnName, ByteBuffer collectionElement)
+        {
+            this(elements, elements.length, columnName, collectionElement);
+        }
+
+        WithCollection(ByteBuffer[] elements, int size, ColumnIdentifier columnName, ByteBuffer collectionElement)
+        {
+            super(elements, size, columnName);
+            this.collectionElement = collectionElement;
+        }
+
+        public int size()
+        {
+            return size + 2;
+        }
+
+        public ByteBuffer get(int i)
+        {
+            return i == size + 1 ? collectionElement : super.get(i);
+        }
+
+        @Override
+        public ByteBuffer collectionElement()
+        {
+            return collectionElement;
+        }
+
+        @Override
+        public boolean isCollectionCell()
+        {
+            return true;
+        }
+
+        @Override
+        public CellName copy(Allocator allocator)
+        {
+            // We don't copy columnName because it's interned in SparseCellNameType
+            return new CompoundSparseCellName.WithCollection(elements.length == 0 ? elements : elementsCopy(allocator), size, columnName, allocator.clone(collectionElement));
+        }
+
+        @Override
+        public long memorySize()
+        {
+            return ObjectSizes.getSuperClassFieldSize(super.memorySize())
+                 + ObjectSizes.getFieldSize(ObjectSizes.getReferenceSize()) + ObjectSizes.getSize(collectionElement);
+        }
+    }
+}
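
To make the layout of the new class concrete: for a sparse (CQL3, non-compact) table the cell name is the clustering prefix followed by the CQL3 column name, with an optional collection element appended by the WithCollection variant. The short sketch below only spells that out with the accessors defined above; the example table layout in the comments is hypothetical.

    import java.nio.ByteBuffer;

    import org.apache.cassandra.db.composites.CellName;

    public class SparseCellNameLayoutSketch
    {
        // For a cell of CQL3 column "v" in a row with clustering values (c1, c2),
        // a CompoundSparseCellName lays out its components as:
        //   get(0) = c1, get(1) = c2   -- the clustering prefix
        //   get(2) = "v"               -- the CQL3 column name
        // so size() == clusteringSize() + 1, and two cells belong to the same CQL3
        // row exactly when their clustering prefixes match (isSameCQL3RowAs).
        public static ByteBuffer cql3NameBytes(CellName cell)
        {
            return cell.get(cell.clusteringSize());
        }
    }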

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundSparseCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundSparseCellNameType.java b/src/java/org/apache/cassandra/db/composites/CompoundSparseCellNameType.java
new file mode 100644
index 0000000..2cc8516
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundSparseCellNameType.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CollectionType;
+import org.apache.cassandra.db.marshal.ColumnToCollectionType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+public class CompoundSparseCellNameType extends AbstractCompoundCellNameType
+{
+    private static final AbstractType<?> columnNameType = UTF8Type.instance;
+    private static final ColumnIdentifier rowMarkerId = new ColumnIdentifier(ByteBufferUtil.EMPTY_BYTE_BUFFER, UTF8Type.instance);
+    private static final CellName rowMarkerNoPrefix = new CompoundSparseCellName(rowMarkerId);
+
+    protected final Map<ByteBuffer, ColumnIdentifier> internedIds;
+
+    public CompoundSparseCellNameType(List<AbstractType<?>> types)
+    {
+        this(new CompoundCType(types));
+    }
+
+    public CompoundSparseCellNameType(CompoundCType clusteringType)
+    {
+        this(clusteringType, makeCType(clusteringType, null), new HashMap<ByteBuffer, ColumnIdentifier>());
+    }
+
+    private CompoundSparseCellNameType(CompoundCType clusteringType, CompoundCType fullType, Map<ByteBuffer, ColumnIdentifier> internedIds)
+    {
+        super(clusteringType, fullType);
+        this.internedIds = internedIds;
+    }
+
+    protected static CompoundCType makeCType(CompoundCType clusteringType, ColumnToCollectionType collectionType)
+    {
+        List<AbstractType<?>> allSubtypes = new ArrayList<AbstractType<?>>(clusteringType.size() + (collectionType == null ? 1 : 2));
+        for (int i = 0; i < clusteringType.size(); i++)
+            allSubtypes.add(clusteringType.subtype(i));
+        allSubtypes.add(columnNameType);
+        if (collectionType != null)
+            allSubtypes.add(collectionType);
+        return new CompoundCType(allSubtypes);
+    }
+
+    public CellNameType setSubtype(int position, AbstractType<?> newType)
+    {
+        if (position < clusteringSize)
+            return new CompoundSparseCellNameType(clusteringType.setSubtype(position, newType), fullType.setSubtype(position, newType), internedIds);
+
+        if (position == clusteringSize)
+            throw new IllegalArgumentException();
+
+        throw new IndexOutOfBoundsException();
+    }
+
+    @Override
+    public CellNameType addCollection(ColumnIdentifier columnName, CollectionType newCollection)
+    {
+        return new WithCollection(clusteringType, ColumnToCollectionType.getInstance(Collections.singletonMap(columnName.bytes, newCollection)), internedIds);
+    }
+
+    public boolean isDense()
+    {
+        return false;
+    }
+
+    public boolean supportCollections()
+    {
+        return true;
+    }
+
+    public CellName create(Composite prefix, ColumnIdentifier columnName)
+    {
+        assert prefix.size() == clusteringSize;
+
+        if (prefix.isEmpty())
+            return new CompoundSparseCellName(columnName);
+
+        assert prefix instanceof CompoundComposite;
+        CompoundComposite lc = (CompoundComposite)prefix;
+        return new CompoundSparseCellName(lc.elements, clusteringSize, columnName);
+    }
+
+    public CellName rowMarker(Composite prefix)
+    {
+        if (prefix.isEmpty())
+            return rowMarkerNoPrefix;
+
+        return create(prefix, rowMarkerId);
+    }
+
+    protected ColumnIdentifier idFor(ByteBuffer bb)
+    {
+        ColumnIdentifier id = internedIds.get(bb);
+        return id == null ? new ColumnIdentifier(bb, columnNameType) : id;
+    }
+
+    protected Composite makeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+    {
+        if (size < clusteringSize + 1 || eoc != Composite.EOC.NONE)
+            return new CompoundComposite(components, size).withEOC(eoc);
+
+        return new CompoundSparseCellName(components, clusteringSize, idFor(components[clusteringSize]));
+    }
+
+    protected Composite copyAndMakeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+    {
+        if (size < clusteringSize + 1 || eoc != Composite.EOC.NONE)
+            return new CompoundComposite(Arrays.copyOfRange(components, 0, size), size).withEOC(eoc);
+
+        ByteBuffer[] clusteringColumns = Arrays.copyOfRange(components, 0, clusteringSize);
+        return new CompoundSparseCellName(clusteringColumns, idFor(components[clusteringSize]));
+    }
+
+    public void addCQL3Column(ColumnIdentifier id)
+    {
+        internedIds.put(id.bytes, id);
+    }
+
+    public void removeCQL3Column(ColumnIdentifier id)
+    {
+        internedIds.remove(id.bytes);
+    }
+
+    public CQL3Row.Builder CQL3RowBuilder(long now)
+    {
+        return makeSparseCQL3RowBuilder(now);
+    }
+
+    public static class WithCollection extends CompoundSparseCellNameType
+    {
+        private final ColumnToCollectionType collectionType;
+
+        public WithCollection(List<AbstractType<?>> types, ColumnToCollectionType collectionType)
+        {
+            this(new CompoundCType(types), collectionType);
+        }
+
+        WithCollection(CompoundCType clusteringType, ColumnToCollectionType collectionType)
+        {
+            this(clusteringType, makeCType(clusteringType, collectionType), collectionType, new HashMap<ByteBuffer, ColumnIdentifier>());
+        }
+
+        private WithCollection(CompoundCType clusteringType, ColumnToCollectionType collectionType, Map<ByteBuffer, ColumnIdentifier> internedIds)
+        {
+            this(clusteringType, makeCType(clusteringType, collectionType), collectionType, internedIds);
+        }
+
+        private WithCollection(CompoundCType clusteringType, CompoundCType fullCType, ColumnToCollectionType collectionType, Map<ByteBuffer, ColumnIdentifier> internedIds)
+        {
+            super(clusteringType, fullCType, internedIds);
+            this.collectionType = collectionType;
+        }
+
+        @Override
+        public CellNameType setSubtype(int position, AbstractType<?> newType)
+        {
+            if (position < clusteringSize)
+                return new WithCollection(clusteringType.setSubtype(position, newType), collectionType, internedIds);
+
+            throw position >= fullType.size() ? new IndexOutOfBoundsException() : new IllegalArgumentException();
+        }
+
+        @Override
+        public CellNameType addCollection(ColumnIdentifier columnName, CollectionType newCollection)
+        {
+            Map<ByteBuffer, CollectionType> newMap = new HashMap<>(collectionType.defined);
+            newMap.put(columnName.bytes, newCollection);
+            return new WithCollection(clusteringType, ColumnToCollectionType.getInstance(newMap), internedIds);
+        }
+
+        @Override
+        public CellName create(Composite prefix, ColumnIdentifier columnName, ByteBuffer collectionElement)
+        {
+            // We ignore the columnName because it's just the COMPACT_VALUE name, which is not stored in the cell name
+            assert prefix.size() == clusteringSize;
+
+            if (prefix.isEmpty())
+                return new CompoundSparseCellName.WithCollection(columnName, collectionElement);
+
+            assert prefix instanceof CompoundComposite;
+            CompoundComposite lc = (CompoundComposite)prefix;
+            return new CompoundSparseCellName.WithCollection(lc.elements, clusteringSize, columnName, collectionElement);
+        }
+
+        @Override
+        public boolean hasCollections()
+        {
+            return true;
+        }
+
+        @Override
+        public ColumnToCollectionType collectionType()
+        {
+            return collectionType;
+        }
+
+        @Override
+        protected Composite makeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+        {
+            if (size < fullSize)
+                return super.makeWith(components, size, eoc);
+
+            return new CompoundSparseCellName.WithCollection(components, clusteringSize, idFor(components[clusteringSize]), components[fullSize - 1]);
+        }
+
+        protected Composite copyAndMakeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+        {
+            if (size < fullSize)
+                return super.copyAndMakeWith(components, size, eoc);
+
+            ByteBuffer[] clusteringColumns = Arrays.copyOfRange(components, 0, clusteringSize);
+            return new CompoundSparseCellName.WithCollection(clusteringColumns, idFor(components[clusteringSize]), components[fullSize - 1]);
+        }
+    }
+}
+
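
A small usage sketch for the type above: a table with no clustering columns, so the clustering prefix is empty and a cell name is just the CQL3 column name. Constructing the type and the ColumnIdentifier directly (instead of going through CFMetaData/CellNames) is purely for illustration, and the behaviour of Composites.EMPTY as the empty prefix is assumed from its use elsewhere in this patch.

    import java.util.Collections;

    import org.apache.cassandra.cql3.ColumnIdentifier;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.Composites;
    import org.apache.cassandra.db.composites.CompoundSparseCellNameType;
    import org.apache.cassandra.db.marshal.AbstractType;
    import org.apache.cassandra.db.marshal.UTF8Type;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class SparseTypeSketch
    {
        public static void main(String[] args)
        {
            // No clustering columns: the full cell name is (CQL3 column name [, collection element]).
            CompoundSparseCellNameType type =
                new CompoundSparseCellNameType(Collections.<AbstractType<?>>emptyList());

            ColumnIdentifier v = new ColumnIdentifier(ByteBufferUtil.bytes("v"), UTF8Type.instance);
            type.addCQL3Column(v); // intern the identifier so idFor()/makeWith() can reuse it

            CellName cell = type.create(Composites.EMPTY, v);   // name is just ("v")
            CellName marker = type.rowMarker(Composites.EMPTY); // row marker with empty column name

            // Both cells carry an empty clustering prefix, so they sit in the same CQL3 row.
            System.out.println(cell.isSameCQL3RowAs(marker));   // true
        }
    }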

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleCType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleCType.java b/src/java/org/apache/cassandra/db/composites/SimpleCType.java
new file mode 100644
index 0000000..08ace8b
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleCType.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+
+/**
+ * A not-truly-composite CType.
+ */
+public class SimpleCType extends AbstractCType
+{
+    protected final AbstractType<?> type;
+
+    public SimpleCType(AbstractType<?> type)
+    {
+        this.type = type;
+    }
+
+    public boolean isCompound()
+    {
+        return false;
+    }
+
+    public int size()
+    {
+        return 1;
+    }
+
+    public AbstractType<?> subtype(int i)
+    {
+        if (i != 0)
+            throw new IndexOutOfBoundsException();
+        return type;
+    }
+
+    public Composite fromByteBuffer(ByteBuffer bytes)
+    {
+        return !bytes.hasRemaining() ? Composites.EMPTY : new SimpleComposite(bytes);
+    }
+
+    public CBuilder builder()
+    {
+        return new SimpleCBuilder(this);
+    }
+
+    public CType setSubtype(int position, AbstractType<?> newType)
+    {
+        if (position != 0)
+            throw new IndexOutOfBoundsException();
+        return new SimpleCType(newType);
+    }
+
+    // Use sparingly, it defeats the purpose
+    public AbstractType<?> asAbstractType()
+    {
+        return type;
+    }
+
+    public static class SimpleCBuilder implements CBuilder
+    {
+        private final CType type;
+        private ByteBuffer value;
+
+        public SimpleCBuilder(CType type)
+        {
+            this.type = type;
+        }
+
+        public int remainingCount()
+        {
+            return value == null ? 1 : 0;
+        }
+
+        public CBuilder add(ByteBuffer value)
+        {
+            if (this.value != null)
+                throw new IllegalStateException();
+            this.value = value;
+            return this;
+        }
+
+        public CBuilder add(Object value)
+        {
+            return add(((AbstractType)type.subtype(0)).decompose(value));
+        }
+
+        public Composite build()
+        {
+            if (value == null || !value.hasRemaining())
+                return Composites.EMPTY;
+
+            // If we're building a dense cell name, then we can directly allocate the
+            // CellName object as it's complete.
+            if (type instanceof CellNameType && ((CellNameType)type).isDense())
+                return new SimpleDenseCellName(value);
+
+            return new SimpleComposite(value);
+        }
+
+        public Composite buildWith(ByteBuffer value)
+        {
+            if (this.value != null)
+                throw new IllegalStateException();
+
+            if (value == null || !value.hasRemaining())
+                return Composites.EMPTY;
+
+            // If we're building a dense cell name, then we can directly allocate the
+            // CellName object as it's complete.
+            if (type instanceof CellNameType && ((CellNameType)type).isDense())
+                return new SimpleDenseCellName(value);
+
+            return new SimpleComposite(value);
+        }
+    }
+}
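
For completeness, the builder path on the new type above: a SimpleCType wraps a single comparator and its CBuilder accepts exactly one component, returning a SimpleComposite (or Composites.EMPTY for an empty value). A minimal sketch; the single() helper name is made up for illustration.

    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.db.composites.SimpleCType;
    import org.apache.cassandra.db.marshal.UTF8Type;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class SimpleCTypeSketch
    {
        public static Composite single(String value)
        {
            // One comparator, one component: add() may only be called once.
            SimpleCType type = new SimpleCType(UTF8Type.instance);
            return type.builder().add(ByteBufferUtil.bytes(value)).build();
        }
    }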

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleComposite.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleComposite.java b/src/java/org/apache/cassandra/db/composites/SimpleComposite.java
new file mode 100644
index 0000000..e88afe7
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleComposite.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+/**
+ * A "simple" (not-truly-composite) Composite.
+ */
+public class SimpleComposite extends AbstractComposite
+{
+    protected final ByteBuffer element;
+
+    SimpleComposite(ByteBuffer element)
+    {
+        // We have to be careful with empty ByteBuffers as we shouldn't store them.
+        // To avoid errors (and so isEmpty() works as we intend), we don't allow a SimpleComposite with
+        // an empty element (but it's ok for CompoundComposite, it's a row marker in that case).
+        assert element.hasRemaining();
+        this.element = element;
+    }
+
+    public int size()
+    {
+        return 1;
+    }
+
+    public ByteBuffer get(int i)
+    {
+        if (i != 0)
+            throw new IndexOutOfBoundsException();
+
+        return element;
+    }
+
+    @Override
+    public Composite withEOC(EOC newEoc)
+    {
+        // EOC makes no sense for not-truly-composite values.
+        return this;
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer()
+    {
+        return element;
+    }
+
+    public long memorySize()
+    {
+        return ObjectSizes.getFieldSize(ObjectSizes.getSize(element));
+    }
+
+    public Composite copy(Allocator allocator)
+    {
+        return new SimpleComposite(allocator.clone(element));
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleDenseCellName.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleDenseCellName.java b/src/java/org/apache/cassandra/db/composites/SimpleDenseCellName.java
new file mode 100644
index 0000000..338e8a0
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleDenseCellName.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+public class SimpleDenseCellName extends SimpleComposite implements CellName
+{
+    // Not meant to be used directly, you should use the CellNameType method instead
+    SimpleDenseCellName(ByteBuffer element)
+    {
+        super(element);
+    }
+
+    public int clusteringSize()
+    {
+        return 1;
+    }
+
+    public ColumnIdentifier cql3ColumnName()
+    {
+        return null;
+    }
+
+    public ByteBuffer collectionElement()
+    {
+        return null;
+    }
+
+    public boolean isCollectionCell()
+    {
+        return false;
+    }
+
+    public boolean isSameCQL3RowAs(CellName other)
+    {
+        // Dense cells imply one cell per CQL row, so no other cell can be in the same row.
+        return equals(other);
+    }
+
+    @Override
+    public long memorySize()
+    {
+        return ObjectSizes.getSuperClassFieldSize(super.memorySize());
+    }
+
+    // If cell names were sharing some prefix components, this copy will break that sharing, so
+    // we might want to try to do better.
+    @Override
+    public CellName copy(Allocator allocator)
+    {
+        return new SimpleDenseCellName(allocator.clone(element));
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleDenseCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleDenseCellNameType.java b/src/java/org/apache/cassandra/db/composites/SimpleDenseCellNameType.java
new file mode 100644
index 0000000..cafb521
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleDenseCellNameType.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.marshal.AbstractType;
+
+public class SimpleDenseCellNameType extends AbstractSimpleCellNameType
+{
+    public SimpleDenseCellNameType(AbstractType<?> type)
+    {
+        super(type);
+    }
+
+    public int clusteringPrefixSize()
+    {
+        return 1;
+    }
+
+    public CBuilder prefixBuilder()
+    {
+        // For a simple dense layout the whole cell name is the prefix
+        return builder();
+    }
+
+    public CellNameType setSubtype(int position, AbstractType<?> newType)
+    {
+        if (position != 0)
+            throw new IllegalArgumentException();
+        return new SimpleDenseCellNameType(newType);
+    }
+
+    public boolean isDense()
+    {
+        return true;
+    }
+
+    public CellName create(Composite prefix, ColumnIdentifier columnName)
+    {
+        assert prefix.size() == 1;
+        // We ignore the columnName because it's just the COMPACT_VALUE name, which is not stored in the cell name
+        return new SimpleDenseCellName(prefix.get(0));
+    }
+
+    @Override
+    public Composite fromByteBuffer(ByteBuffer bb)
+    {
+        return !bb.hasRemaining()
+             ? Composites.EMPTY
+             : new SimpleDenseCellName(bb);
+    }
+
+    public void addCQL3Column(ColumnIdentifier id) {}
+    public void removeCQL3Column(ColumnIdentifier id) {}
+
+    public CQL3Row.Builder CQL3RowBuilder(long now)
+    {
+        return makeDenseCQL3RowBuilder(now);
+    }
+}
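
And the dense counterpart: for a dense layout the whole comparator value is the cell name, so fromByteBuffer() can hand back a CellName (a SimpleDenseCellName) directly rather than a mere prefix. A minimal sketch with a made-up UTF8 comparator:

    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
    import org.apache.cassandra.db.marshal.UTF8Type;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class DenseNameSketch
    {
        public static Composite denseName(String raw)
        {
            // Non-empty input yields a SimpleDenseCellName; empty input yields Composites.EMPTY.
            SimpleDenseCellNameType type = new SimpleDenseCellNameType(UTF8Type.instance);
            return type.fromByteBuffer(ByteBufferUtil.bytes(raw));
        }
    }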

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleSparseCellName.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleSparseCellName.java b/src/java/org/apache/cassandra/db/composites/SimpleSparseCellName.java
new file mode 100644
index 0000000..8105683
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleSparseCellName.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+public class SimpleSparseCellName extends AbstractComposite implements CellName
+{
+    private final ColumnIdentifier columnName;
+
+    // Not meant to be used directly; use the CellNameType method instead
+    SimpleSparseCellName(ColumnIdentifier columnName)
+    {
+        this.columnName = columnName;
+    }
+
+    public int size()
+    {
+        return 1;
+    }
+
+    public ByteBuffer get(int i)
+    {
+        if (i != 0)
+            throw new IndexOutOfBoundsException();
+
+        return columnName.bytes;
+    }
+
+    @Override
+    public Composite withEOC(EOC newEoc)
+    {
+        // EOC makes no sense for names that are not truly composite.
+        return this;
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer()
+    {
+        return columnName.bytes;
+    }
+
+    public int clusteringSize()
+    {
+        return 0;
+    }
+
+    public ColumnIdentifier cql3ColumnName()
+    {
+        return columnName;
+    }
+
+    public ByteBuffer collectionElement()
+    {
+        return null;
+    }
+
+    public boolean isCollectionCell()
+    {
+        return false;
+    }
+
+    public boolean isSameCQL3RowAs(CellName other)
+    {
+        return true;
+    }
+
+    @Override
+    public long memorySize()
+    {
+        return ObjectSizes.getFieldSize(ObjectSizes.getReferenceSize()) + columnName.memorySize();
+    }
+
+    @Override
+    public CellName copy(Allocator allocator)
+    {
+        // We're interning these instances in SimpleSparseCellNameType, so there is no need to copy.
+        return this;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/SimpleSparseCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/SimpleSparseCellNameType.java b/src/java/org/apache/cassandra/db/composites/SimpleSparseCellNameType.java
new file mode 100644
index 0000000..6d86ce2
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/SimpleSparseCellNameType.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.marshal.AbstractType;
+
+public class SimpleSparseCellNameType extends AbstractSimpleCellNameType
+{
+    // Simple sparse means a static thrift CF or a non-clustered CQL3 table. Cell names will mainly
+    // be those that have been declared, so we can intern whole CellName instances.
+    private final Map<ByteBuffer, CellName> internedNames;
+
+    public SimpleSparseCellNameType(AbstractType<?> type)
+    {
+        this(type, new HashMap<ByteBuffer, CellName>());
+    }
+
+    private SimpleSparseCellNameType(AbstractType<?> type, Map<ByteBuffer, CellName> internedNames)
+    {
+        super(type);
+        this.internedNames = internedNames;
+    }
+
+    public int clusteringPrefixSize()
+    {
+        return 0;
+    }
+
+    public CellNameType setSubtype(int position, AbstractType<?> newType)
+    {
+        if (position != 0)
+            throw new IllegalArgumentException();
+        return new SimpleSparseCellNameType(newType, internedNames);
+    }
+
+    public CBuilder prefixBuilder()
+    {
+        return Composites.EMPTY_BUILDER;
+    }
+
+    public boolean isDense()
+    {
+        return false;
+    }
+
+    public CellName create(Composite prefix, ColumnIdentifier columnName)
+    {
+        assert prefix.isEmpty();
+        CellName cn = internedNames.get(columnName.bytes);
+        return cn == null ? new SimpleSparseCellName(columnName) : cn;
+    }
+
+    @Override
+    public Composite fromByteBuffer(ByteBuffer bb)
+    {
+        if (!bb.hasRemaining())
+            return Composites.EMPTY;
+
+        CellName cn = internedNames.get(bb);
+        return cn == null ? new SimpleSparseCellName(new ColumnIdentifier(bb, type)) : cn;
+    }
+
+    public void addCQL3Column(ColumnIdentifier id)
+    {
+        internedNames.put(id.bytes, new SimpleSparseCellName(id));
+    }
+
+    public void removeCQL3Column(ColumnIdentifier id)
+    {
+        internedNames.remove(id.bytes);
+    }
+
+    public CQL3Row.Builder CQL3RowBuilder(long now)
+    {
+        return makeSparseCQL3RowBuilder(now);
+    }
+}
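
The interning above is why SimpleSparseCellName.copy can simply return this: every declared CQL3 column maps to a single canonical CellName, and only names that were never declared (e.g. thrift-written cells) get a fresh instance. A minimal, self-contained sketch of that pattern, using hypothetical Name and SparseNameInterner classes rather than the Cassandra types:

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-in for CellName: just wraps the raw name bytes.
    final class Name
    {
        final ByteBuffer bytes;

        Name(ByteBuffer bytes)
        {
            this.bytes = bytes;
        }
    }

    final class SparseNameInterner
    {
        // ByteBuffer equals/hashCode are content-based, so it can key a HashMap.
        private final Map<ByteBuffer, Name> interned = new HashMap<ByteBuffer, Name>();

        // Analogous to addCQL3Column: declared columns get a canonical instance.
        void declare(ByteBuffer columnName)
        {
            interned.put(columnName, new Name(columnName));
        }

        // Analogous to create/fromByteBuffer: declared names reuse the interned
        // instance, unknown names get a fresh wrapper.
        Name intern(ByteBuffer raw)
        {
            Name existing = interned.get(raw);
            return existing == null ? new Name(raw) : existing;
        }
    }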

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/ColumnCounter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/ColumnCounter.java b/src/java/org/apache/cassandra/db/filter/ColumnCounter.java
index c2c0ade..61f919e 100644
--- a/src/java/org/apache/cassandra/db/filter/ColumnCounter.java
+++ b/src/java/org/apache/cassandra/db/filter/ColumnCounter.java
@@ -20,13 +20,11 @@
  */
 package org.apache.cassandra.db.filter;
 
-import java.nio.ByteBuffer;
-
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.DeletionInfo;
-import org.apache.cassandra.db.marshal.CompositeType;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
 public class ColumnCounter
 {
@@ -75,9 +73,9 @@ public class ColumnCounter
 
     public static class GroupByPrefix extends ColumnCounter
     {
-        private final CompositeType type;
+        private final CellNameType type;
         private final int toGroup;
-        private ByteBuffer[] last;
+        private CellName last;
 
         /**
          * A column counter that count only 1 for all the columns sharing a
@@ -89,7 +87,7 @@ public class ColumnCounter
          *                column. If 0, all columns are grouped, otherwise we group
          *                those for which the {@code toGroup} first component are equals.
          */
-        public GroupByPrefix(long timestamp, CompositeType type, int toGroup)
+        public GroupByPrefix(long timestamp, CellNameType type, int toGroup)
         {
             super(timestamp);
             this.type = type;
@@ -112,15 +110,15 @@ public class ColumnCounter
                 return;
             }
 
-            ByteBuffer[] current = type.split(column.name());
-            assert current.length >= toGroup;
+            CellName current = column.name();
+            assert current.size() >= toGroup;
 
             if (last != null)
             {
                 boolean isSameGroup = true;
                 for (int i = 0; i < toGroup; i++)
                 {
-                    if (ByteBufferUtil.compareUnsigned(last[i], current[i]) != 0)
+                    if (type.subtype(i).compare(last.get(i), current.get(i)) != 0)
                     {
                         isSameGroup = false;
                         break;
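
GroupByPrefix above now compares composite components with the CellNameType's per-component comparator instead of raw unsigned bytes; the counting logic itself is unchanged: a new group is counted only when one of the first toGroup components differs from the previously seen name. A rough standalone sketch of that logic, with a hypothetical GroupByPrefixCounter and String components standing in for the typed comparison:

    import java.util.Arrays;
    import java.util.List;

    final class GroupByPrefixCounter
    {
        private final int toGroup;
        private String[] last;
        private int live;

        GroupByPrefixCounter(int toGroup)
        {
            this.toGroup = toGroup;
        }

        void count(String[] cellName)
        {
            assert cellName.length >= toGroup;
            if (last != null)
            {
                boolean sameGroup = true;
                for (int i = 0; i < toGroup; i++)
                {
                    // Stands in for type.subtype(i).compare(last.get(i), current.get(i))
                    if (last[i].compareTo(cellName[i]) != 0)
                    {
                        sameGroup = false;
                        break;
                    }
                }
                if (sameGroup)
                    return;
            }
            live++;
            last = cellName;
        }

        int live()
        {
            return live;
        }

        public static void main(String[] args)
        {
            GroupByPrefixCounter counter = new GroupByPrefixCounter(1);
            List<String[]> cells = Arrays.asList(new String[]{ "a", "c1" },
                                                 new String[]{ "a", "c2" },
                                                 new String[]{ "b", "c1" });
            for (String[] cell : cells)
                counter.count(cell);
            System.out.println(counter.live()); // 2: one group for prefix "a", one for "b"
        }
    }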

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/ColumnSlice.java b/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
index e0c576e..5b504a5 100644
--- a/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
+++ b/src/java/org/apache/cassandra/db/filter/ColumnSlice.java
@@ -21,48 +21,67 @@ import java.io.*;
 import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.Iterator;
+import java.util.List;
 import java.util.NavigableMap;
 
 import com.google.common.collect.AbstractIterator;
 
+import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Allocator;
 
 public class ColumnSlice
 {
-    public static final Serializer serializer = new Serializer();
-
-    public static final ColumnSlice ALL_COLUMNS = new ColumnSlice(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER);
+    public static final ColumnSlice ALL_COLUMNS = new ColumnSlice(Composites.EMPTY, Composites.EMPTY);
     public static final ColumnSlice[] ALL_COLUMNS_ARRAY = new ColumnSlice[]{ ALL_COLUMNS };
 
-    public final ByteBuffer start;
-    public final ByteBuffer finish;
+    public final Composite start;
+    public final Composite finish;
 
-    public ColumnSlice(ByteBuffer start, ByteBuffer finish)
+    public ColumnSlice(Composite start, Composite finish)
     {
         assert start != null && finish != null;
         this.start = start;
         this.finish = finish;
     }
 
-    public boolean isAlwaysEmpty(AbstractType<?> comparator, boolean reversed)
+    public boolean isAlwaysEmpty(CellNameType comparator, boolean reversed)
     {
-        Comparator<ByteBuffer> orderedComparator = reversed ? comparator.reverseComparator : comparator;
-        return (start.remaining() > 0 && finish.remaining() > 0 && orderedComparator.compare(start, finish) > 0);
+        Comparator<Composite> orderedComparator = reversed ? comparator.reverseComparator() : comparator;
+        return !start.isEmpty() && !finish.isEmpty() && orderedComparator.compare(start, finish) > 0;
     }
 
-    public boolean includes(Comparator<ByteBuffer> cmp, ByteBuffer name)
+    public boolean includes(Comparator<Composite> cmp, Composite name)
     {
-        return cmp.compare(start, name) <= 0 && (finish.equals(ByteBufferUtil.EMPTY_BYTE_BUFFER) || cmp.compare(finish, name) >= 0);
+        return cmp.compare(start, name) <= 0 && (finish.isEmpty() || cmp.compare(finish, name) >= 0);
     }
 
-    public boolean isBefore(Comparator<ByteBuffer> cmp, ByteBuffer name)
+    public boolean isBefore(Comparator<Composite> cmp, Composite name)
     {
-        return !finish.equals(ByteBufferUtil.EMPTY_BYTE_BUFFER) && cmp.compare(finish, name) < 0;
+        return !finish.isEmpty() && cmp.compare(finish, name) < 0;
     }
 
+    public boolean intersects(List<ByteBuffer> minCellNames, List<ByteBuffer> maxCellNames, CellNameType comparator, boolean reversed)
+    {
+        assert minCellNames.size() == maxCellNames.size();
+
+        Composite sStart = reversed ? finish : start;
+        Composite sEnd = reversed ? start : finish;
+
+        for (int i = 0; i < minCellNames.size(); i++)
+        {
+            AbstractType<?> t = comparator.subtype(i);
+            if (  (i < sEnd.size() && t.compare(sEnd.get(i), minCellNames.get(i)) < 0)
+               || (i < sStart.size() && t.compare(sStart.get(i), maxCellNames.get(i)) > 0))
+                return false;
+        }
+        return true;
+    }
 
     @Override
     public final int hashCode()
@@ -83,47 +102,49 @@ public class ColumnSlice
     @Override
     public String toString()
     {
-        return "[" + ByteBufferUtil.bytesToHex(start) + ", " + ByteBufferUtil.bytesToHex(finish) + "]";
+        return "[" + ByteBufferUtil.bytesToHex(start.toByteBuffer()) + ", " + ByteBufferUtil.bytesToHex(finish.toByteBuffer()) + "]";
     }
 
     public static class Serializer implements IVersionedSerializer<ColumnSlice>
     {
+        private final CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
+
         public void serialize(ColumnSlice cs, DataOutput out, int version) throws IOException
         {
-            ByteBufferUtil.writeWithShortLength(cs.start, out);
-            ByteBufferUtil.writeWithShortLength(cs.finish, out);
+            ISerializer<Composite> serializer = type.serializer();
+            serializer.serialize(cs.start, out);
+            serializer.serialize(cs.finish, out);
         }
 
         public ColumnSlice deserialize(DataInput in, int version) throws IOException
         {
-            ByteBuffer start = ByteBufferUtil.readWithShortLength(in);
-            ByteBuffer finish = ByteBufferUtil.readWithShortLength(in);
+            ISerializer<Composite> serializer = type.serializer();
+            Composite start = serializer.deserialize(in);
+            Composite finish = serializer.deserialize(in);
             return new ColumnSlice(start, finish);
         }
 
         public long serializedSize(ColumnSlice cs, int version)
         {
-            TypeSizes sizes = TypeSizes.NATIVE;
-
-            int startSize = cs.start.remaining();
-            int finishSize = cs.finish.remaining();
-
-            int size = 0;
-            size += sizes.sizeof((short) startSize) + startSize;
-            size += sizes.sizeof((short) finishSize) + finishSize;
-            return size;
+            ISerializer<Composite> serializer = type.serializer();
+            return serializer.serializedSize(cs.start, TypeSizes.NATIVE) + serializer.serializedSize(cs.finish, TypeSizes.NATIVE);
         }
     }
 
     public static class NavigableMapIterator extends AbstractIterator<Column>
     {
-        private final NavigableMap<ByteBuffer, Column> map;
+        private final NavigableMap<CellName, Column> map;
         private final ColumnSlice[] slices;
 
         private int idx = 0;
         private Iterator<Column> currentSlice;
 
-        public NavigableMapIterator(NavigableMap<ByteBuffer, Column> map, ColumnSlice[] slices)
+        public NavigableMapIterator(NavigableMap<CellName, Column> map, ColumnSlice[] slices)
         {
             this.map = map;
             this.slices = slices;
@@ -139,20 +160,20 @@ public class ColumnSlice
                 ColumnSlice slice = slices[idx++];
                 // Note: we specialize the case of start == "" and finish = "" because it is slightly more efficient, but also they have a specific
                 // meaning (namely, they always extend to the beginning/end of the range).
-                if (slice.start.remaining() == 0)
+                if (slice.start.isEmpty())
                 {
-                    if (slice.finish.remaining() == 0)
+                    if (slice.finish.isEmpty())
                         currentSlice = map.values().iterator();
                     else
-                        currentSlice = map.headMap(slice.finish, true).values().iterator();
+                        currentSlice = map.headMap(new FakeCell(slice.finish), true).values().iterator();
                 }
-                else if (slice.finish.remaining() == 0)
+                else if (slice.finish.isEmpty())
                 {
-                    currentSlice = map.tailMap(slice.start, true).values().iterator();
+                    currentSlice = map.tailMap(new FakeCell(slice.start), true).values().iterator();
                 }
                 else
                 {
-                    currentSlice = map.subMap(slice.start, true, slice.finish, true).values().iterator();
+                    currentSlice = map.subMap(new FakeCell(slice.start), true, new FakeCell(slice.finish), true).values().iterator();
                 }
             }
 
@@ -163,4 +184,73 @@ public class ColumnSlice
             return computeNext();
         }
     }
+
+    /*
+     * We need to take a slice (headMap/tailMap/subMap) of a CellName map
+     * based on a Composite. While CellName and Composite are comparable
+     * and so this should work, I haven't found how to generify it properly.
+     * So instead we create a "fake" CellName object that just encapsulates
+     * the prefix. It might not be a valid CellName with respect to the CF's
+     * CellNameType, but this doesn't matter here (since we only care about
+     * comparison). This is arguably a bit of a hack.
+     */
+    private static class FakeCell extends AbstractComposite implements CellName
+    {
+        private final Composite prefix;
+
+        private FakeCell(Composite prefix)
+        {
+            this.prefix = prefix;
+        }
+
+        public int size()
+        {
+            return prefix.size();
+        }
+
+        public ByteBuffer get(int i)
+        {
+            return prefix.get(i);
+        }
+
+        public Composite.EOC eoc()
+        {
+            return prefix.eoc();
+        }
+
+        public int clusteringSize()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public ColumnIdentifier cql3ColumnName()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public ByteBuffer collectionElement()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public boolean isCollectionCell()
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public boolean isSameCQL3RowAs(CellName other)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public CellName copy(Allocator allocator)
+        {
+            throw new UnsupportedOperationException();
+        }
+
+        public long memorySize()
+        {
+            throw new UnsupportedOperationException();
+        }
+    }
 }
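
The FakeCell wrapper above is the crux of the NavigableMapIterator change: a map keyed by full CellNames can still be sliced with a bare Composite bound, because only the shared comparator matters and the bound is never stored in the map. A toy illustration of the same trick, with hypothetical Named, FullKey and Bound types in place of CellName, Composite and FakeCell:

    import java.util.Comparator;
    import java.util.NavigableMap;
    import java.util.TreeMap;

    // A lightweight "name" view shared by real keys and query bounds.
    interface Named
    {
        String name();
    }

    // Stands in for a full CellName: carries more than the comparator needs.
    final class FullKey implements Named
    {
        final String name;
        final int payload;

        FullKey(String name, int payload)
        {
            this.name = name;
            this.payload = payload;
        }

        public String name()
        {
            return name;
        }
    }

    // Stands in for FakeCell: only valid for comparison, never stored in the map.
    final class Bound implements Named
    {
        final String name;

        Bound(String name)
        {
            this.name = name;
        }

        public String name()
        {
            return name;
        }
    }

    public final class SliceWithBound
    {
        public static void main(String[] args)
        {
            NavigableMap<Named, Integer> map = new TreeMap<Named, Integer>(new Comparator<Named>()
            {
                public int compare(Named a, Named b)
                {
                    return a.name().compareTo(b.name());
                }
            });
            map.put(new FullKey("a", 1), 1);
            map.put(new FullKey("b", 2), 2);
            map.put(new FullKey("c", 3), 3);

            // Slice the map with a Bound that is not a FullKey, mirroring
            // map.tailMap(new FakeCell(slice.start), true) in the patch.
            System.out.println(map.tailMap(new Bound("b"), true).values()); // [2, 3]
        }
    }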

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java b/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
index e749871..f5a6bc3 100644
--- a/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/ExtendedFilter.java
@@ -19,19 +19,21 @@ package org.apache.cassandra.db.filter;
 
 import java.nio.ByteBuffer;
 import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
+import org.apache.cassandra.db.marshal.CollectionType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 
 /**
@@ -129,7 +131,7 @@ public abstract class ExtendedFilter
      * @return true if the provided data satisfies all the expressions from
      * the clause of this filter.
      */
-    public abstract boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, ColumnNameBuilder builder, ByteBuffer collectionElement);
+    public abstract boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, Composite prefix, ByteBuffer collectionElement);
 
     public static boolean satisfies(int comparison, IndexExpression.Operator op)
     {
@@ -180,7 +182,7 @@ public abstract class ExtendedFilter
              * We also don't want to do it for paging ranges as the actual filter depends on the row key (it would
              * probably be possible to make it work but we won't really use it so we don't bother).
              */
-            if (cfs.metadata.hasCompositeComparator() || dataRange instanceof DataRange.Paging)
+            if (cfs.getComparator().isCompound() || dataRange instanceof DataRange.Paging)
                 return null;
 
             IDiskAtomFilter filter = dataRange.columnFilter(null); // ok since not a paging range
@@ -200,9 +202,9 @@ public abstract class ExtendedFilter
                 assert filter instanceof NamesQueryFilter;
                 if (!clause.isEmpty())
                 {
-                    SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfs.getComparator());
+                    SortedSet<CellName> columns = new TreeSet<CellName>(cfs.getComparator());
                     for (IndexExpression expr : clause)
-                        columns.add(expr.column);
+                        columns.add(cfs.getComparator().cellFromByteBuffer(expr.column));
                     columns.addAll(((NamesQueryFilter) filter).columns);
                     return ((NamesQueryFilter) filter).withUpdatedColumns(columns);
                 }
@@ -233,7 +235,7 @@ public abstract class ExtendedFilter
 
             for (IndexExpression expr : clause)
             {
-                if (data.getColumn(expr.column) == null)
+                if (data.getColumn(data.getComparator().cellFromByteBuffer(expr.column)) == null)
                 {
                     logger.debug("adding extraFilter to cover additional expressions");
                     return true;
@@ -251,18 +253,18 @@ public abstract class ExtendedFilter
              * 2) We don't yet allow non-indexed range slice with filters in CQL3 (i.e. this will never be
              * called by CFS.filter() for composites).
              */
-            assert !cfs.metadata.hasCompositeComparator();
-
             if (!needsExtraQuery(rowKey.key, data))
                 return null;
 
             // Note: for counters we must be careful to not add a column that was already there (to avoid overcount). That is
             // why we do the dance of avoiding to query any column we already have (it's also more efficient anyway)
-            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfs.getComparator());
+            SortedSet<CellName> columns = new TreeSet<CellName>(cfs.getComparator());
             for (IndexExpression expr : clause)
-                if (data.getColumn(expr.column) == null)
-                    columns.add(expr.column);
-
+            {
+                CellName name = data.getComparator().cellFromByteBuffer(expr.column);
+                if (data.getColumn(name) == null)
+                    columns.add(name);
+            }
             assert !columns.isEmpty();
             return new NamesQueryFilter(columns);
         }
@@ -279,7 +281,7 @@ public abstract class ExtendedFilter
             return pruned;
         }
 
-        public boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, ColumnNameBuilder builder, ByteBuffer collectionElement)
+        public boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, Composite prefix, ByteBuffer collectionElement)
         {
             for (IndexExpression expression : clause)
             {
@@ -290,7 +292,7 @@ public abstract class ExtendedFilter
                 {
                     // This can't happen with CQL3 as this should be rejected upfront. For thrift however,
                     // column names are not predefined. But that means the column name corresponds to an internal one.
-                    Column column = data.getColumn(expression.column);
+                    Column column = data.getColumn(data.getComparator().cellFromByteBuffer(expression.column));
                     if (column != null)
                     {
                         dataValue = column.value();
@@ -301,12 +303,12 @@ public abstract class ExtendedFilter
                 {
                     if (def.type.isCollection())
                     {
-                        if (!collectionSatisfies(def, data, builder, expression, collectionElement))
+                        if (!collectionSatisfies(def, data, prefix, expression, collectionElement))
                             return false;
                         continue;
                     }
 
-                    dataValue = extractDataValue(def, rowKey.key, data, builder);
+                    dataValue = extractDataValue(def, rowKey.key, data, prefix);
                     validator = def.type;
                 }
 
@@ -320,35 +322,33 @@ public abstract class ExtendedFilter
             return true;
         }
 
-        private static boolean collectionSatisfies(ColumnDefinition def, ColumnFamily data, ColumnNameBuilder builder, IndexExpression expr, ByteBuffer collectionElement)
+        private static boolean collectionSatisfies(ColumnDefinition def, ColumnFamily data, Composite prefix, IndexExpression expr, ByteBuffer collectionElement)
         {
             assert def.type.isCollection();
 
             CollectionType type = (CollectionType)def.type;
-            builder = builder.copy().add(def.name.bytes);
-
             switch (type.kind)
             {
                 case LIST:
                     assert collectionElement != null;
-                    return type.valueComparator().compare(data.getColumn(builder.add(collectionElement).build()).value(), expr.value) == 0;
+                    return type.valueComparator().compare(data.getColumn(data.getComparator().create(prefix, def.name, collectionElement)).value(), expr.value) == 0;
                 case SET:
-                    return data.getColumn(builder.add(expr.value).build()) != null;
+                    return data.getColumn(data.getComparator().create(prefix, def.name, expr.value)) != null;
                 case MAP:
                     if (expr.operator == IndexExpression.Operator.CONTAINS_KEY)
                     {
-                        return data.getColumn(builder.add(expr.value).build()) != null;
+                        return data.getColumn(data.getComparator().create(prefix, def.name, expr.value)) != null;
                     }
                     else
                     {
                         assert collectionElement != null;
-                        return type.valueComparator().compare(data.getColumn(builder.add(collectionElement).build()).value(), expr.value) == 0;
+                        return type.valueComparator().compare(data.getColumn(data.getComparator().create(prefix, def.name, collectionElement)).value(), expr.value) == 0;
                     }
             }
             throw new AssertionError();
         }
 
-        private ByteBuffer extractDataValue(ColumnDefinition def, ByteBuffer rowKey, ColumnFamily data, ColumnNameBuilder builder)
+        private ByteBuffer extractDataValue(ColumnDefinition def, ByteBuffer rowKey, ColumnFamily data, Composite prefix)
         {
             switch (def.kind)
             {
@@ -357,10 +357,13 @@ public abstract class ExtendedFilter
                          ? rowKey
                          : ((CompositeType)data.metadata().getKeyValidator()).split(rowKey)[def.position()];
                 case CLUSTERING_COLUMN:
-                    return builder.get(def.position());
+                    return prefix.get(def.position());
                 case REGULAR:
-                    ByteBuffer colName = builder == null ? def.name.bytes : builder.copy().add(def.name).build();
-                    Column column = data.getColumn(colName);
+                    CellName cname = prefix == null
+                                   ? data.getComparator().cellFromByteBuffer(def.name.bytes)
+                                   : data.getComparator().create(prefix, def.name);
+
+                    Column column = data.getColumn(cname);
                     return column == null ? null : column.value();
                 case COMPACT_VALUE:
                     assert data.getColumnCount() == 1;
@@ -392,7 +395,7 @@ public abstract class ExtendedFilter
             return data;
         }
 
-        public boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, ColumnNameBuilder builder, ByteBuffer collectionElement)
+        public boolean isSatisfiedBy(DecoratedKey rowKey, ColumnFamily data, Composite prefix, ByteBuffer collectionElement)
         {
             return true;
         }
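
As the comment in getExtraFilter above notes, the extra filter must not re-query columns that are already present in the data, or counter cells would be counted twice. Stripped of the CellName conversions, the selection of the missing columns reduces to the following sketch (hypothetical names, illustration only):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public final class ExtraQuery
    {
        public static void main(String[] args)
        {
            // Columns the first read already returned.
            Set<String> alreadyFetched = new HashSet<String>(Arrays.asList("a", "b"));
            // Columns referenced by the index expressions.
            List<String> expressionColumns = Arrays.asList("a", "c", "d");

            // Only the missing ones go into the extra names query, so cells that
            // were already read are never fetched (and counted) twice.
            SortedSet<String> missing = new TreeSet<String>();
            for (String column : expressionColumns)
                if (!alreadyFetched.contains(column))
                    missing.add(column);

            System.out.println(missing); // [c, d]
        }
    }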

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/IDiskAtomFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/IDiskAtomFilter.java b/src/java/org/apache/cassandra/db/filter/IDiskAtomFilter.java
index 69a8950..b55cfd7 100644
--- a/src/java/org/apache/cassandra/db/filter/IDiskAtomFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/IDiskAtomFilter.java
@@ -22,11 +22,11 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Comparator;
 import java.util.Iterator;
-import java.nio.ByteBuffer;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
@@ -68,53 +68,53 @@ public interface IDiskAtomFilter
      */
     public void collectReducedColumns(ColumnFamily container, Iterator<Column> reducedColumns, int gcBefore, long now);
 
-    public Comparator<Column> getColumnComparator(AbstractType<?> comparator);
+    public Comparator<Column> getColumnComparator(CellNameType comparator);
 
     public boolean isReversed();
     public void updateColumnsLimit(int newLimit);
 
     public int getLiveCount(ColumnFamily cf, long now);
-    public ColumnCounter columnCounter(AbstractType<?> comparator, long now);
+    public ColumnCounter columnCounter(CellNameType comparator, long now);
 
     public IDiskAtomFilter cloneShallow();
-    public boolean maySelectPrefix(Comparator<ByteBuffer> cmp, ByteBuffer prefix);
+    public boolean maySelectPrefix(Comparator<Composite> cmp, Composite prefix);
 
     boolean shouldInclude(SSTableReader sstable);
 
     public static class Serializer implements IVersionedSerializer<IDiskAtomFilter>
     {
-        public static Serializer instance = new Serializer();
+        private final CellNameType type;
+
+        public Serializer(CellNameType type)
+        {
+            this.type = type;
+        }
 
         public void serialize(IDiskAtomFilter filter, DataOutput out, int version) throws IOException
         {
             if (filter instanceof SliceQueryFilter)
             {
                 out.writeByte(0);
-                SliceQueryFilter.serializer.serialize((SliceQueryFilter)filter, out, version);
+                type.sliceQueryFilterSerializer().serialize((SliceQueryFilter)filter, out, version);
             }
             else
             {
                 out.writeByte(1);
-                NamesQueryFilter.serializer.serialize((NamesQueryFilter)filter, out, version);
+                type.namesQueryFilterSerializer().serialize((NamesQueryFilter)filter, out, version);
             }
         }
 
         public IDiskAtomFilter deserialize(DataInput in, int version) throws IOException
         {
-            throw new UnsupportedOperationException();
-        }
-
-        public IDiskAtomFilter deserialize(DataInput in, int version, AbstractType<?> comparator) throws IOException
-        {
-            int type = in.readByte();
-            if (type == 0)
+            int b = in.readByte();
+            if (b == 0)
             {
-                return SliceQueryFilter.serializer.deserialize(in, version);
+                return type.sliceQueryFilterSerializer().deserialize(in, version);
             }
             else
             {
-                assert type == 1;
-                return NamesQueryFilter.serializer.deserialize(in, version, comparator);
+                assert b == 1;
+                return type.namesQueryFilterSerializer().deserialize(in, version);
             }
         }
 
@@ -122,9 +122,9 @@ public interface IDiskAtomFilter
         {
             int size = 1;
             if (filter instanceof SliceQueryFilter)
-                size += SliceQueryFilter.serializer.serializedSize((SliceQueryFilter)filter, version);
+                size += type.sliceQueryFilterSerializer().serializedSize((SliceQueryFilter)filter, version);
             else
-                size += NamesQueryFilter.serializer.serializedSize((NamesQueryFilter)filter, version);
+                size += type.namesQueryFilterSerializer().serializedSize((NamesQueryFilter)filter, version);
             return size;
         }
     }
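
Because the Serializer above now receives its CellNameType at construction time, deserialize no longer needs a comparator argument: the one-byte tag alone picks the concrete filter serializer. The shape of that dispatch, reduced to a standalone toy with hypothetical Filter, Slice and Names types (not the Cassandra classes):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class TaggedSerialization
    {
        interface Filter {}

        static final class Slice implements Filter
        {
            final int count;
            Slice(int count) { this.count = count; }
        }

        static final class Names implements Filter
        {
            final int size;
            Names(int size) { this.size = size; }
        }

        // One byte picks the concrete serializer; each side already holds its comparator.
        static void serialize(Filter f, DataOutput out) throws IOException
        {
            if (f instanceof Slice)
            {
                out.writeByte(0);
                out.writeInt(((Slice) f).count);
            }
            else
            {
                out.writeByte(1);
                out.writeInt(((Names) f).size);
            }
        }

        static Filter deserialize(DataInput in) throws IOException
        {
            int b = in.readByte();
            return b == 0 ? new Slice(in.readInt()) : new Names(in.readInt());
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            serialize(new Slice(100), new DataOutputStream(bytes));
            Filter f = deserialize(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(f instanceof Slice); // true
        }
    }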

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/NamesQueryFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/NamesQueryFilter.java b/src/java/org/apache/cassandra/db/filter/NamesQueryFilter.java
index f65bf3a..9058101 100644
--- a/src/java/org/apache/cassandra/db/filter/NamesQueryFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/NamesQueryFilter.java
@@ -20,7 +20,6 @@ package org.apache.cassandra.db.filter;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.SortedSet;
@@ -32,46 +31,40 @@ import com.google.common.collect.AbstractIterator;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 import org.apache.cassandra.db.columniterator.SSTableNamesIterator;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
 
 public class NamesQueryFilter implements IDiskAtomFilter
 {
-    public static final Serializer serializer = new Serializer();
-
-    public final SortedSet<ByteBuffer> columns;
+    public final SortedSet<CellName> columns;
 
     // If true, getLiveCount will always return either 0 or 1. This uses the fact that we know 
     // CQL3 will never use a name filter with cell names spanning multiple CQL3 rows.
     private final boolean countCQL3Rows;
 
-    public NamesQueryFilter(SortedSet<ByteBuffer> columns)
+    public NamesQueryFilter(SortedSet<CellName> columns)
     {
         this(columns, false);
     }
 
-    public NamesQueryFilter(SortedSet<ByteBuffer> columns, boolean countCQL3Rows)
+    public NamesQueryFilter(SortedSet<CellName> columns, boolean countCQL3Rows)
     {
         this.columns = columns;
         this.countCQL3Rows = countCQL3Rows;
     }
 
-    public NamesQueryFilter(ByteBuffer column)
-    {
-        this(FBUtilities.singleton(column));
-    }
-
     public NamesQueryFilter cloneShallow()
     {
         // NQF is immutable as far as shallow cloning is concerned, so save the allocation.
         return this;
     }
 
-    public NamesQueryFilter withUpdatedColumns(SortedSet<ByteBuffer> newColumns)
+    public NamesQueryFilter withUpdatedColumns(SortedSet<CellName> newColumns)
     {
        return new NamesQueryFilter(newColumns, countCQL3Rows);
     }
@@ -99,9 +92,9 @@ public class NamesQueryFilter implements IDiskAtomFilter
             container.addIfRelevant(reducedColumns.next(), tester, gcBefore);
     }
 
-    public Comparator<Column> getColumnComparator(AbstractType<?> comparator)
+    public Comparator<Column> getColumnComparator(CellNameType comparator)
     {
-        return comparator.columnComparator;
+        return comparator.columnComparator();
     }
 
     @Override
@@ -137,11 +130,11 @@ public class NamesQueryFilter implements IDiskAtomFilter
         return count;
     }
 
-    public boolean maySelectPrefix(Comparator<ByteBuffer> cmp, ByteBuffer prefix)
+    public boolean maySelectPrefix(Comparator<Composite> cmp, Composite prefix)
     {
-        for (ByteBuffer column : columns)
+        for (CellName column : columns)
         {
-            if (ByteBufferUtil.isPrefix(prefix, column))
+            if (prefix.isPrefixOf(column))
                 return true;
         }
         return false;
@@ -157,7 +150,7 @@ public class NamesQueryFilter implements IDiskAtomFilter
         return countCQL3Rows;
     }
 
-    public ColumnCounter columnCounter(AbstractType<?> comparator, long now)
+    public ColumnCounter columnCounter(CellNameType comparator, long now)
     {
         return countCQL3Rows
              ? new ColumnCounter.GroupByPrefix(now, null, 0)
@@ -168,9 +161,9 @@ public class NamesQueryFilter implements IDiskAtomFilter
     {
         private final ColumnFamily cf;
         private final DecoratedKey key;
-        private final Iterator<ByteBuffer> iter;
+        private final Iterator<CellName> iter;
 
-        public ByNameColumnIterator(Iterator<ByteBuffer> iter, ColumnFamily cf, DecoratedKey key)
+        public ByNameColumnIterator(Iterator<CellName> iter, ColumnFamily cf, DecoratedKey key)
         {
             this.iter = iter;
             this.cf = cf;
@@ -191,7 +184,7 @@ public class NamesQueryFilter implements IDiskAtomFilter
         {
             while (iter.hasNext())
             {
-                ByteBuffer current = iter.next();
+                CellName current = iter.next();
                 Column column = cf.getColumn(current);
                 if (column != null)
                     return column;
@@ -204,27 +197,31 @@ public class NamesQueryFilter implements IDiskAtomFilter
 
     public static class Serializer implements IVersionedSerializer<NamesQueryFilter>
     {
+        private CellNameType type;
+
+        public Serializer(CellNameType type)
+        {
+            this.type = type;
+        }
+
         public void serialize(NamesQueryFilter f, DataOutput out, int version) throws IOException
         {
             out.writeInt(f.columns.size());
-            for (ByteBuffer cName : f.columns)
+            ISerializer<CellName> serializer = type.cellSerializer();
+            for (CellName cName : f.columns)
             {
-                ByteBufferUtil.writeWithShortLength(cName, out);
+                serializer.serialize(cName, out);
             }
             out.writeBoolean(f.countCQL3Rows);
         }
 
         public NamesQueryFilter deserialize(DataInput in, int version) throws IOException
         {
-            throw new UnsupportedOperationException();
-        }
-
-        public NamesQueryFilter deserialize(DataInput in, int version, AbstractType comparator) throws IOException
-        {
             int size = in.readInt();
-            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(comparator);
+            SortedSet<CellName> columns = new TreeSet<CellName>(type);
+            ISerializer<CellName> serializer = type.cellSerializer();
             for (int i = 0; i < size; ++i)
-                columns.add(ByteBufferUtil.readWithShortLength(in));
+                columns.add(serializer.deserialize(in));
             boolean countCQL3Rows = in.readBoolean();
             return new NamesQueryFilter(columns, countCQL3Rows);
         }
@@ -233,11 +230,9 @@ public class NamesQueryFilter implements IDiskAtomFilter
         {
             TypeSizes sizes = TypeSizes.NATIVE;
             int size = sizes.sizeof(f.columns.size());
-            for (ByteBuffer cName : f.columns)
-            {
-                int cNameSize = cName.remaining();
-                size += sizes.sizeof((short) cNameSize) + cNameSize;
-            }
+            ISerializer<CellName> serializer = type.cellSerializer();
+            for (CellName cName : f.columns)
+                size += serializer.serializedSize(cName, sizes);
             size += sizes.sizeof(f.countCQL3Rows);
             return size;
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/QueryFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/QueryFilter.java b/src/java/org/apache/cassandra/db/filter/QueryFilter.java
index 4f71f3a..33e8904 100644
--- a/src/java/org/apache/cassandra/db/filter/QueryFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/QueryFilter.java
@@ -17,12 +17,13 @@
  */
 package org.apache.cassandra.db.filter;
 
-import java.nio.ByteBuffer;
 import java.util.*;
 
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.utils.HeapAllocator;
@@ -194,8 +195,8 @@ public class QueryFilter
      */
     public static QueryFilter getSliceFilter(DecoratedKey key,
                                              String cfName,
-                                             ByteBuffer start,
-                                             ByteBuffer finish,
+                                             Composite start,
+                                             Composite finish,
                                              boolean reversed,
                                              int limit,
                                              long timestamp)
@@ -218,19 +219,11 @@ public class QueryFilter
      * @param cfName column family to query
      * @param columns the column names to restrict the results to, sorted in comparator order
      */
-    public static QueryFilter getNamesFilter(DecoratedKey key, String cfName, SortedSet<ByteBuffer> columns, long timestamp)
+    public static QueryFilter getNamesFilter(DecoratedKey key, String cfName, SortedSet<CellName> columns, long timestamp)
     {
         return new QueryFilter(key, cfName, new NamesQueryFilter(columns), timestamp);
     }
 
-    /**
-     * convenience method for creating a name filter matching a single column
-     */
-    public static QueryFilter getNamesFilter(DecoratedKey key, String cfName, ByteBuffer column, long timestamp)
-    {
-        return new QueryFilter(key, cfName, new NamesQueryFilter(column), timestamp);
-    }
-
     @Override
     public String toString()
     {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/filter/SliceQueryFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/SliceQueryFilter.java b/src/java/org/apache/cassandra/db/filter/SliceQueryFilter.java
index 794d31c..c3b019f 100644
--- a/src/java/org/apache/cassandra/db/filter/SliceQueryFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/SliceQueryFilter.java
@@ -17,10 +17,10 @@
  */
 package org.apache.cassandra.db.filter;
 
+import java.nio.ByteBuffer;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.*;
 
 import org.slf4j.Logger;
@@ -30,8 +30,9 @@ import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 import org.apache.cassandra.db.columniterator.SSTableSliceIterator;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
@@ -40,7 +41,6 @@ import org.apache.cassandra.tracing.Tracing;
 public class SliceQueryFilter implements IDiskAtomFilter
 {
     private static final Logger logger = LoggerFactory.getLogger(SliceQueryFilter.class);
-    public static final Serializer serializer = new Serializer();
 
     public final ColumnSlice[] slices;
     public final boolean reversed;
@@ -50,14 +50,24 @@ public class SliceQueryFilter implements IDiskAtomFilter
     // Not serialized, just a hack for range slices to find the number of live columns counted, even when we group
     private ColumnCounter columnCounter;
 
-    public SliceQueryFilter(ByteBuffer start, ByteBuffer finish, boolean reversed, int count)
+    public SliceQueryFilter(Composite start, Composite finish, boolean reversed, int count)
+    {
+        this(new ColumnSlice(start, finish), reversed, count);
+    }
+
+    public SliceQueryFilter(Composite start, Composite finish, boolean reversed, int count, int compositesToGroup)
+    {
+        this(new ColumnSlice(start, finish), reversed, count, compositesToGroup);
+    }
+
+    public SliceQueryFilter(ColumnSlice slice, boolean reversed, int count)
     {
-        this(new ColumnSlice[] { new ColumnSlice(start, finish) }, reversed, count);
+        this(new ColumnSlice[]{ slice }, reversed, count);
     }
 
-    public SliceQueryFilter(ByteBuffer start, ByteBuffer finish, boolean reversed, int count, int compositesToGroup)
+    public SliceQueryFilter(ColumnSlice slice, boolean reversed, int count, int compositesToGroup)
     {
-        this(new ColumnSlice[] { new ColumnSlice(start, finish) }, reversed, count, compositesToGroup);
+        this(new ColumnSlice[]{ slice }, reversed, count, compositesToGroup);
     }
 
     /**
@@ -92,9 +102,9 @@ public class SliceQueryFilter implements IDiskAtomFilter
         return new SliceQueryFilter(newSlices, reversed, count, compositesToGroup);
     }
 
-    public SliceQueryFilter withUpdatedStart(ByteBuffer newStart, AbstractType<?> comparator)
+    public SliceQueryFilter withUpdatedStart(Composite newStart, CellNameType comparator)
     {
-        Comparator<ByteBuffer> cmp = reversed ? comparator.reverseComparator : comparator;
+        Comparator<Composite> cmp = reversed ? comparator.reverseComparator() : comparator;
 
         List<ColumnSlice> newSlices = new ArrayList<ColumnSlice>();
         boolean pastNewStart = false;
@@ -121,7 +131,7 @@ public class SliceQueryFilter implements IDiskAtomFilter
         return withUpdatedSlices(newSlices.toArray(new ColumnSlice[newSlices.size()]));
     }
 
-    public SliceQueryFilter withUpdatedSlice(ByteBuffer start, ByteBuffer finish)
+    public SliceQueryFilter withUpdatedSlice(Composite start, Composite finish)
     {
         return new SliceQueryFilter(new ColumnSlice[]{ new ColumnSlice(start, finish) }, reversed, count, compositesToGroup);
     }
@@ -172,9 +182,9 @@ public class SliceQueryFilter implements IDiskAtomFilter
         return new SSTableSliceIterator(sstable, file, key, slices, reversed, indexEntry);
     }
 
-    public Comparator<Column> getColumnComparator(AbstractType<?> comparator)
+    public Comparator<Column> getColumnComparator(CellNameType comparator)
     {
-        return reversed ? comparator.columnReverseComparator : comparator.columnComparator;
+        return reversed ? comparator.columnReverseComparator() : comparator.columnComparator();
     }
 
     public void collectReducedColumns(ColumnFamily container, Iterator<Column> reducedColumns, int gcBefore, long now)
@@ -219,14 +229,14 @@ public class SliceQueryFilter implements IDiskAtomFilter
         return columnCounter(cf.getComparator(), now).countAll(cf).live();
     }
 
-    public ColumnCounter columnCounter(AbstractType<?> comparator, long now)
+    public ColumnCounter columnCounter(CellNameType comparator, long now)
     {
         if (compositesToGroup < 0)
             return new ColumnCounter(now);
         else if (compositesToGroup == 0)
             return new ColumnCounter.GroupByPrefix(now, null, 0);
         else
-            return new ColumnCounter.GroupByPrefix(now, (CompositeType)comparator, compositesToGroup);
+            return new ColumnCounter.GroupByPrefix(now, comparator, compositesToGroup);
     }
 
     public void trim(ColumnFamily cf, int trimTo, long now)
@@ -256,17 +266,17 @@ public class SliceQueryFilter implements IDiskAtomFilter
         }
     }
 
-    public ByteBuffer start()
+    public Composite start()
     {
         return this.slices[0].start;
     }
 
-    public ByteBuffer finish()
+    public Composite finish()
     {
         return this.slices[slices.length - 1].finish;
     }
 
-    public void setStart(ByteBuffer start)
+    public void setStart(Composite start)
     {
         assert slices.length == 1;
         this.slices[0] = new ColumnSlice(start, this.slices[0].finish);
@@ -303,7 +313,7 @@ public class SliceQueryFilter implements IDiskAtomFilter
         count = newLimit;
     }
 
-    public boolean maySelectPrefix(Comparator<ByteBuffer> cmp, ByteBuffer prefix)
+    public boolean maySelectPrefix(Comparator<Composite> cmp, Composite prefix)
     {
         for (ColumnSlice slice : slices)
             if (slice.includes(cmp, prefix))
@@ -316,21 +326,32 @@ public class SliceQueryFilter implements IDiskAtomFilter
         List<ByteBuffer> minColumnNames = sstable.getSSTableMetadata().minColumnNames;
         List<ByteBuffer> maxColumnNames = sstable.getSSTableMetadata().maxColumnNames;
         assert minColumnNames.size() == maxColumnNames.size();
-        AbstractType<?> comparator = sstable.metadata.comparator;
+        CellNameType comparator = sstable.metadata.comparator;
 
         if (minColumnNames.isEmpty() || maxColumnNames.isEmpty())
             return true;
 
-        return comparator.intersects(minColumnNames, maxColumnNames, this);
+        for (ColumnSlice slice : slices)
+            if (slice.intersects(minColumnNames, maxColumnNames, comparator, reversed))
+                return true;
+
+        return false;
     }
 
     public static class Serializer implements IVersionedSerializer<SliceQueryFilter>
     {
+        private CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
+
         public void serialize(SliceQueryFilter f, DataOutput out, int version) throws IOException
         {
             out.writeInt(f.slices.length);
             for (ColumnSlice slice : f.slices)
-                ColumnSlice.serializer.serialize(slice, out, version);
+                type.sliceSerializer().serialize(slice, out, version);
             out.writeBoolean(f.reversed);
             int count = f.count;
             out.writeInt(count);
@@ -343,7 +364,7 @@ public class SliceQueryFilter implements IDiskAtomFilter
             ColumnSlice[] slices;
             slices = new ColumnSlice[in.readInt()];
             for (int i = 0; i < slices.length; i++)
-                slices[i] = ColumnSlice.serializer.deserialize(in, version);
+                slices[i] = type.sliceSerializer().deserialize(in, version);
             boolean reversed = in.readBoolean();
             int count = in.readInt();
             int compositesToGroup = -1;
@@ -359,7 +380,7 @@ public class SliceQueryFilter implements IDiskAtomFilter
             int size = 0;
             size += sizes.sizeof(f.slices.length);
             for (ColumnSlice slice : f.slices)
-                size += ColumnSlice.serializer.serializedSize(slice, version);
+                size += type.sliceSerializer().serializedSize(slice, version);
             size += sizes.sizeof(f.reversed);
             size += sizes.sizeof(f.count);
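
shouldInclude above now defers to ColumnSlice.intersects, which checks the slice bounds component by component against the sstable's min/max cell names and skips the sstable when, on some component, the slice ends before the minimum or starts after the maximum. The same test in isolation, sketched with String components and a hypothetical helper rather than the Cassandra types:

    import java.util.Arrays;
    import java.util.List;

    public final class SliceIntersection
    {
        static boolean intersects(List<String> start, List<String> finish,
                                  List<String> minNames, List<String> maxNames)
        {
            assert minNames.size() == maxNames.size();
            for (int i = 0; i < minNames.size(); i++)
            {
                // Reject if the slice ends before the sstable's minimum on this
                // component, or starts after its maximum.
                if ((i < finish.size() && finish.get(i).compareTo(minNames.get(i)) < 0)
                    || (i < start.size() && start.get(i).compareTo(maxNames.get(i)) > 0))
                    return false;
            }
            return true;
        }

        public static void main(String[] args)
        {
            List<String> min = Arrays.asList("b", "10");
            List<String> max = Arrays.asList("f", "90");
            System.out.println(intersects(Arrays.asList("c"), Arrays.asList("d"), min, max)); // true
            System.out.println(intersects(Arrays.asList("g"), Arrays.asList("h"), min, max)); // false: starts after max
        }
    }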
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java b/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
index b7593ad..ce7a021 100644
--- a/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
+++ b/src/java/org/apache/cassandra/db/index/AbstractSimplePerColumnSecondaryIndex.java
@@ -22,6 +22,8 @@ import java.nio.ByteBuffer;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -46,7 +48,7 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
 
         columnDef = columnDefs.iterator().next();
 
-        AbstractType indexComparator = SecondaryIndex.getIndexComparator(baseCfs.metadata, columnDef);
+        CellNameType indexComparator = SecondaryIndex.getIndexComparator(baseCfs.metadata, columnDef);
         CFMetaData indexedCfMetadata = CFMetaData.newIndexMetadata(baseCfs.metadata, columnDef, indexComparator);
         indexCfs = ColumnFamilyStore.createColumnFamilyStore(baseCfs.keyspace,
                                                              indexedCfMetadata.cfName,
@@ -65,7 +67,7 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
         return new DecoratedKey(new LocalToken(getIndexKeyComparator(), value), value);
     }
 
-    protected abstract ByteBuffer makeIndexColumnName(ByteBuffer rowKey, Column column);
+    protected abstract CellName makeIndexColumnName(ByteBuffer rowKey, Column column);
 
     protected abstract ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column);
 
@@ -88,9 +90,7 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
         DecoratedKey valueKey = getIndexKeyFor(getIndexedValue(rowKey, column));
         int localDeletionTime = (int) (System.currentTimeMillis() / 1000);
         ColumnFamily cfi = ArrayBackedSortedColumns.factory.create(indexCfs.metadata);
-        ByteBuffer name = makeIndexColumnName(rowKey, column);
-        assert name.remaining() > 0 && name.remaining() <= Column.MAX_NAME_LENGTH : name.remaining();
-        cfi.addTombstone(name, localDeletionTime, column.timestamp());
+        cfi.addTombstone(makeIndexColumnName(rowKey, column), localDeletionTime, column.timestamp());
         indexCfs.apply(valueKey, cfi, SecondaryIndexManager.nullUpdater);
         if (logger.isDebugEnabled())
             logger.debug("removed index entry for cleaned-up value {}:{}", valueKey, cfi);
@@ -100,8 +100,7 @@ public abstract class AbstractSimplePerColumnSecondaryIndex extends PerColumnSec
     {
         DecoratedKey valueKey = getIndexKeyFor(getIndexedValue(rowKey, column));
         ColumnFamily cfi = ArrayBackedSortedColumns.factory.create(indexCfs.metadata);
-        ByteBuffer name = makeIndexColumnName(rowKey, column);
-        assert name.remaining() > 0 && name.remaining() <= Column.MAX_NAME_LENGTH : name.remaining();
+        CellName name = makeIndexColumnName(rowKey, column);
         if (column instanceof ExpiringColumn)
         {
             ExpiringColumn ec = (ExpiringColumn)column;

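Side note for readers following the API change above: the patch swaps the raw ByteBuffer index cell name (plus a length assertion repeated at every call site) for a typed CellName. The standalone sketch below is an illustration of that idea only, not org.apache.cassandra.db.composites.CellName; the length constant is a stand-in for the patch's Column.MAX_NAME_LENGTH, which is assumed here to be an unsigned-short bound.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Standalone illustration only -- not Cassandra's CellName. A typed index cell
// name that checks its own length once, so the tombstone and insert paths no
// longer need the per-call-site assertion the hunk above removes.
final class IndexCellNameSketch
{
    // Stand-in for Column.MAX_NAME_LENGTH (assumed unsigned-short bound).
    private static final int MAX_NAME_LENGTH = 0xFFFF;

    private final ByteBuffer bytes;

    IndexCellNameSketch(ByteBuffer bytes)
    {
        int len = bytes.remaining();
        if (len == 0 || len > MAX_NAME_LENGTH)
            throw new IllegalArgumentException("invalid cell name length: " + len);
        this.bytes = bytes.duplicate();
    }

    ByteBuffer toByteBuffer()
    {
        return bytes.duplicate();
    }

    public static void main(String[] args)
    {
        ByteBuffer raw = ByteBuffer.wrap("42:rowkey".getBytes(StandardCharsets.UTF_8));
        IndexCellNameSketch name = new IndexCellNameSketch(raw);
        System.out.println("index cell name of " + name.toByteBuffer().remaining() + " bytes accepted");
    }
}
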
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/index/SecondaryIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/SecondaryIndex.java b/src/java/org/apache/cassandra/db/index/SecondaryIndex.java
index 43ecb61..38d09be 100644
--- a/src/java/org/apache/cassandra/db/index/SecondaryIndex.java
+++ b/src/java/org/apache/cassandra/db/index/SecondaryIndex.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -273,20 +274,9 @@ public abstract class SecondaryIndex
     }
 
     /**
-     * Returns true if the provided column name is indexed by this secondary index.
-     *
-     * The default implement checks whether the name is one the columnDef name,
-     * but this should be overriden but subclass if needed.
+     * Returns true if the provided cell name is indexed by this secondary index.
      */
-    public boolean indexes(ByteBuffer name)
-    {
-        for (ColumnDefinition columnDef : columnDefs)
-        {
-            if (baseCfs.getComparator().compare(columnDef.name.bytes, name) == 0)
-                return true;
-        }
-        return false;
-    }
+    public abstract boolean indexes(CellName name);
 
     /**
      * This is the primary way to create a secondary index instance for a CF column.
@@ -342,12 +332,12 @@ public abstract class SecondaryIndex
      * Note: it would be cleaner to have this be a member method. However we need this when opening indexes
      * sstables, but by then the CFS won't be fully initiated, so the SecondaryIndex object won't be accessible.
      */
-    public static AbstractType<?> getIndexComparator(CFMetaData baseMetadata, ColumnDefinition cdef)
+    public static CellNameType getIndexComparator(CFMetaData baseMetadata, ColumnDefinition cdef)
     {
         switch (cdef.getIndexType())
         {
             case KEYS:
-                return keyComparator;
+                return new SimpleDenseCellNameType(keyComparator);
             case COMPOSITES:
                 return CompositesIndex.getIndexComparator(baseMetadata, cdef);
             case CUSTOM:

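Since the ByteBuffer-comparing default of indexes() is removed above and the method becomes abstract over CellName, each concrete index now has to answer the question itself. The concrete subclasses are outside this hunk, so the sketch below is only a guess at the shape of such an override, written against minimal stand-in types rather than the real CellName and ColumnDefinition classes.

import java.nio.ByteBuffer;
import java.util.List;

// Minimal stand-ins; the real types live in org.apache.cassandra.db.composites
// and org.apache.cassandra.config and have richer APIs than shown here.
interface CellNameStandIn
{
    // Assumed accessor: the component of the cell name carrying the CQL3 column name.
    ByteBuffer cql3ColumnName();
}

final class PerColumnIndexSketch
{
    private final List<ByteBuffer> indexedNames;

    PerColumnIndexSketch(List<ByteBuffer> indexedNames)
    {
        this.indexedNames = indexedNames;
    }

    // Possible shape of an indexes(CellName) override: compare the relevant
    // component of the cell name against the indexed column definitions.
    boolean indexes(CellNameStandIn name)
    {
        for (ByteBuffer indexed : indexedNames)
            if (indexed.equals(name.cql3ColumnName()))
                return true;
        return false;
    }
}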

[11/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
index ae6c15c..e0e2693 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
@@ -31,7 +31,6 @@ import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.IndexType;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.index.SecondaryIndex;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.service.ClientState;
@@ -91,7 +90,7 @@ public class CreateIndexStatement extends SchemaAlteringStatement
             throw new InvalidRequestException("Cannot specify index class for a non-CUSTOM index");
 
         // TODO: we could lift that limitation
-        if (cfm.isDense() && cd.kind != ColumnDefinition.Kind.REGULAR)
+        if (cfm.comparator.isDense() && cd.kind != ColumnDefinition.Kind.REGULAR)
             throw new InvalidRequestException(String.format("Secondary index on %s column %s is not yet supported for compact table", cd.kind, columnName));
 
         if (cd.kind == ColumnDefinition.Kind.PARTITION_KEY && cd.isOnAllComponents())
@@ -111,7 +110,7 @@ public class CreateIndexStatement extends SchemaAlteringStatement
         {
             cd.setIndexType(IndexType.CUSTOM, Collections.singletonMap(SecondaryIndex.CUSTOM_INDEX_OPTION_NAME, indexClass));
         }
-        else if (cfm.hasCompositeComparator())
+        else if (cfm.comparator.isCompound())
         {
             Map<String, String> options = Collections.emptyMap();
             // For now, we only allow indexing values for collections, but we could later allow
@@ -119,8 +118,7 @@ public class CreateIndexStatement extends SchemaAlteringStatement
             // lives easier then.
             if (cd.type.isCollection())
                 options = ImmutableMap.of("index_values", "");
-
-            cd.setIndexType(IndexType.COMPOSITES, options);
+            cd.setIndexType(IndexType.COMPOSITES, Collections.<String, String>emptyMap());
         }
         else
         {

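The index-type decision above now keys off the comparator (isDense()/isCompound()) instead of flags on CFMetaData. The snippet below is a condensed restatement of that choice for illustration only; the final else branch lies outside the quoted context and is assumed here to pick KEYS.

// Illustration only, not the statement's actual code.
final class IndexTypeChoiceSketch
{
    enum IndexKind { CUSTOM, COMPOSITES, KEYS }

    static IndexKind choose(boolean customClassGiven, boolean comparatorIsCompound)
    {
        if (customClassGiven)
            return IndexKind.CUSTOM;
        return comparatorIsCompound ? IndexKind.COMPOSITES : IndexKind.KEYS;
    }

    public static void main(String[] args)
    {
        System.out.println(choose(false, true));  // COMPOSITES for compound (CQL3) comparators
        System.out.println(choose(false, false)); // KEYS otherwise (assumed else branch)
    }
}
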
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
index 809e0dc..8f934e3 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.ColumnFamilyType;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.AlreadyExistsException;
@@ -43,7 +44,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 /** A <code>CREATE TABLE</code> parsed from a CQL query statement. */
 public class CreateTableStatement extends SchemaAlteringStatement
 {
-    public AbstractType<?> comparator;
+    public CellNameType comparator;
     private AbstractType<?> defaultValidator;
     private AbstractType<?> keyValidator;
 
@@ -87,22 +88,12 @@ public class CreateTableStatement extends SchemaAlteringStatement
     }
 
     // Column definitions
-    private Map<ByteBuffer, ColumnDefinition> getColumns(CFMetaData cfm)
+    private List<ColumnDefinition> getColumns(CFMetaData cfm)
     {
-        Map<ByteBuffer, ColumnDefinition> columnDefs = new HashMap<ByteBuffer, ColumnDefinition>();
-        Integer componentIndex = null;
-        if (cfm.hasCompositeComparator())
-        {
-            CompositeType ct = (CompositeType) comparator;
-            componentIndex = ct.types.get(ct.types.size() - 1) instanceof ColumnToCollectionType
-                           ? ct.types.size() - 2
-                           : ct.types.size() - 1;
-        }
-
+        List<ColumnDefinition> columnDefs = new ArrayList<>(columns.size());
+        Integer componentIndex = comparator.isCompound() ? comparator.clusteringPrefixSize() : null;
         for (Map.Entry<ColumnIdentifier, AbstractType> col : columns.entrySet())
-        {
-            columnDefs.put(col.getKey().bytes, ColumnDefinition.regularDef(cfm, col.getKey().bytes, col.getValue(), componentIndex));
-        }
+            columnDefs.add(ColumnDefinition.regularDef(cfm, col.getKey().bytes, col.getValue(), componentIndex));
 
         return columnDefs;
     }
@@ -138,8 +129,7 @@ public class CreateTableStatement extends SchemaAlteringStatement
         newCFMD = new CFMetaData(keyspace(),
                                  columnFamily(),
                                  ColumnFamilyType.Standard,
-                                 comparator,
-                                 null);
+                                 comparator);
         applyPropertiesTo(newCFMD);
         return newCFMD;
     }
@@ -148,10 +138,10 @@ public class CreateTableStatement extends SchemaAlteringStatement
     {
         cfmd.defaultValidator(defaultValidator)
             .keyValidator(keyValidator)
-            .columnMetadata(getColumns(cfmd));
+            .addAllColumnDefinitions(getColumns(cfmd));
 
         cfmd.addColumnMetadataFromAliases(keyAliases, keyValidator, ColumnDefinition.Kind.PARTITION_KEY);
-        cfmd.addColumnMetadataFromAliases(columnAliases, comparator, ColumnDefinition.Kind.CLUSTERING_COLUMN);
+        cfmd.addColumnMetadataFromAliases(columnAliases, comparator.asAbstractType(), ColumnDefinition.Kind.CLUSTERING_COLUMN);
         if (valueAlias != null)
             cfmd.addColumnMetadataFromAliases(Collections.<ByteBuffer>singletonList(valueAlias), defaultValidator, ColumnDefinition.Kind.COMPACT_VALUE);
 
@@ -241,15 +231,13 @@ public class CreateTableStatement extends SchemaAlteringStatement
                     if (definedCollections != null)
                         throw new InvalidRequestException("Collection types are not supported with COMPACT STORAGE");
 
-                    stmt.comparator = UTF8Type.instance;
+                    stmt.comparator = new SimpleSparseCellNameType(UTF8Type.instance);
                 }
                 else
                 {
-                    List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(definedCollections == null ? 1 : 2);
-                    types.add(UTF8Type.instance);
-                    if (definedCollections != null)
-                        types.add(ColumnToCollectionType.getInstance(definedCollections));
-                    stmt.comparator = CompositeType.getInstance(types);
+                    stmt.comparator = definedCollections == null
+                                    ? new CompoundSparseCellNameType(Collections.<AbstractType<?>>emptyList())
+                                    : new CompoundSparseCellNameType.WithCollection(Collections.<AbstractType<?>>emptyList(), ColumnToCollectionType.getInstance(definedCollections));
                 }
             }
             else
@@ -261,9 +249,10 @@ public class CreateTableStatement extends SchemaAlteringStatement
                     if (definedCollections != null)
                         throw new InvalidRequestException("Collection types are not supported with COMPACT STORAGE");
                     stmt.columnAliases.add(columnAliases.get(0).bytes);
-                    stmt.comparator = getTypeAndRemove(stmt.columns, columnAliases.get(0));
-                    if (stmt.comparator instanceof CounterColumnType)
+                    AbstractType<?> at = getTypeAndRemove(stmt.columns, columnAliases.get(0));
+                    if (at instanceof CounterColumnType)
                         throw new InvalidRequestException(String.format("counter type is not supported for PRIMARY KEY part %s", stmt.columnAliases.get(0)));
+                    stmt.comparator = new SimpleDenseCellNameType(at);
                 }
                 else
                 {
@@ -282,19 +271,15 @@ public class CreateTableStatement extends SchemaAlteringStatement
                     {
                         if (definedCollections != null)
                             throw new InvalidRequestException("Collection types are not supported with COMPACT STORAGE");
+
+                        stmt.comparator = new CompoundDenseCellNameType(types);
                     }
                     else
                     {
-                        // For sparse, we must add the last UTF8 component
-                        // and the collection type if there is one
-                        types.add(UTF8Type.instance);
-                        if (definedCollections != null)
-                            types.add(ColumnToCollectionType.getInstance(definedCollections));
+                        stmt.comparator = definedCollections == null
+                                        ? new CompoundSparseCellNameType(types)
+                                        : new CompoundSparseCellNameType.WithCollection(types, ColumnToCollectionType.getInstance(definedCollections));
                     }
-
-                    if (types.isEmpty())
-                        throw new IllegalStateException("Nonsensical empty parameter list for CompositeType");
-                    stmt.comparator = CompositeType.getInstance(types);
                 }
             }
 

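The CreateTableStatement hunks above replace the hand-built CompositeType comparator with one of several CellNameType implementations, chosen from COMPACT STORAGE, the number of clustering columns, and the presence of collections. The sketch below only restates those branches, returning class names as strings rather than instantiating the real org.apache.cassandra.db.composites types.

// Condensed restatement of the comparator selection above; illustration only.
final class ComparatorChoiceSketch
{
    static String choose(boolean compactStorage, int clusteringColumns, boolean hasCollections)
    {
        if (compactStorage && hasCollections)
            throw new IllegalArgumentException("Collection types are not supported with COMPACT STORAGE");

        if (compactStorage)
        {
            if (clusteringColumns == 0)
                return "SimpleSparseCellNameType";      // static compact table
            return clusteringColumns == 1
                 ? "SimpleDenseCellNameType"            // single clustering column
                 : "CompoundDenseCellNameType";         // several clustering columns
        }

        // CQL3 (non-compact) tables are always compound and sparse
        return hasCollections
             ? "CompoundSparseCellNameType.WithCollection"
             : "CompoundSparseCellNameType";
    }

    public static void main(String[] args)
    {
        System.out.println(choose(true, 1, false));  // SimpleDenseCellNameType
        System.out.println(choose(false, 2, true));  // CompoundSparseCellNameType.WithCollection
    }
}
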
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
index db991c0..b465347 100644
--- a/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/DeleteStatement.java
@@ -24,6 +24,7 @@ import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.utils.Pair;
 
@@ -42,47 +43,40 @@ public class DeleteStatement extends ModificationStatement
         return false;
     }
 
-    public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
+    public ColumnFamily updateForKey(ByteBuffer key, Composite prefix, UpdateParameters params)
     throws InvalidRequestException
     {
         ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(cfm);
         List<Operation> deletions = getOperations();
 
-        boolean fullKey = builder.componentCount() == cfm.clusteringColumns().size();
-        boolean isRange = cfm.isDense() ? !fullKey : (!fullKey || deletions.isEmpty());
+        if (prefix.size() < cfm.clusteringColumns().size() && !deletions.isEmpty())
+            throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s since %s specified", getFirstEmptyKey(), deletions.iterator().next().column.name));
 
-        if (!deletions.isEmpty() && isRange)
-            throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s since %s specified", getFirstEmptyKey(), deletions.get(0).columnName));
-
-        if (deletions.isEmpty() && builder.componentCount() == 0)
+        if (deletions.isEmpty())
         {
-            // No columns specified, delete the row
-            cf.delete(new DeletionInfo(params.timestamp, params.localDeletionTime));
-        }
-        else
-        {
-            if (isRange)
+            // We delete the slice selected by the prefix.
+            // However, for performance reasons, we distinguish 2 cases:
+            //   - It's a full internal row delete
+            //   - It's a full cell name (i.e it's a dense layout and the prefix is full)
+            if (prefix.isEmpty())
             {
-                assert deletions.isEmpty();
-                ByteBuffer start = builder.build();
-                ByteBuffer end = builder.buildAsEndOfRange();
-                cf.addAtom(params.makeRangeTombstone(start, end));
+                // No columns specified, delete the row
+                cf.delete(new DeletionInfo(params.timestamp, params.localDeletionTime));
+            }
+            else if (cfm.comparator.isDense() && prefix.size() == cfm.clusteringColumns().size())
+            {
+                cf.addAtom(params.makeTombstone(cfm.comparator.create(prefix, null)));
             }
             else
             {
-                // Delete specific columns
-                if (cfm.isDense())
-                {
-                    ByteBuffer columnName = builder.build();
-                    cf.addColumn(params.makeTombstone(columnName));
-                }
-                else
-                {
-                    for (Operation deletion : deletions)
-                        deletion.execute(key, cf, builder.copy(), params);
-                }
+                cf.addAtom(params.makeRangeTombstone(prefix.slice()));
             }
         }
+        else
+        {
+            for (Operation op : deletions)
+                op.execute(key, cf, prefix, params);
+        }
 
         return cf;
     }

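The new updateForKey() above distinguishes a whole-row delete, a single cell tombstone for dense layouts with a full prefix, a range tombstone over the prefix's slice, and per-column Operation execution. The classifier below only restates those cases for illustration; the real method builds the corresponding DeletionInfo, tombstone, or range tombstone on the ColumnFamily.

// Illustration only: which delete path the prefix and deletions select.
final class DeleteCaseSketch
{
    enum Case { ROW_DELETION, DENSE_CELL_TOMBSTONE, RANGE_TOMBSTONE, PER_COLUMN_DELETIONS }

    static Case classify(int prefixSize, int clusteringColumns, boolean denseLayout, boolean hasColumnDeletions)
    {
        if (hasColumnDeletions)
        {
            if (prefixSize < clusteringColumns)
                throw new IllegalArgumentException("Missing mandatory PRIMARY KEY part");
            return Case.PER_COLUMN_DELETIONS;
        }
        if (prefixSize == 0)
            return Case.ROW_DELETION;                         // whole-row DeletionInfo
        if (denseLayout && prefixSize == clusteringColumns)
            return Case.DENSE_CELL_TOMBSTONE;                 // single cell in a compact table
        return Case.RANGE_TOMBSTONE;                          // tombstone over prefix.slice()
    }

    public static void main(String[] args)
    {
        System.out.println(classify(0, 2, false, false)); // ROW_DELETION
        System.out.println(classify(1, 2, false, false)); // RANGE_TOMBSTONE
    }
}
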
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
index 25f59c7..2574f73 100644
--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
@@ -27,10 +27,10 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CBuilder;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
-import org.apache.cassandra.db.marshal.CompositeType;
-import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.marshal.ListType;
 import org.apache.cassandra.db.marshal.BooleanType;
 import org.apache.cassandra.exceptions.*;
@@ -72,7 +72,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
     }
 
     public abstract boolean requireFullClusteringKey();
-    public abstract ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params) throws InvalidRequestException;
+    public abstract ColumnFamily updateForKey(ByteBuffer key, Composite prefix, UpdateParameters params) throws InvalidRequestException;
 
     public int getBoundsTerms()
     {
@@ -215,7 +215,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
     public List<ByteBuffer> buildPartitionKeyNames(List<ByteBuffer> variables)
     throws InvalidRequestException
     {
-        ColumnNameBuilder keyBuilder = cfm.getKeyNameBuilder();
+        CBuilder keyBuilder = cfm.getKeyValidatorAsCType().builder();
         List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
         for (ColumnDefinition def : cfm.partitionKeyColumns())
         {
@@ -231,7 +231,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
                 {
                     if (val == null)
                         throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", def.name));
-                    keys.add(keyBuilder.copy().add(val).build());
+                    keys.add(keyBuilder.buildWith(val).toByteBuffer());
                 }
             }
             else
@@ -247,10 +247,10 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         return keys;
     }
 
-    public ColumnNameBuilder createClusteringPrefixBuilder(List<ByteBuffer> variables)
+    public Composite createClusteringPrefix(List<ByteBuffer> variables)
     throws InvalidRequestException
     {
-        ColumnNameBuilder builder = cfm.getColumnNameBuilder();
+        CBuilder builder = cfm.comparator.prefixBuilder();
         ColumnDefinition firstEmptyKey = null;
         for (ColumnDefinition def : cfm.clusteringColumns())
         {
@@ -258,7 +258,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
             if (r == null)
             {
                 firstEmptyKey = def;
-                if (requireFullClusteringKey() && cfm.hasCompositeComparator() && !cfm.isDense())
+                if (requireFullClusteringKey() && !cfm.comparator.isDense() && cfm.comparator.isCompound())
                     throw new InvalidRequestException(String.format("Missing mandatory PRIMARY KEY part %s", def.name));
             }
             else if (firstEmptyKey != null)
@@ -275,7 +275,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
                 builder.add(val);
             }
         }
-        return builder;
+        return builder.build();
     }
 
     protected ColumnDefinition getFirstEmptyKey()
@@ -288,7 +288,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         return null;
     }
 
-    protected Map<ByteBuffer, ColumnGroupMap> readRequiredRows(List<ByteBuffer> partitionKeys, ColumnNameBuilder clusteringPrefix, boolean local, ConsistencyLevel cl)
+    protected Map<ByteBuffer, CQL3Row> readRequiredRows(List<ByteBuffer> partitionKeys, Composite clusteringPrefix, boolean local, ConsistencyLevel cl)
     throws RequestExecutionException, RequestValidationException
     {
         // Lists SET operation incurs a read.
@@ -299,14 +299,14 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
             {
                 if (toRead == null)
                     toRead = new TreeSet<ColumnIdentifier>();
-                toRead.add(op.columnName);
+                toRead.add(op.column.name);
             }
         }
 
-        return toRead == null ? null : readRows(partitionKeys, clusteringPrefix, toRead, (CompositeType)cfm.comparator, local, cl);
+        return toRead == null ? null : readRows(partitionKeys, clusteringPrefix, toRead, cfm, local, cl);
     }
 
-    private Map<ByteBuffer, ColumnGroupMap> readRows(List<ByteBuffer> partitionKeys, ColumnNameBuilder clusteringPrefix, Set<ColumnIdentifier> toRead, CompositeType composite, boolean local, ConsistencyLevel cl)
+    protected Map<ByteBuffer, CQL3Row> readRows(List<ByteBuffer> partitionKeys, Composite rowPrefix, Set<ColumnIdentifier> toRead, CFMetaData cfm, boolean local, ConsistencyLevel cl)
     throws RequestExecutionException, RequestValidationException
     {
         try
@@ -321,11 +321,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         ColumnSlice[] slices = new ColumnSlice[toRead.size()];
         int i = 0;
         for (ColumnIdentifier name : toRead)
-        {
-            ByteBuffer start = clusteringPrefix.copy().add(name).build();
-            ByteBuffer finish = clusteringPrefix.copy().add(name).buildAsEndOfRange();
-            slices[i++] = new ColumnSlice(start, finish);
-        }
+            slices[i++] = cfm.comparator.create(rowPrefix, name).slice();
 
         List<ReadCommand> commands = new ArrayList<ReadCommand>(partitionKeys.size());
         long now = System.currentTimeMillis();
@@ -340,20 +336,19 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
                        ? SelectStatement.readLocally(keyspace(), commands)
                        : StorageProxy.read(commands, cl);
 
-        Map<ByteBuffer, ColumnGroupMap> map = new HashMap<ByteBuffer, ColumnGroupMap>();
+        Map<ByteBuffer, CQL3Row> map = new HashMap<ByteBuffer, CQL3Row>();
         for (Row row : rows)
         {
-            if (row.cf == null || row.cf.getColumnCount() == 0)
+            if (row.cf == null || row.cf.isEmpty())
                 continue;
 
-            ColumnGroupMap.Builder groupBuilder = new ColumnGroupMap.Builder(composite, true, now);
-            for (Column column : row.cf)
-                groupBuilder.add(column);
-
-            List<ColumnGroupMap> groups = groupBuilder.groups();
-            assert groups.isEmpty() || groups.size() == 1;
-            if (!groups.isEmpty())
-                map.put(row.key.key, groups.get(0));
+            Iterator<CQL3Row> iter = cfm.comparator.CQL3RowBuilder(now).group(row.cf.getSortedColumns().iterator());
+            if (iter.hasNext())
+            {
+                map.put(row.key.key, iter.next());
+                // We can only update one CQL3Row per partition key at a time (we don't allow IN for clustering key)
+                assert !iter.hasNext();
+            }
         }
         return map;
     }
@@ -402,7 +397,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         if (keys.size() > 1)
             throw new InvalidRequestException("IN on the partition key is not supported with conditional updates");
 
-        ColumnNameBuilder clusteringPrefix = createClusteringPrefixBuilder(variables);
+        Composite clusteringPrefix = createClusteringPrefix(variables);
 
         ByteBuffer key = keys.get(0);
         ThriftValidation.validateKey(cfm, key);
@@ -467,7 +462,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         {
             List<ColumnDefinition> defs = new ArrayList<>(columnConditions.size());
             for (Operation condition : columnConditions)
-                defs.add(cfm.getColumnDefinition(condition.columnName));
+                defs.add(condition.column);
             selection = Selection.forColumns(defs);
         }
 
@@ -503,10 +498,10 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
     throws RequestExecutionException, RequestValidationException
     {
         List<ByteBuffer> keys = buildPartitionKeyNames(variables);
-        ColumnNameBuilder clusteringPrefix = createClusteringPrefixBuilder(variables);
+        Composite clusteringPrefix = createClusteringPrefix(variables);
 
         // Some lists operation requires reading
-        Map<ByteBuffer, ColumnGroupMap> rows = readRequiredRows(keys, clusteringPrefix, local, cl);
+        Map<ByteBuffer, CQL3Row> rows = readRequiredRows(keys, clusteringPrefix, local, cl);
         UpdateParameters params = new UpdateParameters(cfm, variables, getTimestamp(now, variables), getTimeToLive(variables), rows);
 
         Collection<IMutation> mutations = new ArrayList<IMutation>();
@@ -535,7 +530,7 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         return isCounter() ? new CounterMutation(rm, cl) : rm;
     }
 
-    private ColumnFamily buildConditions(ByteBuffer key, ColumnNameBuilder clusteringPrefix, UpdateParameters params)
+    private ColumnFamily buildConditions(ByteBuffer key, Composite clusteringPrefix, UpdateParameters params)
     throws InvalidRequestException
     {
         if (ifNotExists)
@@ -544,15 +539,12 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF
         ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(cfm);
 
         // CQL row marker
-        if (cfm.hasCompositeComparator() && !cfm.isDense() && !cfm.isSuper())
-        {
-            ByteBuffer name = clusteringPrefix.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
-            cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
-        }
+        if (cfm.isCQL3Table())
+            cf.addColumn(params.makeColumn(cfm.comparator.rowMarker(clusteringPrefix), ByteBufferUtil.EMPTY_BYTE_BUFFER));
 
         // Conditions
         for (Operation condition : columnConditions)
-            condition.execute(key, cf, clusteringPrefix.copy(), params);
+            condition.execute(key, cf, clusteringPrefix, params);
 
         assert !cf.isEmpty();
         return cf;

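ModificationStatement above now drives both partition keys and clustering prefixes through a CBuilder, calling buildWith(val) once per IN value and build() for the plain case. The sketch below is a minimal stand-in for that builder contract as it can be inferred from the usage in these hunks, not the real org.apache.cassandra.db.composites.CBuilder.

import java.util.ArrayList;
import java.util.List;

// Stand-in only: build() freezes the components added so far, while buildWith(v)
// yields a prefix with one extra trailing component without mutating the builder,
// which is what lets the IN loop over partition-key values reuse the same builder.
final class PrefixBuilderSketch
{
    private final List<String> components = new ArrayList<>();

    PrefixBuilderSketch add(String component)
    {
        components.add(component);
        return this;
    }

    List<String> build()
    {
        return new ArrayList<>(components);
    }

    List<String> buildWith(String last)
    {
        List<String> prefix = new ArrayList<>(components);
        prefix.add(last);
        return prefix;
    }

    public static void main(String[] args)
    {
        PrefixBuilderSketch b = new PrefixBuilderSketch().add("k1");
        System.out.println(b.buildWith("a")); // [k1, a]
        System.out.println(b.buildWith("b")); // [k1, b] -- the builder itself is unchanged
    }
}
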
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 62ebd21..d9b4e04 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -29,6 +29,7 @@ import org.github.jamm.MemoryMeter;
 
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
@@ -47,7 +48,6 @@ import org.apache.cassandra.thrift.ThriftValidation;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.Pair;
 
 /**
  * Encapsulates a completely parsed SELECT query, including the target
@@ -77,8 +77,9 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     private boolean isKeyRange;
     private boolean keyIsInRelation;
     private boolean usesSecondaryIndexing;
+    private boolean lastClusteringIsIn;
 
-    private Map<ColumnDefinition, Integer> orderingIndexes;
+    private Map<ColumnIdentifier, Integer> orderingIndexes;
 
     // Used by forSelection below
     private static final Parameters defaultParameters = new Parameters(Collections.<ColumnIdentifier, Boolean>emptyMap(), false, false, null, false);
@@ -366,13 +367,9 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         }
         else if (isColumnRange())
         {
-            // For sparse, we used to ask for 'defined columns' * 'asked limit' (where defined columns includes the row marker)
-            // to account for the grouping of columns.
-            // Since that doesn't work for maps/sets/lists, we now use the compositesToGroup option of SliceQueryFilter.
-            // But we must preserve backward compatibility too (for mixed version cluster that is).
-            int toGroup = cfm.isDense() ? -1 : cfm.clusteringColumns().size();
-            List<ByteBuffer> startBounds = getRequestedBound(Bound.START, variables);
-            List<ByteBuffer> endBounds = getRequestedBound(Bound.END, variables);
+            int toGroup = cfm.comparator.isDense() ? -1 : cfm.clusteringColumns().size();
+            List<Composite> startBounds = getRequestedBound(Bound.START, variables);
+            List<Composite> endBounds = getRequestedBound(Bound.END, variables);
             assert startBounds.size() == endBounds.size();
 
             // The case where startBounds == 1 is common enough that it's worth optimizing
@@ -402,7 +399,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         }
         else
         {
-            SortedSet<ByteBuffer> cellNames = getRequestedColumns(variables);
+            SortedSet<CellName> cellNames = getRequestedColumns(variables);
             if (cellNames == null) // in case of IN () for the last column of the key
                 return null;
             QueryProcessor.validateCellNames(cellNames);
@@ -444,7 +441,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     private Collection<ByteBuffer> getKeys(final List<ByteBuffer> variables) throws InvalidRequestException
     {
         List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
-        ColumnNameBuilder builder = cfm.getKeyNameBuilder();
+        CBuilder builder = cfm.getKeyValidatorAsCType().builder();
         for (ColumnDefinition def : cfm.partitionKeyColumns())
         {
             Restriction r = keyRestrictions[def.position()];
@@ -458,7 +455,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 {
                     if (val == null)
                         throw new InvalidRequestException(String.format("Invalid null value for partition key part %s", def.name));
-                    keys.add(builder.copy().add(val).build());
+                    keys.add(builder.buildWith(val).toByteBuffer());
                 }
             }
             else
@@ -484,7 +481,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 return ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
         // We deal with IN queries for keys in other places, so we know buildBound will return only one result
-        return buildBound(b, cfm.partitionKeyColumns(), keyRestrictions, false, cfm.getKeyNameBuilder(), variables).get(0);
+        return buildBound(b, cfm.partitionKeyColumns(), keyRestrictions, false, cfm.getKeyValidatorAsCType(), variables).get(0).toByteBuffer();
     }
 
     private Token getTokenBound(Bound b, List<ByteBuffer> variables, IPartitioner<?> p) throws InvalidRequestException
@@ -528,8 +525,8 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     {
         // Due to CASSANDRA-5762, we always do a slice for CQL3 tables (not dense, composite).
         // Static CF (non dense but non composite) never entails a column slice however
-        if (!cfm.isDense())
-            return cfm.hasCompositeComparator();
+        if (!cfm.comparator.isDense())
+            return cfm.comparator.isCompound();
 
         // Otherwise (i.e. for compact table where we don't have a row marker anyway and thus don't care about CASSANDRA-5762),
         // it is a range query if it has at least one column alias for which no relation is defined or is not EQ.
@@ -541,15 +538,15 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         return false;
     }
 
-    private SortedSet<ByteBuffer> getRequestedColumns(List<ByteBuffer> variables) throws InvalidRequestException
+    private SortedSet<CellName> getRequestedColumns(List<ByteBuffer> variables) throws InvalidRequestException
     {
         assert !isColumnRange();
 
-        ColumnNameBuilder builder = cfm.getColumnNameBuilder();
+        CBuilder builder = cfm.comparator.prefixBuilder();
         Iterator<ColumnDefinition> idIter = cfm.clusteringColumns().iterator();
         for (Restriction r : columnRestrictions)
         {
-            ColumnIdentifier id = idIter.next().name;
+            ColumnDefinition def = idIter.next();
             assert r != null && !r.isSlice();
 
             List<ByteBuffer> values = r.values(variables);
@@ -557,7 +554,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
             {
                 ByteBuffer val = values.get(0);
                 if (val == null)
-                    throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", id));
+                    throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", def.name));
                 builder.add(val);
             }
             else
@@ -567,32 +564,29 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 // for each value of the IN, creates all the columns corresponding to the selection.
                 if (values.isEmpty())
                     return null;
-                SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfm.comparator);
+                SortedSet<CellName> columns = new TreeSet<CellName>(cfm.comparator);
                 Iterator<ByteBuffer> iter = values.iterator();
                 while (iter.hasNext())
                 {
                     ByteBuffer val = iter.next();
-                    ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder;
                     if (val == null)
-                        throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", id));
-                    b.add(val);
-                    if (cfm.isDense())
-                        columns.add(b.build());
-                    else
-                        columns.addAll(addSelectedColumns(b));
+                        throw new InvalidRequestException(String.format("Invalid null value for clustering key part %s", def.name));
+
+                    Composite prefix = builder.buildWith(val);
+                    columns.addAll(addSelectedColumns(prefix));
                 }
                 return columns;
             }
         }
 
-        return addSelectedColumns(builder);
+        return addSelectedColumns(builder.build());
     }
 
-    private SortedSet<ByteBuffer> addSelectedColumns(ColumnNameBuilder builder)
+    private SortedSet<CellName> addSelectedColumns(Composite prefix)
     {
-        if (cfm.isDense())
+        if (cfm.comparator.isDense())
         {
-            return FBUtilities.singleton(builder.build());
+            return FBUtilities.singleton(cfm.comparator.create(prefix, null), cfm.comparator);
         }
         else
         {
@@ -600,31 +594,26 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
             // non-know set of columns, so we shouldn't get there
             assert !selectACollection();
 
-            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(cfm.comparator);
+            SortedSet<CellName> columns = new TreeSet<CellName>(cfm.comparator);
 
             // We need to query the selected column as well as the marker
             // column (for the case where the row exists but has no columns outside the PK)
             // Two exceptions are "static CF" (non-composite non-compact CF) and "super CF"
             // that don't have marker and for which we must query all columns instead
-            if (cfm.hasCompositeComparator() && !cfm.isSuper())
+            if (cfm.comparator.isCompound() && !cfm.isSuper())
             {
                 // marker
-                columns.add(builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build());
+                columns.add(cfm.comparator.rowMarker(prefix));
 
                 // selected columns
-                for (ColumnIdentifier id : selection.regularColumnsToFetch())
-                    columns.add(builder.copy().add(id).build());
+                for (ColumnDefinition def : selection.getColumnsList())
+                    if (def.kind == ColumnDefinition.Kind.REGULAR)
+                        columns.add(cfm.comparator.create(prefix, def.name));
             }
             else
             {
-                Iterator<ColumnDefinition> iter = cfm.regularColumns().iterator();
-                while (iter.hasNext())
-                {
-                    ColumnDefinition def = iter.next();
-                    ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder;
-                    ByteBuffer cname = b.add(def.name).build();
-                    columns.add(cname);
-                }
+                for (ColumnDefinition def : cfm.regularColumns())
+                    columns.add(cfm.comparator.create(prefix, def.name));
             }
             return columns;
         }
@@ -632,7 +621,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
 
     private boolean selectACollection()
     {
-        if (!cfm.hasCollections())
+        if (!cfm.comparator.hasCollections())
             return false;
 
         for (ColumnDefinition def : selection.getColumnsList())
@@ -644,13 +633,15 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         return false;
     }
 
-    private List<ByteBuffer> buildBound(Bound bound,
-                                        Collection<ColumnDefinition> defs,
-                                        Restriction[] restrictions,
-                                        boolean isReversed,
-                                        ColumnNameBuilder builder,
-                                        List<ByteBuffer> variables) throws InvalidRequestException
+    private static List<Composite> buildBound(Bound bound,
+                                              Collection<ColumnDefinition> defs,
+                                              Restriction[] restrictions,
+                                              boolean isReversed,
+                                              CType type,
+                                              List<ByteBuffer> variables) throws InvalidRequestException
     {
+        CBuilder builder = type.builder();
+
         // The end-of-component of composite doesn't depend on whether the
         // component type is reversed or not (i.e. the ReversedType is applied
         // to the component comparator but not to the end-of-component itself),
@@ -668,9 +659,8 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 // There wasn't any non EQ relation on that key, we select all records having the preceding component as prefix.
                 // For composites, if there was preceding component and we're computing the end, we must change the last component
                 // End-Of-Component, otherwise we would be selecting only one record.
-                return Collections.singletonList(builder.componentCount() > 0 && eocBound == Bound.END
-                                                 ? builder.buildAsEndOfRange()
-                                                 : builder.build());
+                Composite prefix = builder.build();
+                return Collections.singletonList(!prefix.isEmpty() && eocBound == Bound.END ? prefix.end() : prefix);
             }
 
             if (r.isSlice())
@@ -680,7 +670,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 ByteBuffer val = slice.bound(b, variables);
                 if (val == null)
                     throw new InvalidRequestException(String.format("Invalid null clustering key part %s", def.name));
-                return Collections.singletonList(builder.add(val, slice.getRelation(eocBound, b)).build());
+                return Collections.singletonList(builder.add(val).build().withEOC(eocForRelation(slice.getRelation(eocBound, b))));
             }
             else
             {
@@ -691,16 +681,16 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                     assert def.position() == defs.size() - 1;
                     // The IN query might not have listed the values in comparator order, so we need to re-sort
                     // the bounds lists to make sure the slices works correctly (also, to avoid duplicates).
-                    TreeSet<ByteBuffer> s = new TreeSet<ByteBuffer>(isReversed ? cfm.comparator.reverseComparator : cfm.comparator);
+                    TreeSet<Composite> s = new TreeSet<Composite>(isReversed ? type.reverseComparator() : type);
                     for (ByteBuffer val : values)
                     {
                         if (val == null)
                             throw new InvalidRequestException(String.format("Invalid null clustering key part %s", def.name));
-                        ColumnNameBuilder copy = builder.copy().add(val);
+                        Composite prefix = builder.buildWith(val);
                         // See below for why this
-                        s.add((bound == Bound.END && copy.remainingCount() > 0) ? copy.buildAsEndOfRange() : copy.build());
+                        s.add((bound == Bound.END && builder.remainingCount() > 0) ? prefix.end() : prefix);
                     }
-                    return new ArrayList<ByteBuffer>(s);
+                    return new ArrayList<Composite>(s);
                 }
 
                 ByteBuffer val = values.get(0);
@@ -714,14 +704,34 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         // it would be harmless to do it. However, we use this method for the partition key too. And when a query
         // with 2ndary index is done, and with the partition provided with an EQ, we'll end up here, and in that
         // case using the eoc would be bad, since for the random partitioner we have no guarantee that
-        // builder.buildAsEndOfRange() will sort after builder.build() (see #5240).
-        return Collections.singletonList((bound == Bound.END && builder.remainingCount() > 0) ? builder.buildAsEndOfRange() : builder.build());
+        // prefix.end() will sort after prefix (see #5240).
+        Composite prefix = builder.build();
+        return Collections.singletonList(bound == Bound.END && builder.remainingCount() > 0 ? prefix.end() : prefix);
     }
 
-    private List<ByteBuffer> getRequestedBound(Bound b, List<ByteBuffer> variables) throws InvalidRequestException
+    private static Composite.EOC eocForRelation(Relation.Type op)
+    {
+        switch (op)
+        {
+            case LT:
+                // < X => using startOf(X) as finish bound
+                return Composite.EOC.START;
+            case GT:
+            case LTE:
+                // > X => using endOf(X) as start bound
+                // <= X => using endOf(X) as finish bound
+                return Composite.EOC.END;
+            default:
+                // >= X => using X as start bound (could use START_OF too)
+                // = X => using X
+                return Composite.EOC.NONE;
+        }
+    }
+
+    private List<Composite> getRequestedBound(Bound b, List<ByteBuffer> variables) throws InvalidRequestException
     {
         assert isColumnRange();
-        return buildBound(b, cfm.clusteringColumns(), columnRestrictions, isReversed, cfm.getColumnNameBuilder(), variables);
+        return buildBound(b, cfm.clusteringColumns(), columnRestrictions, isReversed, cfm.comparator, variables);
     }
 
     public List<IndexExpression> getIndexExpressions(List<ByteBuffer> variables) throws InvalidRequestException
@@ -798,46 +808,30 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
         return value;
     }
 
-    private Iterable<Column> columnsInOrder(final ColumnFamily cf, final List<ByteBuffer> variables) throws InvalidRequestException
+    private Iterator<Column> applySliceRestriction(final Iterator<Column> cells, final List<ByteBuffer> variables) throws InvalidRequestException
     {
-        if (columnRestrictions.length == 0)
-            return cf.getSortedColumns();
-
-        // If the restriction for the last column alias is an IN, respect
-        // requested order
-        Restriction last = columnRestrictions[columnRestrictions.length - 1];
-        if (last == null || last.isSlice())
-            return cf.getSortedColumns();
-
-        ColumnNameBuilder builder = cfm.getColumnNameBuilder();
-        for (int i = 0; i < columnRestrictions.length - 1; i++)
-            builder.add(columnRestrictions[i].values(variables).get(0));
-
-        List<ByteBuffer> values = last.values(variables);
-        final List<ByteBuffer> requested = new ArrayList<ByteBuffer>(values.size());
-        Iterator<ByteBuffer> iter = values.iterator();
-        while (iter.hasNext())
-        {
-            ByteBuffer t = iter.next();
-            ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder;
-            requested.add(b.add(t).build());
-        }
+        assert sliceRestriction != null;
+
+        final CellNameType type = cfm.comparator;
+        final CellName excludedStart = sliceRestriction.isInclusive(Bound.START) ? null : type.makeCellName(sliceRestriction.bound(Bound.START, variables));
+        final CellName excludedEnd = sliceRestriction.isInclusive(Bound.END) ? null : type.makeCellName(sliceRestriction.bound(Bound.END, variables));
 
-        return new Iterable<Column>()
+        return new AbstractIterator<Column>()
         {
-            public Iterator<Column> iterator()
+            protected Column computeNext()
             {
-                return new AbstractIterator<Column>()
-                {
-                    Iterator<ByteBuffer> iter = requested.iterator();
-                    public Column computeNext()
-                    {
-                        if (!iter.hasNext())
-                            return endOfData();
-                        Column column = cf.getColumn(iter.next());
-                        return column == null ? computeNext() : column;
-                    }
-                };
+                if (!cells.hasNext())
+                    return endOfData();
+
+                Column c = cells.next();
+
+                // For dynamic CF, the column could be out of the requested bounds (because we don't support strict bounds internally (unless
+                // the comparator is composite that is)), filter here
+                if ( (excludedStart != null && type.compare(c.name(), excludedStart) == 0)
+                  || (excludedEnd != null && type.compare(c.name(), excludedEnd) == 0) )
+                    return computeNext();
+
+                return c;
             }
         };
     }
@@ -856,7 +850,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
 
         ResultSet cqlRows = result.build();
 
-        orderResults(cqlRows);
+        orderResults(cqlRows, variables);
 
         // Internal calls always return columns in the comparator order, even when reverse was set
         if (isReversed)
@@ -871,177 +865,131 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     void processColumnFamily(ByteBuffer key, ColumnFamily cf, List<ByteBuffer> variables, long now, Selection.ResultSetBuilder result)
     throws InvalidRequestException
     {
-        ByteBuffer[] keyComponents = cfm.getKeyValidator() instanceof CompositeType
-                                   ? ((CompositeType)cfm.getKeyValidator()).split(key)
-                                   : new ByteBuffer[]{ key };
-
-        if (parameters.isDistinct)
+        CFMetaData cfm = cf.metadata();
+        ByteBuffer[] keyComponents = null;
+        if (cfm.getKeyValidator() instanceof CompositeType)
         {
-            if (!cf.hasOnlyTombstones(now))
-            {
-                result.newRow();
-                // selection.getColumnsList() will contain only the partition key components - all of them.
-                for (ColumnDefinition def : selection.getColumnsList())
-                    result.add(keyComponents[def.position()]);
-            }
+            keyComponents = ((CompositeType)cfm.getKeyValidator()).split(key);
         }
-        else if (cfm.isDense())
+        else
         {
-            // One cqlRow per column
-            for (Column c : columnsInOrder(cf, variables))
-            {
-                if (c.isMarkedForDelete(now))
-                    continue;
-
-                ByteBuffer[] components = null;
-                if (cfm.hasCompositeComparator())
-                {
-                    components = ((CompositeType)cfm.comparator).split(c.name());
-                }
-                else if (sliceRestriction != null)
-                {
-                    // For dynamic CF, the column could be out of the requested bounds, filter here
-                    if (!sliceRestriction.isInclusive(Bound.START) && c.name().equals(sliceRestriction.bound(Bound.START, variables)))
-                        continue;
-                    if (!sliceRestriction.isInclusive(Bound.END) && c.name().equals(sliceRestriction.bound(Bound.END, variables)))
-                        continue;
-                }
-
-                result.newRow();
-                // Respect selection order
-                for (ColumnDefinition def : selection.getColumnsList())
-                {
-                    switch (def.kind)
-                    {
-                        case PARTITION_KEY:
-                            result.add(keyComponents[def.position()]);
-                            break;
-                        case CLUSTERING_COLUMN:
-                            ByteBuffer val = cfm.hasCompositeComparator()
-                                           ? (def.position() < components.length ? components[def.position()] : null)
-                                           : c.name();
-                            result.add(val);
-                            break;
-                        case COMPACT_VALUE:
-                            result.add(c);
-                            break;
-                        case REGULAR:
-                            // This should not happen for compact CF
-                            throw new AssertionError();
-                        default:
-                            throw new AssertionError();
-                    }
-                }
-            }
+            keyComponents = new ByteBuffer[]{ key };
         }
-        else if (cfm.hasCompositeComparator())
-        {
-            // Sparse case: group column in cqlRow when composite prefix is equal
-            CompositeType composite = (CompositeType)cfm.comparator;
 
-            ColumnGroupMap.Builder builder = new ColumnGroupMap.Builder(composite, cfm.hasCollections(), now);
+        Iterator<Column> cells = cf.getSortedColumns().iterator();
+        if (sliceRestriction != null)
+            cells = applySliceRestriction(cells, variables);
 
-            for (Column c : cf)
-            {
-                if (c.isMarkedForDelete(now))
-                    continue;
-
-                builder.add(c);
-            }
-
-            for (ColumnGroupMap group : builder.groups())
-                handleGroup(selection, result, keyComponents, group);
-        }
-        else
+        for (Iterator<CQL3Row> iter = cfm.comparator.CQL3RowBuilder(now).group(cells); iter.hasNext();)
         {
-            if (cf.hasOnlyTombstones(now))
-                return;
+            CQL3Row cql3Row = iter.next();
 
-            // Static case: One cqlRow for all columns
+            // Respect requested order
             result.newRow();
+            // Respect selection order
             for (ColumnDefinition def : selection.getColumnsList())
             {
-                if (def.kind == ColumnDefinition.Kind.PARTITION_KEY)
-                    result.add(keyComponents[def.position()]);
-                else
-                    result.add(cf.getColumn(def.name.bytes));
-            }
+                switch (def.kind)
+                {
+                    case PARTITION_KEY:
+                        result.add(keyComponents[def.position()]);
+                        break;
+                    case CLUSTERING_COLUMN:
+                        result.add(cql3Row.getClusteringColumn(def.position()));
+                        break;
+                    case COMPACT_VALUE:
+                        result.add(cql3Row.getColumn(null));
+                        break;
+                    case REGULAR:
+                        if (def.type.isCollection())
+                        {
+                            List<Column> collection = cql3Row.getCollection(def.name);
+                            ByteBuffer value = collection == null
+                                             ? null
+                                             : ((CollectionType)def.type).serialize(collection);
+                            result.add(value);
+                        }
+                        else
+                        {
+                            result.add(cql3Row.getColumn(def.name));
+                        }
+                        break;
+                    }
+                }
         }
     }
 
     /**
      * Orders results when multiple keys are selected (using IN)
      */
-    private void orderResults(ResultSet cqlRows)
+    private void orderResults(ResultSet cqlRows, List<ByteBuffer> variables) throws InvalidRequestException
     {
-        // There is nothing to do if
-        //   a. there are no results,
-        //   b. no ordering information where given,
-        //   c. key restriction is a Range or not an IN expression
-        if (cqlRows.size() == 0 || parameters.orderings.isEmpty() || isKeyRange || !keyIsInRelation)
+        if (cqlRows.size() == 0)
+            return;
+
+        /*
+         * We need to do post-query ordering in 2 cases:
+         *   1) if the last clustering key is restricted by a IN.
+         *   2) if the row key is restricted by a IN and there is some ORDER BY values
+         */
+        if (!(lastClusteringIsIn || (keyIsInRelation && parameters.orderings.size() > 0)))
             return;
 
         assert orderingIndexes != null;
 
-        // optimization when only *one* order condition was given
-        // because there is no point of using composite comparator if there is only one order condition
-        if (parameters.orderings.size() == 1)
+        List<Integer> idToSort = new ArrayList<Integer>();
+        List<Comparator<ByteBuffer>> sorters = new ArrayList<Comparator<ByteBuffer>>();
+
+        // If the restriction for the last clustering key is an IN, respect requested order
+        if (lastClusteringIsIn)
         {
-            ColumnDefinition ordering = cfm.getColumnDefinition(parameters.orderings.keySet().iterator().next());
-            Collections.sort(cqlRows.rows, new SingleColumnComparator(orderingIndexes.get(ordering), ordering.type));
-            return;
+            List<ColumnDefinition> cc = cfm.clusteringColumns();
+            idToSort.add(orderingIndexes.get(cc.get(cc.size() - 1).name));
+            Restriction last = columnRestrictions[columnRestrictions.length - 1];
+            sorters.add(makeComparatorFor(last.values(variables)));
         }
 
-        // builds a 'composite' type for multi-column comparison from the comparators of the ordering components
-        // and passes collected position information and built composite comparator to CompositeComparator to do
-        // an actual comparison of the CQL rows.
-        List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(parameters.orderings.size());
-        int[] positions = new int[parameters.orderings.size()];
-
-        int idx = 0;
+        // Then add the order by
         for (ColumnIdentifier identifier : parameters.orderings.keySet())
         {
             ColumnDefinition orderingColumn = cfm.getColumnDefinition(identifier);
-            types.add(orderingColumn.type);
-            positions[idx++] = orderingIndexes.get(orderingColumn);
+            idToSort.add(orderingIndexes.get(orderingColumn.name));
+            sorters.add(orderingColumn.type);
         }
 
-        Collections.sort(cqlRows.rows, new CompositeComparator(types, positions));
+        Comparator<List<ByteBuffer>> comparator = idToSort.size() == 1
+                                                ? new SingleColumnComparator(idToSort.get(0), sorters.get(0))
+                                                : new CompositeComparator(sorters, idToSort);
+        Collections.sort(cqlRows.rows, comparator);
     }
 
-    private void handleGroup(Selection selection, Selection.ResultSetBuilder result, ByteBuffer[] keyComponents, ColumnGroupMap columns) throws InvalidRequestException
+    // Comparator used when the last clustering key is restricted by an IN, to sort result
+    // rows in the order of the values provided for that IN.
+    private Comparator<ByteBuffer> makeComparatorFor(final List<ByteBuffer> values)
     {
-        // Respect requested order
-        result.newRow();
-        for (ColumnDefinition def : selection.getColumnsList())
+        // This may not always be the most efficient, but it probably is if
+        // values is small, which is likely to be the most common case.
+        return new Comparator<ByteBuffer>()
         {
-            switch (def.kind)
+            public int compare(ByteBuffer b1, ByteBuffer b2)
             {
-                case PARTITION_KEY:
-                    result.add(keyComponents[def.position()]);
-                    break;
-                case CLUSTERING_COLUMN:
-                    result.add(columns.getKeyComponent(def.position()));
-                    break;
-                case COMPACT_VALUE:
-                    // This should not happen for SPARSE
-                    throw new AssertionError();
-                case REGULAR:
-                    if (def.type.isCollection())
-                    {
-                        List<Pair<ByteBuffer, Column>> collection = columns.getCollection(def.name.bytes);
-                        ByteBuffer value = collection == null
-                                         ? null
-                                         : ((CollectionType)def.type).serialize(collection);
-                        result.add(value);
-                    }
-                    else
-                    {
-                        result.add(columns.getSimple(def.name.bytes));
-                    }
-                    break;
+                int idx1 = -1;
+                int idx2 = -1;
+                for (int i = 0; i < values.size(); i++)
+                {
+                    ByteBuffer bb = values.get(i);
+                    if (bb.equals(b1))
+                        idx1 = i;
+                    if (bb.equals(b2))
+                        idx2 = i;
+
+                    if (idx1 >= 0 && idx2 >= 0)
+                        break;
+                }
+                assert idx1 >= 0 && idx2 >= 0 : "Got CQL3 row that was not queried in resultset";
+                return idx1 - idx2;
             }
-        }
+        };
     }
 
     private static boolean isReversedType(ColumnDefinition def)
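
The comparator built by makeComparatorFor() above orders rows by the position of their last clustering value in the IN list, not by the column's natural type. A minimal standalone sketch of the same idea (class and variable names are illustrative and not part of this patch):

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public class InOrderComparatorSketch
    {
        // Orders values by their index in the IN list; every value is expected to be present.
        static Comparator<ByteBuffer> inOrder(final List<ByteBuffer> inValues)
        {
            return new Comparator<ByteBuffer>()
            {
                public int compare(ByteBuffer b1, ByteBuffer b2)
                {
                    return inValues.indexOf(b1) - inValues.indexOf(b2);
                }
            };
        }

        public static void main(String[] args)
        {
            ByteBuffer a = ByteBuffer.wrap(new byte[]{ 1 });
            ByteBuffer b = ByteBuffer.wrap(new byte[]{ 2 });
            ByteBuffer c = ByteBuffer.wrap(new byte[]{ 3 });

            // e.g. WHERE ck IN (c, a, b): rows must come back in c, a, b order
            List<ByteBuffer> in = Arrays.asList(c, a, b);
            List<ByteBuffer> rows = new ArrayList<ByteBuffer>(Arrays.asList(a, b, c));
            Collections.sort(rows, inOrder(in));
            System.out.println(rows.equals(Arrays.asList(c, a, b))); // true
        }
    }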
@@ -1263,7 +1211,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                     Restriction.Slice slice = (Restriction.Slice)restriction;
                     // For non-composite slices, we don't support internally the difference between exclusive and
                     // inclusive bounds, so we deal with it manually.
-                    if (!cfm.hasCompositeComparator() && (!slice.isInclusive(Bound.START) || !slice.isInclusive(Bound.END)))
+                    if (!cfm.comparator.isCompound() && (!slice.isInclusive(Bound.START) || !slice.isInclusive(Bound.END)))
                         stmt.sliceRestriction = slice;
                 }
                 else if (restriction.isIN())
@@ -1274,6 +1222,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                         throw new InvalidRequestException(String.format("PRIMARY KEY part %s cannot be restricted by IN relation", cdef.name));
                     else if (stmt.selectACollection())
                         throw new InvalidRequestException(String.format("Cannot restrict PRIMARY KEY part %s by IN relation as a collection is selected by the query", cdef.name));
+                    stmt.lastClusteringIsIn = true;
                 }
 
                 previous = cdef;
@@ -1311,7 +1260,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 // queried automatically, and then removing it from the resultSet afterwards if needed)
                 if (stmt.keyIsInRelation)
                 {
-                    stmt.orderingIndexes = new HashMap<ColumnDefinition, Integer>();
+                    stmt.orderingIndexes = new HashMap<ColumnIdentifier, Integer>();
                     for (ColumnIdentifier column : stmt.parameters.orderings.keySet())
                     {
                         final ColumnDefinition def = cfm.getColumnDefinition(column);
@@ -1325,14 +1274,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
 
                         if (selectClause.isEmpty()) // wildcard
                         {
-                            stmt.orderingIndexes.put(def, Iterators.indexOf(cfm.allColumnsInSelectOrder(),
-                                                                            new Predicate<ColumnDefinition>()
-                                                                            {
-                                                                                public boolean apply(ColumnDefinition n)
-                                                                                {
-                                                                                    return def.equals(n);
-                                                                                }
-                                                                            }));
+                            stmt.orderingIndexes.put(def.name, indexOf(def, cfm.allColumnsInSelectOrder()));
                         }
                         else
                         {
@@ -1342,7 +1284,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                                 RawSelector selector = selectClause.get(i);
                                 if (def.name.equals(selector.selectable))
                                 {
-                                    stmt.orderingIndexes.put(def, i);
+                                    stmt.orderingIndexes.put(def.name, i);
                                     hasColumn = true;
                                     break;
                                 }
@@ -1399,6 +1341,16 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
                 stmt.isReversed = isReversed;
             }
 
+            if (stmt.lastClusteringIsIn)
+            {
+                // This means we'll have to do post-query reordering, so update the orderingIndexes
+                if (stmt.orderingIndexes == null)
+                    stmt.orderingIndexes = new HashMap<ColumnIdentifier, Integer>();
+
+                ColumnDefinition last = cfm.clusteringColumns().get(cfm.clusteringColumns().size() - 1);
+                stmt.orderingIndexes.put(last.name, indexOf(last, stmt.selection.getColumnsList().iterator()));
+            }
+
             // Make sure this query is allowed (note: non-key-range, non-indexed queries cannot involve filtering underneath)
             if (!parameters.allowFiltering && (stmt.isKeyRange || stmt.usesSecondaryIndexing))
             {
@@ -1413,6 +1365,17 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
             return new ParsedStatement.Prepared(stmt, names);
         }
 
+        private int indexOf(final ColumnDefinition def, Iterator<ColumnDefinition> defs)
+        {
+            return Iterators.indexOf(defs, new Predicate<ColumnDefinition>()
+                                           {
+                                               public boolean apply(ColumnDefinition n)
+                                               {
+                                                   return def.name.equals(n.name);
+                                               }
+                                           });
+        }
+
         private void validateDistinctSelection(Collection<ColumnDefinition> requestedColumns, Collection<ColumnDefinition> partitionKey)
         throws InvalidRequestException
         {
@@ -1584,9 +1547,9 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
     private static class SingleColumnComparator implements Comparator<List<ByteBuffer>>
     {
         private final int index;
-        private final AbstractType<?> comparator;
+        private final Comparator<ByteBuffer> comparator;
 
-        public SingleColumnComparator(int columnIndex, AbstractType<?> orderer)
+        public SingleColumnComparator(int columnIndex, Comparator<ByteBuffer> orderer)
         {
             index = columnIndex;
             comparator = orderer;
@@ -1603,10 +1566,10 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
      */
     private static class CompositeComparator implements Comparator<List<ByteBuffer>>
     {
-        private final List<AbstractType<?>> orderTypes;
-        private final int[] positions;
+        private final List<Comparator<ByteBuffer>> orderTypes;
+        private final List<Integer> positions;
 
-        private CompositeComparator(List<AbstractType<?>> orderTypes, int[] positions)
+        private CompositeComparator(List<Comparator<ByteBuffer>> orderTypes, List<Integer> positions)
         {
             this.orderTypes = orderTypes;
             this.positions = positions;
@@ -1614,10 +1577,10 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache
 
         public int compare(List<ByteBuffer> a, List<ByteBuffer> b)
         {
-            for (int i = 0; i < positions.length; i++)
+            for (int i = 0; i < positions.size(); i++)
             {
-                AbstractType<?> type = orderTypes.get(i);
-                int columnPos = positions[i];
+                Comparator<ByteBuffer> type = orderTypes.get(i);
+                int columnPos = positions.get(i);
 
                 ByteBuffer aValue = a.get(columnPos);
                 ByteBuffer bValue = b.get(columnPos);
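
The reworked CompositeComparator compares whole result rows position by position, using one Comparator<ByteBuffer> per ordered column; the first position that differs decides. A self-contained sketch of that comparison loop, outside the patch (names are illustrative):

    import java.nio.ByteBuffer;
    import java.util.Comparator;
    import java.util.List;

    public class RowComparatorSketch implements Comparator<List<ByteBuffer>>
    {
        private final List<Comparator<ByteBuffer>> orderings; // one comparator per ordered column
        private final List<Integer> positions;                // column positions within a result row

        public RowComparatorSketch(List<Comparator<ByteBuffer>> orderings, List<Integer> positions)
        {
            this.orderings = orderings;
            this.positions = positions;
        }

        public int compare(List<ByteBuffer> a, List<ByteBuffer> b)
        {
            for (int i = 0; i < positions.size(); i++)
            {
                int pos = positions.get(i);
                int cmp = orderings.get(i).compare(a.get(pos), b.get(pos));
                if (cmp != 0)
                    return cmp;
            }
            return 0; // all ordered positions are equal
        }
    }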

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/Selection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/Selection.java b/src/java/org/apache/cassandra/cql3/statements/Selection.java
index a578f3f..6a8cfe6 100644
--- a/src/java/org/apache/cassandra/cql3/statements/Selection.java
+++ b/src/java/org/apache/cassandra/cql3/statements/Selection.java
@@ -235,20 +235,6 @@ public abstract class Selection
     protected abstract List<ByteBuffer> handleRow(ResultSetBuilder rs) throws InvalidRequestException;
 
     /**
-     * @return the list of CQL3 "regular" (the "COLUMN_METADATA" ones) column names to fetch.
-     */
-    public List<ColumnIdentifier> regularColumnsToFetch()
-    {
-        List<ColumnIdentifier> toFetch = new ArrayList<ColumnIdentifier>();
-        for (ColumnDefinition def : columnsList)
-        {
-            if (def.kind == ColumnDefinition.Kind.REGULAR)
-                toFetch.add(def.name);
-        }
-        return toFetch;
-    }
-
-    /**
      * @return the list of CQL3 columns value this SelectionClause needs.
      */
     public List<ColumnDefinition> getColumnsList()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
index 7a8340d..6cf0856 100644
--- a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java
@@ -24,7 +24,7 @@ import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
@@ -35,7 +35,7 @@ import org.apache.cassandra.utils.Pair;
  */
 public class UpdateStatement extends ModificationStatement
 {
-    private static final Operation setToEmptyOperation = new Constants.Setter(null, new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER));
+    private static final Constants.Value EMPTY = new Constants.Value(ByteBufferUtil.EMPTY_BYTE_BUFFER);
 
     private UpdateStatement(int boundTerms, CFMetaData cfm, Attributes attrs)
     {
@@ -47,7 +47,7 @@ public class UpdateStatement extends ModificationStatement
         return true;
     }
 
-    public void addUpdateForKey(ColumnFamily cf, ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
+    public void addUpdateForKey(ColumnFamily cf, ByteBuffer key, Composite prefix, UpdateParameters params)
     throws InvalidRequestException
     {
         // Inserting the CQL row marker (see #4361)
@@ -61,17 +61,14 @@ public class UpdateStatement extends ModificationStatement
         // 'DELETE FROM t WHERE k = 1' does remove the row entirely)
         //
         // We never insert markers for Super CF as this would confuse the thrift side.
-        if (cfm.hasCompositeComparator() && !cfm.isDense() && !cfm.isSuper())
-        {
-            ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
-            cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
-        }
+        if (cfm.isCQL3Table())
+            cf.addColumn(params.makeColumn(cfm.comparator.rowMarker(prefix), ByteBufferUtil.EMPTY_BYTE_BUFFER));
 
         List<Operation> updates = getOperations();
 
-        if (cfm.isDense())
+        if (cfm.comparator.isDense())
         {
-            if (builder.componentCount() == 0)
+            if (prefix.isEmpty())
                 throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfm.clusteringColumns().iterator().next()));
 
             // An empty name for the compact value is what we use to recognize the case where there is no column
@@ -80,7 +77,7 @@ public class UpdateStatement extends ModificationStatement
             {
                 // There is no column outside the PK. So no operation could have passed through validation
                 assert updates.isEmpty();
-                setToEmptyOperation.execute(key, cf, builder.copy(), params);
+                new Constants.Setter(cfm.compactValueColumn(), EMPTY).execute(key, cf, prefix, params);
             }
             else
             {
@@ -89,21 +86,21 @@ public class UpdateStatement extends ModificationStatement
                     throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfm.compactValueColumn().name));
 
                 for (Operation update : updates)
-                    update.execute(key, cf, builder.copy(), params);
+                    update.execute(key, cf, prefix, params);
             }
         }
         else
         {
             for (Operation update : updates)
-                update.execute(key, cf, builder.copy(), params);
+                update.execute(key, cf, prefix, params);
         }
     }
 
-    public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
+    public ColumnFamily updateForKey(ByteBuffer key, Composite prefix, UpdateParameters params)
     throws InvalidRequestException
     {
         ColumnFamily cf = UnsortedColumns.factory.create(cfm);
-        addUpdateForKey(cf, key, builder, params);
+        addUpdateForKey(cf, key, prefix, params);
         return cf;
     }
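
For readers tracking the row-marker handling in addUpdateForKey() above: the marker is the empty-valued cell that keeps a CQL row alive even when it has no regular columns, and its name is now produced by the table's comparator instead of being hand-built. The two shapes, side by side (taken from the hunk above):

    // before: ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
    //         cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    // after:  cf.addColumn(params.makeColumn(cfm.comparator.rowMarker(prefix), ByteBufferUtil.EMPTY_BYTE_BUFFER));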
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java b/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java
index d2825c1..432b47e 100644
--- a/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java
+++ b/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.base.Function;
@@ -26,8 +25,10 @@ import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.utils.Allocator;
 
 /**
@@ -79,12 +80,12 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
         return reversed;
     }
 
-    private Comparator<ByteBuffer> internalComparator()
+    private Comparator<Composite> internalComparator()
     {
-        return reversed ? getComparator().reverseComparator : getComparator();
+        return reversed ? getComparator().reverseComparator() : getComparator();
     }
 
-    public Column getColumn(ByteBuffer name)
+    public Column getColumn(CellName name)
     {
         int pos = binarySearch(name);
         return pos >= 0 ? columns.get(pos) : null;
@@ -147,7 +148,7 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
         columns.set(i, reconciledColumn);
     }
 
-    private int binarySearch(ByteBuffer name)
+    private int binarySearch(CellName name)
     {
         return binarySearch(columns, internalComparator(), name, 0);
     }
@@ -158,7 +159,7 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
      * (We don't use Collections.binarySearch() directly because it would require us to create
      * a fake Column (as well as an Column comparator) to do the search, which is ugly.
      */
-    private static int binarySearch(List<Column> columns, Comparator<ByteBuffer> comparator, ByteBuffer name, int start)
+    private static int binarySearch(List<Column> columns, Comparator<Composite> comparator, Composite name, int start)
     {
         int low = start;
         int mid = columns.size();
@@ -266,11 +267,11 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
         columns.clear();
     }
 
-    public Iterable<ByteBuffer> getColumnNames()
+    public Iterable<CellName> getColumnNames()
     {
-        return Iterables.transform(columns, new Function<Column, ByteBuffer>()
+        return Iterables.transform(columns, new Function<Column, CellName>()
         {
-            public ByteBuffer apply(Column column)
+            public CellName apply(Column column)
             {
                 return column.name;
             }
@@ -296,17 +297,17 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
     {
         private final List<Column> list;
         private final ColumnSlice[] slices;
-        private final Comparator<ByteBuffer> comparator;
+        private final Comparator<Composite> comparator;
 
         private int idx = 0;
         private int previousSliceEnd = 0;
         private Iterator<Column> currentSlice;
 
-        public SlicesIterator(List<Column> list, AbstractType<?> comparator, ColumnSlice[] slices, boolean reversed)
+        public SlicesIterator(List<Column> list, CellNameType comparator, ColumnSlice[] slices, boolean reversed)
         {
             this.list = reversed ? Lists.reverse(list) : list;
             this.slices = slices;
-            this.comparator = reversed ? comparator.reverseComparator : comparator;
+            this.comparator = reversed ? comparator.reverseComparator() : comparator;
         }
 
         protected Column computeNext()
@@ -318,12 +319,12 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
 
                 ColumnSlice slice = slices[idx++];
                 // The first idx to include
-                int startIdx = slice.start.remaining() == 0 ? 0 : binarySearch(list, comparator, slice.start, previousSliceEnd);
+                int startIdx = slice.start.isEmpty() ? 0 : binarySearch(list, comparator, slice.start, previousSliceEnd);
                 if (startIdx < 0)
                     startIdx = -startIdx - 1;
 
                 // The first idx to exclude
-                int finishIdx = slice.finish.remaining() == 0 ? list.size() - 1 : binarySearch(list, comparator, slice.finish, previousSliceEnd);
+                int finishIdx = slice.finish.isEmpty() ? list.size() - 1 : binarySearch(list, comparator, slice.finish, previousSliceEnd);
                 if (finishIdx >= 0)
                     finishIdx++;
                 else
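
The slice bounds above rely on the usual binary-search convention of returning -(insertion point) - 1 when the key is absent, which is then turned into the first index inside the slice. A tiny standalone illustration, not part of the patch:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class SliceBoundSketch
    {
        public static void main(String[] args)
        {
            List<Integer> names = Arrays.asList(10, 20, 30, 40);

            // Look for a slice start of 25, which is not present.
            int idx = Collections.binarySearch(names, 25); // returns -3, i.e. -(2) - 1
            if (idx < 0)
                idx = -idx - 1;                            // 2: first element >= 25

            System.out.println(names.get(idx));            // 30
        }
    }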

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/AtomDeserializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/AtomDeserializer.java b/src/java/org/apache/cassandra/db/AtomDeserializer.java
new file mode 100644
index 0000000..799ed0e
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/AtomDeserializer.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import java.io.DataInput;
+import java.io.IOException;
+
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.io.sstable.Descriptor;
+
+/**
+ * Helper class to deserialize OnDiskAtom efficiently.
+ *
+ * More precisely, this class is used by the low-level readers
+ * (IndexedSliceReader and SSTableNamesIterator) to ensure we don't
+ * do more work than necessary (i.e. we don't allocate/deserialize
+ * objects for things we don't care about).
+ */
+public class AtomDeserializer
+{
+    private final CellNameType type;
+    private final CellNameType.Deserializer nameDeserializer;
+    private final DataInput in;
+    private final ColumnSerializer.Flag flag;
+    private final int expireBefore;
+    private final Descriptor.Version version;
+
+    public AtomDeserializer(CellNameType type, DataInput in, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version)
+    {
+        this.type = type;
+        this.nameDeserializer = type.newDeserializer(in);
+        this.in = in;
+        this.flag = flag;
+        this.expireBefore = expireBefore;
+        this.version = version;
+    }
+
+    /**
+     * Whether or not there are more atoms to read.
+     */
+    public boolean hasNext() throws IOException
+    {
+        return nameDeserializer.hasNext();
+    }
+
+    /**
+     * Whether or not some atom has been read but not yet processed, i.e. neither readNext()
+     * nor skipNext() has been called for that atom.
+     */
+    public boolean hasUnprocessed() throws IOException
+    {
+        return nameDeserializer.hasUnprocessed();
+    }
+
+    /**
+     * Compare the provided composite to the next atom to read on disk.
+     *
+     * This will not read/deserialize the whole atom but only what is necessary for the
+     * comparison. Whenever we know what to do with this atom (read it or skip it),
+     * readNext or skipNext should be called.
+     */
+    public int compareNextTo(Composite composite) throws IOException
+    {
+        return nameDeserializer.compareNextTo(composite);
+    }
+
+    /**
+     * Returns the next atom.
+     */
+    public OnDiskAtom readNext() throws IOException
+    {
+        Composite name = nameDeserializer.readNext();
+        assert !name.isEmpty(); // This would imply hasNext() hasn't been called
+        int b = in.readUnsignedByte();
+        if ((b & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0)
+            return type.rangeTombstoneSerializer().deserializeBody(in, name, version);
+        else
+            return type.columnSerializer().deserializeColumnBody(in, (CellName)name, b, flag, expireBefore);
+    }
+
+    /**
+     * Skips the next atom.
+     */
+    public void skipNext() throws IOException
+    {
+        nameDeserializer.skipNext();
+        int b = in.readUnsignedByte();
+        if ((b & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0)
+            type.rangeTombstoneSerializer().skipBody(in, version);
+        else
+            type.columnSerializer().skipColumnBody(in, b);
+    }
+}
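
A rough sketch of how a reader is expected to drive this class: compare cell names first, then either materialize or skip the body. The AtomDeserializer calls are the ones defined above; the surrounding method and its arguments are hypothetical:

    import java.io.IOException;

    import org.apache.cassandra.db.AtomDeserializer;
    import org.apache.cassandra.db.OnDiskAtom;
    import org.apache.cassandra.db.composites.Composite;

    class AtomScanSketch
    {
        // Skips everything that sorts before 'start' and returns the first atom at or after it.
        static OnDiskAtom seek(AtomDeserializer deserializer, Composite start) throws IOException
        {
            while (deserializer.hasNext())
            {
                // Only the cell name is deserialized for this comparison.
                if (deserializer.compareNextTo(start) < 0)
                    deserializer.skipNext();            // not interesting: skip the body
                else
                    return deserializer.readNext();     // first atom in range: materialize it
            }
            return null;
        }
    }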

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/AtomicSortedColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java
index b44d8bf..5056c26 100644
--- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java
+++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -27,9 +26,10 @@ import com.google.common.collect.Iterables;
 import edu.stanford.ppl.concurrent.SnapTreeMap;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.utils.Allocator;
 
 /**
@@ -70,9 +70,9 @@ public class AtomicSortedColumns extends ColumnFamily
         this.ref = new AtomicReference<>(holder);
     }
 
-    public AbstractType<?> getComparator()
+    public CellNameType getComparator()
     {
-        return (AbstractType<?>)ref.get().map.comparator();
+        return (CellNameType)ref.get().map.comparator();
     }
 
     public ColumnFamily.Factory getFactory()
@@ -233,12 +233,12 @@ public class AtomicSortedColumns extends ColumnFamily
         while (!ref.compareAndSet(current, modified));
     }
 
-    public Column getColumn(ByteBuffer name)
+    public Column getColumn(CellName name)
     {
         return ref.get().map.get(name);
     }
 
-    public SortedSet<ByteBuffer> getColumnNames()
+    public SortedSet<CellName> getColumnNames()
     {
         return ref.get().map.keySet();
     }
@@ -279,15 +279,15 @@ public class AtomicSortedColumns extends ColumnFamily
         // so we can safely alias one DeletionInfo.live() reference and avoid some allocations.
         private static final DeletionInfo LIVE = DeletionInfo.live();
 
-        final SnapTreeMap<ByteBuffer, Column> map;
+        final SnapTreeMap<CellName, Column> map;
         final DeletionInfo deletionInfo;
 
-        Holder(AbstractType<?> comparator)
+        Holder(CellNameType comparator)
         {
-            this(new SnapTreeMap<ByteBuffer, Column>(comparator), LIVE);
+            this(new SnapTreeMap<CellName, Column>(comparator), LIVE);
         }
 
-        Holder(SnapTreeMap<ByteBuffer, Column> map, DeletionInfo deletionInfo)
+        Holder(SnapTreeMap<CellName, Column> map, DeletionInfo deletionInfo)
         {
             this.map = map;
             this.deletionInfo = deletionInfo;
@@ -303,7 +303,7 @@ public class AtomicSortedColumns extends ColumnFamily
             return new Holder(map, info);
         }
 
-        Holder with(SnapTreeMap<ByteBuffer, Column> newMap)
+        Holder with(SnapTreeMap<CellName, Column> newMap)
         {
             return new Holder(newMap, deletionInfo);
         }
@@ -312,12 +312,12 @@ public class AtomicSortedColumns extends ColumnFamily
         // afterwards.
         Holder clear()
         {
-            return new Holder(new SnapTreeMap<ByteBuffer, Column>(map.comparator()), LIVE);
+            return new Holder(new SnapTreeMap<CellName, Column>(map.comparator()), LIVE);
         }
 
         long addColumn(Column column, Allocator allocator, SecondaryIndexManager.Updater indexer)
         {
-            ByteBuffer name = column.name();
+            CellName name = column.name();
             while (true)
             {
                 Column oldColumn = map.putIfAbsent(name, column);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/BatchlogManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/BatchlogManager.java b/src/java/org/apache/cassandra/db/BatchlogManager.java
index bba3ffe..9bc857b 100644
--- a/src/java/org/apache/cassandra/db/BatchlogManager.java
+++ b/src/java/org/apache/cassandra/db/BatchlogManager.java
@@ -35,6 +35,7 @@ import javax.management.ObjectName;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.apache.cassandra.db.composites.CellName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,7 +46,6 @@ import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.marshal.LongType;
-import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
@@ -128,9 +128,9 @@ public class BatchlogManager implements BatchlogManagerMBean
         ByteBuffer data = serializeRowMutations(mutations);
 
         ColumnFamily cf = ArrayBackedSortedColumns.factory.create(CFMetaData.BatchlogCf);
-        cf.addColumn(new Column(columnName(""), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp));
-        cf.addColumn(new Column(columnName("data"), data, timestamp));
-        cf.addColumn(new Column(columnName("written_at"), writtenAt, timestamp));
+        cf.addColumn(new Column(cellName(""), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp));
+        cf.addColumn(new Column(cellName("data"), data, timestamp));
+        cf.addColumn(new Column(cellName("written_at"), writtenAt, timestamp));
 
         return new RowMutation(Keyspace.SYSTEM_KS, UUIDType.instance.decompose(uuid), cf);
     }
@@ -282,9 +282,9 @@ public class BatchlogManager implements BatchlogManagerMBean
         return (int) ((HintedHandOffManager.calculateHintTTL(mutation) * 1000 - (System.currentTimeMillis() - writtenAt)) / 1000);
     }
 
-    private static ByteBuffer columnName(String name)
+    private static CellName cellName(String name)
     {
-        return CFMetaData.BatchlogCf.getColumnNameBuilder().add(UTF8Type.instance.decompose(name)).build();
+        return CFMetaData.BatchlogCf.comparator.makeCellName(name);
     }
 
     // force flush + compaction to reclaim space from the replayed batches


[04/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tools/SSTableImport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableImport.java b/src/java/org/apache/cassandra/tools/SSTableImport.java
index 41986b2..0a6ef6b 100644
--- a/src/java/org/apache/cassandra/tools/SSTableImport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableImport.java
@@ -37,12 +37,12 @@ import org.apache.commons.cli.PosixParser;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.sstable.SSTableWriter;
 import org.apache.cassandra.serializers.MarshalException;
@@ -107,12 +107,12 @@ public class SSTableImport
         {
             if (json instanceof List)
             {
-                AbstractType<?> comparator = oldSCFormat ? SuperColumns.getComparatorFor(meta, isSubColumn) : meta.comparator;
+                CellNameType comparator = oldSCFormat ? new SimpleDenseCellNameType(SuperColumns.getComparatorFor(meta, isSubColumn)) : meta.comparator;
                 List fields = (List<?>) json;
 
                 assert fields.size() >= 3 : "Column definition should have at least 3";
 
-                name  = stringAsType((String) fields.get(0), comparator);
+                name  = stringAsType((String) fields.get(0), comparator.asAbstractType());
                 timestamp = (Long) fields.get(2);
                 kind = "";
 
@@ -157,11 +157,11 @@ public class SSTableImport
                 }
                 else if (isRangeTombstone())
                 {
-                    value = comparator.fromString((String)fields.get(1));
+                    value = stringAsType((String) fields.get(1), comparator.asAbstractType());
                 }
                 else
                 {
-                    value = stringAsType((String) fields.get(1), meta.getValueValidator(meta.getColumnDefinitionFromCellName(name)));
+                    value = stringAsType((String) fields.get(1), meta.getValueValidatorForFullCellName(name));
                 }
             }
         }
@@ -239,7 +239,21 @@ public class SSTableImport
         for (Object c : row)
         {
             JsonColumn col = new JsonColumn<List>((List) c, cfm, oldSCFormat, (superName != null));
-            ByteBuffer cname = superName == null ? col.getName() : CompositeType.build(superName, col.getName());
+            if (col.isRangeTombstone())
+            {
+                Composite start = superName == null
+                                ? cfm.comparator.fromByteBuffer(col.getName())
+                                : cfm.comparator.make(superName, col.getName());
+                Composite end = superName == null
+                              ? cfm.comparator.fromByteBuffer(col.getValue())
+                              : cfm.comparator.make(superName, col.getValue());
+                cfamily.addAtom(new RangeTombstone(start, end, col.timestamp, col.localExpirationTime));
+                continue;
+            }
+
+            CellName cname = superName == null
+                           ? cfm.comparator.cellFromByteBuffer(col.getName())
+                           : cfm.comparator.makeCellName(superName, col.getName());
 
             if (col.isExpiring())
             {
@@ -255,13 +269,15 @@ public class SSTableImport
             }
             else if (col.isRangeTombstone())
             {
-                ByteBuffer end = superName == null ? col.getValue() : CompositeType.build(superName, col.getValue());
+                CellName end = superName == null
+                             ? cfm.comparator.cellFromByteBuffer(col.getValue())
+                             : cfm.comparator.makeCellName(superName, col.getValue());
                 cfamily.addAtom(new RangeTombstone(cname, end, col.timestamp, col.localExpirationTime));
             }
             // cql3 row marker, see CASSANDRA-5852
-            else if (!cname.hasRemaining())
+            else if (cname.isEmpty())
             {
-                cfamily.addColumn(ByteBuffer.wrap(new byte[3]), col.getValue(), col.timestamp);
+                cfamily.addColumn(cfm.comparator.rowMarker(Composites.EMPTY), col.getValue(), col.timestamp);
             }
             else
             {
@@ -298,14 +314,14 @@ public class SSTableImport
         CFMetaData metaData = cfamily.metadata();
         assert metaData != null;
 
-        AbstractType<?> comparator = metaData.comparator;
+        CellNameType comparator = metaData.comparator;
 
         // Super columns
         for (Map.Entry<?, ?> entry : row.entrySet())
         {
             Map<?, ?> data = (Map<?, ?>) entry.getValue();
 
-            ByteBuffer superName = stringAsType((String) entry.getKey(), ((CompositeType)comparator).types.get(0));
+            ByteBuffer superName = stringAsType((String) entry.getKey(), comparator.subtype(0));
 
             addColumnsToCF((List<?>) data.get("subColumns"), superName, cfamily);
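
To summarize the cell-name handling introduced above for the JSON import path (all calls are the ones used in the hunks, with the same variable names):

    // regular column:            CellName  name  = cfm.comparator.cellFromByteBuffer(col.getName());
    // sub-column of a super CF:  CellName  name  = cfm.comparator.makeCellName(superName, col.getName());
    // range tombstone bound:     Composite start = cfm.comparator.fromByteBuffer(col.getName());
    // CQL3 row marker:           cfamily.addColumn(cfm.comparator.rowMarker(Composites.EMPTY), col.getValue(), col.timestamp);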
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tracing/Tracing.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tracing/Tracing.java b/src/java/org/apache/cassandra/tracing/Tracing.java
index 3c3092e..b1f618d 100644
--- a/src/java/org/apache/cassandra/tracing/Tracing.java
+++ b/src/java/org/apache/cassandra/tracing/Tracing.java
@@ -33,8 +33,8 @@ import org.slf4j.LoggerFactory;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.exceptions.OverloadedException;
 import org.apache.cassandra.exceptions.UnavailableException;
@@ -71,27 +71,27 @@ public class Tracing
 
     public static final Tracing instance = new Tracing();
 
-    public static void addColumn(ColumnFamily cf, ByteBuffer name, InetAddress address)
+    public static void addColumn(ColumnFamily cf, CellName name, InetAddress address)
     {
         addColumn(cf, name, ByteBufferUtil.bytes(address));
     }
 
-    public static void addColumn(ColumnFamily cf, ByteBuffer name, int value)
+    public static void addColumn(ColumnFamily cf, CellName name, int value)
     {
         addColumn(cf, name, ByteBufferUtil.bytes(value));
     }
 
-    public static void addColumn(ColumnFamily cf, ByteBuffer name, long value)
+    public static void addColumn(ColumnFamily cf, CellName name, long value)
     {
         addColumn(cf, name, ByteBufferUtil.bytes(value));
     }
 
-    public static void addColumn(ColumnFamily cf, ByteBuffer name, String value)
+    public static void addColumn(ColumnFamily cf, CellName name, String value)
     {
         addColumn(cf, name, ByteBufferUtil.bytes(value));
     }
 
-    private static void addColumn(ColumnFamily cf, ByteBuffer name, ByteBuffer value)
+    private static void addColumn(ColumnFamily cf, CellName name, ByteBuffer value)
     {
         cf.addColumn(new ExpiringColumn(name, value, System.currentTimeMillis(), TTL));
     }
@@ -100,17 +100,14 @@ public class Tracing
     {
         for (Map.Entry<String, String> entry : rawPayload.entrySet())
         {
-            cf.addColumn(new ExpiringColumn(buildName(cf.metadata(), bytes("parameters"), bytes(entry.getKey())),
+            cf.addColumn(new ExpiringColumn(buildName(cf.metadata(), "parameters", entry.getKey()),
                                             bytes(entry.getValue()), System.currentTimeMillis(), TTL));
         }
     }
 
-    public static ByteBuffer buildName(CFMetaData meta, ByteBuffer... args)
+    public static CellName buildName(CFMetaData meta, Object... args)
     {
-        ColumnNameBuilder builder = meta.getColumnNameBuilder();
-        for (ByteBuffer arg : args)
-            builder.add(arg);
-        return builder.build();
+        return meta.comparator.makeCellName(args);
     }
 
     public UUID getSessionId()
@@ -169,7 +166,7 @@ public class Tracing
                 {
                     CFMetaData cfMeta = CFMetaData.TraceSessionsCf;
                     ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cfMeta);
-                    addColumn(cf, buildName(cfMeta, bytes("duration")), elapsed);
+                    addColumn(cf, buildName(cfMeta, "duration"), elapsed);
                     mutateWithCatch(new RowMutation(TRACE_KS, sessionIdBytes, cf));
                 }
             });
@@ -207,10 +204,10 @@ public class Tracing
             {
                 CFMetaData cfMeta = CFMetaData.TraceSessionsCf;
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(cfMeta);
-                addColumn(cf, buildName(cfMeta, bytes("coordinator")), FBUtilities.getBroadcastAddress());
+                addColumn(cf, buildName(cfMeta, "coordinator"), FBUtilities.getBroadcastAddress());
                 addParameterColumns(cf, parameters);
-                addColumn(cf, buildName(cfMeta, bytes("request")), request);
-                addColumn(cf, buildName(cfMeta, bytes("started_at")), started_at);
+                addColumn(cf, buildName(cfMeta, "request"), request);
+                addColumn(cf, buildName(cfMeta, "started_at"), started_at);
                 mutateWithCatch(new RowMutation(TRACE_KS, sessionIdBytes, cf));
             }
         });
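
The effect of the buildName() change above, shown on one of its call sites (both forms appear in the hunks):

    // before: addColumn(cf, buildName(cfMeta, bytes("duration")), elapsed);
    //         with buildName assembling a ByteBuffer through ColumnNameBuilder.add(...).build()
    // after:  addColumn(cf, buildName(cfMeta, "duration"), elapsed);
    //         with buildName returning meta.comparator.makeCellName("duration"), i.e. a CellName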

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
index 4b3c24a..c294128 100644
--- a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
+++ b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java
@@ -29,6 +29,7 @@ import com.google.common.collect.Maps;
 
 import org.apache.cassandra.config.TriggerDefinition;
 import org.apache.cassandra.cql.QueryProcessor;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.CounterMutation;
 import org.apache.cassandra.db.IMutation;
@@ -94,7 +95,7 @@ public class TriggerExecutor
         {
             QueryProcessor.validateKey(mutation.key());
             for (ColumnFamily tcf : mutation.getColumnFamilies())
-                for (ByteBuffer tName : tcf.getColumnNames())
+                for (CellName tName : tcf.getColumnNames())
                     QueryProcessor.validateColumn(tcf.metadata(), tName, tcf.getColumn(tName).value());
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/utils/FBUtilities.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java
index 7eb735e..ccdc4f3 100644
--- a/src/java/org/apache/cassandra/utils/FBUtilities.java
+++ b/src/java/org/apache/cassandra/utils/FBUtilities.java
@@ -504,9 +504,11 @@ public class FBUtilities
         }
     }
 
-    public static <T extends Comparable> SortedSet<T> singleton(T column)
+    public static <T> SortedSet<T> singleton(T column, Comparator<? super T> comparator)
     {
-        return new TreeSet<T>(Arrays.asList(column));
+        SortedSet<T> s = new TreeSet<T>(comparator);
+        s.add(column);
+        return s;
     }
 
     public static String toString(Map<?,?> map)
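
The Comparable bound is dropped here because cell names are now CellName objects ordered by the table's CellNameType rather than naturally ordered ByteBuffers, so callers have to pass the ordering in. A hedged usage sketch (the variables are illustrative; the singleton() signature is the one above):

    // 'name' is a CellName and 'type' the table's CellNameType, which is a Comparator over composites
    SortedSet<CellName> columns = FBUtilities.singleton(name, type);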

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/utils/ObjectSizes.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/ObjectSizes.java b/src/java/org/apache/cassandra/utils/ObjectSizes.java
index acee919..28ddffd 100644
--- a/src/java/org/apache/cassandra/utils/ObjectSizes.java
+++ b/src/java/org/apache/cassandra/utils/ObjectSizes.java
@@ -98,6 +98,19 @@ public class ObjectSizes
     }
 
     /**
+     * Memory a ByteBuffer array consumes.
+     */
+    public static long getArraySize(ByteBuffer[] array)
+    {
+        long allElementsSize = 0;
+        for (int i = 0; i < array.length; i++)
+            if (array[i] != null)
+                allElementsSize += getSize(array[i]);
+
+        return allElementsSize + getArraySize(array.length, getReferenceSize());
+    }
+
+    /**
      * Memory a byte buffer consumes
      * @param buffer ByteBuffer to calculate in memory size
      * @return Total in-memory size of the byte buffer

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/data/serialization/2.0/db.RowMutation.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.RowMutation.bin b/test/data/serialization/2.0/db.RowMutation.bin
deleted file mode 100644
index 73d93e8..0000000
Binary files a/test/data/serialization/2.0/db.RowMutation.bin and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/long/org/apache/cassandra/db/LongKeyspaceTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/LongKeyspaceTest.java b/test/long/org/apache/cassandra/db/LongKeyspaceTest.java
index b1ef690..0dd872c 100644
--- a/test/long/org/apache/cassandra/db/LongKeyspaceTest.java
+++ b/test/long/org/apache/cassandra/db/LongKeyspaceTest.java
@@ -56,10 +56,7 @@ public class LongKeyspaceTest extends SchemaLoader
                 {
                     for (int j = 0; j < i; j++)
                     {
-                        cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("key" + i),
-                                                                                "Standard1",
-                                                                                ByteBufferUtil.bytes("c" + j),
-                                                                                System.currentTimeMillis()));
+                        cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, Util.dk("key" + i), "c" + j));
                         KeyspaceTest.assertColumns(cf, "c" + j);
                     }
                 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/MeteredFlusherTest.java b/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
index 5e25744..99ae65a 100644
--- a/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
+++ b/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.db.marshal.UTF8Type;
@@ -41,11 +42,10 @@ public class MeteredFlusherTest extends SchemaLoader
         Keyspace keyspace = Keyspace.open("Keyspace1");
         for (int i = 0; i < 100; i++)
         {
-            CFMetaData metadata = new CFMetaData(keyspace.getName(), "_CF" + i, ColumnFamilyType.Standard, UTF8Type.instance, null);
+            CFMetaData metadata = CFMetaData.denseCFMetaData(keyspace.getName(), "_CF" + i, UTF8Type.instance);
             MigrationManager.announceNewColumnFamily(metadata);
         }
 
-        ByteBuffer name = ByteBufferUtil.bytes("c");
         for (int j = 0; j < 200; j++)
         {
             for (int i = 0; i < 100; i++)
@@ -54,7 +54,7 @@ public class MeteredFlusherTest extends SchemaLoader
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "_CF" + i);
                 // don't cheat by allocating this outside of the loop; that defeats the purpose of deliberately using lots of memory
                 ByteBuffer value = ByteBuffer.allocate(100000);
-                cf.addColumn(new Column(name, value));
+                cf.addColumn(new Column(Util.cellname("c"), value));
                 rm.add(cf);
                 rm.applyUnsafe();
             }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/long/org/apache/cassandra/db/commitlog/ComitLogStress.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/commitlog/ComitLogStress.java b/test/long/org/apache/cassandra/db/commitlog/ComitLogStress.java
index 3f65714..b212a22 100644
--- a/test/long/org/apache/cassandra/db/commitlog/ComitLogStress.java
+++ b/test/long/org/apache/cassandra/db/commitlog/ComitLogStress.java
@@ -8,6 +8,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.cassandra.Util;
 import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.db.RowMutation;
@@ -64,9 +65,9 @@ public class ComitLogStress
             String ks = "Keyspace1";
             ByteBuffer key = ByteBufferUtil.bytes(keyString);
             RowMutation mutation = new RowMutation(ks, key);
-            mutation.add("Standard1", ByteBufferUtil.bytes("name"), ByteBufferUtil.bytes("value"),
+            mutation.add("Standard1", Util.cellname("name"), ByteBufferUtil.bytes("value"),
                     System.currentTimeMillis());
             CommitLog.instance.add(mutation);
         }
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index 21c6457..68a2489 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -129,7 +129,7 @@ public class LongCompactionsTest extends SchemaLoader
                 DecoratedKey key = Util.dk(String.valueOf(i % 2));
                 RowMutation rm = new RowMutation(KEYSPACE1, key.key);
                 long timestamp = j * ROWS_PER_SSTABLE + i;
-                rm.add("Standard1", ByteBufferUtil.bytes(String.valueOf(i / 2)),
+                rm.add("Standard1", Util.cellname(String.valueOf(i / 2)),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp);
                 maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
index 0eb44d0..0de0395 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
@@ -61,7 +61,7 @@ public class LongLeveledCompactionStrategyTest extends SchemaLoader
             RowMutation rm = new RowMutation(ksname, key.key);
             for (int c = 0; c < columns; c++)
             {
-                rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
+                rm.add(cfname, Util.cellname("column" + c), value, 0);
             }
             rm.apply();
             store.forceBlockingFlush();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index 22b2424..9bba196 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -21,6 +21,7 @@ package org.apache.cassandra;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.net.MessagingService;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 1c13942..90dc629 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -145,12 +145,7 @@ public class SchemaLoader
                                            standardCFMD(ks1, "StandardGCGS0").gcGraceSeconds(0),
                                            standardCFMD(ks1, "StandardLong1"),
                                            standardCFMD(ks1, "StandardLong2"),
-                                           new CFMetaData(ks1,
-                                                          "ValuesWithQuotes",
-                                                          st,
-                                                          BytesType.instance,
-                                                          null)
-                                                   .defaultValidator(UTF8Type.instance),
+                                           CFMetaData.denseCFMetaData(ks1, "ValuesWithQuotes", BytesType.instance).defaultValidator(UTF8Type.instance),
                                            superCFMD(ks1, "Super1", LongType.instance),
                                            superCFMD(ks1, "Super2", LongType.instance),
                                            superCFMD(ks1, "Super3", LongType.instance),
@@ -159,44 +154,18 @@ public class SchemaLoader
                                            superCFMD(ks1, "Super6", LexicalUUIDType.instance, UTF8Type.instance),
                                            indexCFMD(ks1, "Indexed1", true),
                                            indexCFMD(ks1, "Indexed2", false),
-                                           new CFMetaData(ks1,
-                                                          "StandardInteger1",
-                                                          st,
-                                                          IntegerType.instance,
-                                                          null),
-                                           new CFMetaData(ks1,
-                                                          "Counter1",
-                                                          st,
-                                                          bytes,
-                                                          null)
-                                                   .defaultValidator(CounterColumnType.instance),
-                                           new CFMetaData(ks1,
-                                                          "SuperCounter1",
-                                                          su,
-                                                          bytes,
-                                                          bytes)
-                                                   .defaultValidator(CounterColumnType.instance),
+                                           CFMetaData.denseCFMetaData(ks1, "StandardInteger1", IntegerType.instance),
+                                           CFMetaData.denseCFMetaData(ks1, "Counter1", bytes).defaultValidator(CounterColumnType.instance),
+                                           CFMetaData.denseCFMetaData(ks1, "SuperCounter1", bytes, bytes).defaultValidator(CounterColumnType.instance),
                                            superCFMD(ks1, "SuperDirectGC", BytesType.instance).gcGraceSeconds(0),
-                                           jdbcCFMD(ks1, "JdbcInteger", IntegerType.instance).addColumnDefinition(integerColumn("ks1", "JdbcInteger")),
-                                           jdbcCFMD(ks1, "JdbcUtf8", UTF8Type.instance).addColumnDefinition(utf8Column("ks1", "JdbcUtf8")),
+                                           jdbcSparseCFMD(ks1, "JdbcInteger", IntegerType.instance).addColumnDefinition(integerColumn(ks1, "JdbcInteger")),
+                                           jdbcSparseCFMD(ks1, "JdbcUtf8", UTF8Type.instance).addColumnDefinition(utf8Column(ks1, "JdbcUtf8")),
                                            jdbcCFMD(ks1, "JdbcLong", LongType.instance),
                                            jdbcCFMD(ks1, "JdbcBytes", bytes),
                                            jdbcCFMD(ks1, "JdbcAscii", AsciiType.instance),
-                                           new CFMetaData(ks1,
-                                                          "StandardComposite",
-                                                          st,
-                                                          composite,
-                                                          null),
-                                           new CFMetaData(ks1,
-                                                          "StandardComposite2",
-                                                          st,
-                                                          compositeMaxMin,
-                                                          null),
-                                           new CFMetaData(ks1,
-                                                          "StandardDynamicComposite",
-                                                          st,
-                                                          dynamicComposite,
-                                                          null),
+                                           CFMetaData.denseCFMetaData(ks1, "StandardComposite", composite),
+                                           CFMetaData.denseCFMetaData(ks1, "StandardComposite2", compositeMaxMin),
+                                           CFMetaData.denseCFMetaData(ks1, "StandardDynamicComposite", dynamicComposite),
                                            standardCFMD(ks1, "StandardLeveled")
                                                                                .compactionStrategyClass(LeveledCompactionStrategy.class)
                                                                                .compactionStrategyOptions(leveledOptions),
@@ -238,11 +207,7 @@ public class SchemaLoader
                                            standardCFMD(ks4, "Standard3"),
                                            superCFMD(ks4, "Super3", bytes),
                                            superCFMD(ks4, "Super4", TimeUUIDType.instance),
-                                           new CFMetaData(ks4,
-                                                          "Super5",
-                                                          su,
-                                                          TimeUUIDType.instance,
-                                                          bytes)));
+                                           CFMetaData.denseCFMetaData(ks4, "Super5", TimeUUIDType.instance, bytes)));
 
         // Keyspace 5
         schema.add(KSMetaData.testMetadata(ks5,
@@ -335,7 +300,7 @@ public class SchemaLoader
                                                       SecondaryIndex.CUSTOM_INDEX_OPTION_NAME,
                                                       PerRowSecondaryIndexTest.TestIndex.class.getName());
 
-        CFMetaData cfm =  standardCFMD(ksName, cfName).keyValidator(AsciiType.instance);
+        CFMetaData cfm =  CFMetaData.sparseCFMetaData(ksName, cfName, BytesType.instance).keyValidator(AsciiType.instance);
 
         ByteBuffer cName = ByteBufferUtil.bytes("indexed");
         return cfm.addOrReplaceColumnDefinition(ColumnDefinition.regularDef(cfm, cName, AsciiType.instance, null)
@@ -355,7 +320,7 @@ public class SchemaLoader
 
     private static CFMetaData standardCFMD(String ksName, String cfName)
     {
-        return new CFMetaData(ksName, cfName, ColumnFamilyType.Standard, BytesType.instance, null);
+        return CFMetaData.denseCFMetaData(ksName, cfName, BytesType.instance);
     }
     private static CFMetaData superCFMD(String ksName, String cfName, AbstractType subcc)
     {
@@ -363,11 +328,11 @@ public class SchemaLoader
     }
     private static CFMetaData superCFMD(String ksName, String cfName, AbstractType cc, AbstractType subcc)
     {
-        return new CFMetaData(ksName, cfName, ColumnFamilyType.Super, cc, subcc);
+        return CFMetaData.denseCFMetaData(ksName, cfName, cc, subcc);
     }
     private static CFMetaData indexCFMD(String ksName, String cfName, final Boolean withIdxType) throws ConfigurationException
     {
-        CFMetaData cfm = standardCFMD(ksName, cfName).keyValidator(AsciiType.instance);
+        CFMetaData cfm = CFMetaData.sparseCFMetaData(ksName, cfName, BytesType.instance).keyValidator(AsciiType.instance);
 
         ByteBuffer cName = ByteBufferUtil.bytes("birthdate");
         IndexType keys = withIdxType ? IndexType.KEYS : null;
@@ -377,7 +342,7 @@ public class SchemaLoader
     private static CFMetaData compositeIndexCFMD(String ksName, String cfName, final Boolean withIdxType, boolean withOldCfIds) throws ConfigurationException
     {
         final CompositeType composite = CompositeType.getInstance(Arrays.asList(new AbstractType<?>[]{UTF8Type.instance, UTF8Type.instance})); 
-        CFMetaData cfm = new CFMetaData(ksName, cfName, ColumnFamilyType.Standard, composite, null);
+        CFMetaData cfm = CFMetaData.sparseCFMetaData(ksName, cfName, composite);
 
         ByteBuffer cName = ByteBufferUtil.bytes("col1");
         IndexType idxType = withIdxType ? IndexType.COMPOSITES : null;
@@ -387,7 +352,12 @@ public class SchemaLoader
     
     private static CFMetaData jdbcCFMD(String ksName, String cfName, AbstractType comp)
     {
-        return new CFMetaData(ksName, cfName, ColumnFamilyType.Standard, comp, null).defaultValidator(comp);
+        return CFMetaData.denseCFMetaData(ksName, cfName, comp).defaultValidator(comp);
+    }
+
+    private static CFMetaData jdbcSparseCFMD(String ksName, String cfName, AbstractType comp)
+    {
+        return CFMetaData.sparseCFMetaData(ksName, cfName, comp).defaultValidator(comp);
     }
 
     public static void cleanupAndLeaveDirs()
@@ -433,7 +403,7 @@ public class SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes("key" + i);
             RowMutation rowMutation = new RowMutation(keyspace, key);
-            rowMutation.add(columnFamily, ByteBufferUtil.bytes("col" + i), ByteBufferUtil.bytes("val" + i), System.currentTimeMillis());
+            rowMutation.add(columnFamily, Util.cellname("col" + i), ByteBufferUtil.bytes("val" + i), System.currentTimeMillis());
             rowMutation.applyUnsafe();
         }
     }
@@ -445,7 +415,7 @@ public class SchemaLoader
         for (int i = offset; i < offset + numberOfRows; i++)
         {
             DecoratedKey key = Util.dk("key" + i);
-            store.getColumnFamily(QueryFilter.getNamesFilter(key, columnFamily, ByteBufferUtil.bytes("col" + i), System.currentTimeMillis()));
+            store.getColumnFamily(Util.namesQueryFilter(store, key, "col" + i));
         }
     }
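
For reference, a minimal sketch of the two CFMetaData factory methods this hunk switches to; the keyspace and table names below are made up, and the chained options simply mirror the ones used in the schema definitions above:

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.db.marshal.AsciiType;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.db.marshal.UTF8Type;

    public class CFMetaDataFactorySketch
    {
        public static void main(String[] args)
        {
            // Dense: cell names are not declared up front (compact/thrift-style CFs).
            CFMetaData dense = CFMetaData.denseCFMetaData("ks_example", "DenseCF", BytesType.instance)
                                         .defaultValidator(UTF8Type.instance);

            // Sparse: cell names map to declared columns (CQL3-style tables).
            CFMetaData sparse = CFMetaData.sparseCFMetaData("ks_example", "SparseCF", BytesType.instance)
                                          .keyValidator(AsciiType.instance);

            System.out.println(dense.cfName + " / " + sparse.cfName);
        }
    }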
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/Util.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java
index a71dc48..87d19d2 100644
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@ -25,21 +25,26 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.compaction.AbstractCompactionTask;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
+import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.gms.ApplicationState;
@@ -77,19 +82,45 @@ public class Util
         return RowPosition.forKey(ByteBufferUtil.bytes(key), partitioner);
     }
 
+    public static CellName cellname(ByteBuffer... bbs)
+    {
+        if (bbs.length == 1)
+            return CellNames.simpleDense(bbs[0]);
+        else
+            return CellNames.compositeDense(bbs);
+    }
+
+    public static CellName cellname(String... strs)
+    {
+        ByteBuffer[] bbs = new ByteBuffer[strs.length];
+        for (int i = 0; i < strs.length; i++)
+            bbs[i] = ByteBufferUtil.bytes(strs[i]);
+        return cellname(bbs);
+    }
+
+    public static CellName cellname(int i)
+    {
+        return CellNames.simpleDense(ByteBufferUtil.bytes(i));
+    }
+
+    public static CellName cellname(long l)
+    {
+        return CellNames.simpleDense(ByteBufferUtil.bytes(l));
+    }
+
     public static Column column(String name, String value, long timestamp)
     {
-        return new Column(ByteBufferUtil.bytes(name), ByteBufferUtil.bytes(value), timestamp);
+        return new Column(cellname(name), ByteBufferUtil.bytes(value), timestamp);
     }
 
     public static Column expiringColumn(String name, String value, long timestamp, int ttl)
     {
-        return new ExpiringColumn(ByteBufferUtil.bytes(name), ByteBufferUtil.bytes(value), timestamp, ttl);
+        return new ExpiringColumn(cellname(name), ByteBufferUtil.bytes(value), timestamp, ttl);
     }
 
     public static Column counterColumn(String name, long value, long timestamp)
     {
-        return new CounterUpdateColumn(ByteBufferUtil.bytes(name), value, timestamp);
+        return new CounterUpdateColumn(cellname(name), value, timestamp);
     }
 
     public static Token token(String key)
@@ -114,9 +145,9 @@ public class Util
 
     public static void addMutation(RowMutation rm, String columnFamilyName, String superColumnName, long columnName, String value, long timestamp)
     {
-        ByteBuffer cname = superColumnName == null
-                         ? getBytes(columnName)
-                         : CompositeType.build(ByteBufferUtil.bytes(superColumnName), getBytes(columnName));
+        CellName cname = superColumnName == null
+                       ? CellNames.simpleDense(getBytes(columnName))
+                       : CellNames.compositeDense(ByteBufferUtil.bytes(superColumnName), getBytes(columnName));
         rm.add(columnFamilyName, cname, ByteBufferUtil.bytes(value), timestamp);
     }
 
@@ -281,4 +312,63 @@ public class Util
 
         assert thrown : exception.getName() + " not received";
     }
+
+    public static ByteBuffer serializeForSSTable(ColumnFamily cf)
+    {
+        try
+        {
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            DataOutputStream out = new DataOutputStream(baos);
+            DeletionTime.serializer.serialize(cf.deletionInfo().getTopLevelDeletion(), out);
+            out.writeInt(cf.getColumnCount());
+            new ColumnIndex.Builder(cf, ByteBufferUtil.EMPTY_BYTE_BUFFER, out).build(cf);
+            return ByteBuffer.wrap(baos.toByteArray());
+        }
+        catch (IOException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static QueryFilter namesQueryFilter(ColumnFamilyStore cfs, DecoratedKey key)
+    {
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
+        return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
+    }
+
+    public static QueryFilter namesQueryFilter(ColumnFamilyStore cfs, DecoratedKey key, String... names)
+    {
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
+        for (String str : names)
+            s.add(cellname(str));
+        return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
+    }
+
+    public static QueryFilter namesQueryFilter(ColumnFamilyStore cfs, DecoratedKey key, CellName... names)
+    {
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
+        for (CellName n : names)
+            s.add(n);
+        return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
+    }
+
+    public static NamesQueryFilter namesFilter(ColumnFamilyStore cfs, String... names)
+    {
+        SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
+        for (String str : names)
+            s.add(cellname(str));
+        return new NamesQueryFilter(s);
+    }
+
+    public static String string(ByteBuffer bb)
+    {
+        try
+        {
+            return ByteBufferUtil.string(bb);
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
 }
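
As a rough illustration of the helpers added above, this is how a test could write a cell and read it back through a names filter; "Keyspace1", "Standard1" and the key/cell names are illustrative only:

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class UtilHelpersSketch
    {
        public static void writeAndReadBack()
        {
            ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
            DecoratedKey dk = Util.dk("key0");

            // Cell names are now CellName objects rather than raw ByteBuffers.
            CellName col = Util.cellname("col0");
            RowMutation rm = new RowMutation("Keyspace1", dk.key);
            rm.add("Standard1", col, ByteBufferUtil.bytes("value0"), System.currentTimeMillis());
            rm.apply();

            // Names filters are built from CellNames via the new helper.
            ColumnFamily cf = cfs.getColumnFamily(Util.namesQueryFilter(cfs, dk, col));
            assert cf.getColumn(col) != null;
        }
    }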

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
index b8c061f..fd8b637 100644
--- a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
+++ b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.HashMap;
+import java.util.HashSet;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.cql3.QueryProcessor;
@@ -92,7 +93,7 @@ public class CFMetaDataTest extends SchemaLoader
         assertEquals(thriftCfDef.name, converted.name);
         assertEquals(thriftCfDef.default_validation_class, converted.default_validation_class);
         assertEquals(thriftCfDef.comment, converted.comment);
-        assertEquals(thriftCfDef.column_metadata, converted.column_metadata);
+        assertEquals(new HashSet<>(thriftCfDef.column_metadata), new HashSet<>(converted.column_metadata));
     }
 
     @Test
@@ -103,6 +104,9 @@ public class CFMetaDataTest extends SchemaLoader
             for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores())
             {
                 CFMetaData cfm = cfs.metadata;
+                if (!cfm.isThriftCompatible())
+                    continue;
+
                 checkInverses(cfm);
 
                 // Testing with compression to catch #3558
@@ -113,31 +117,13 @@ public class CFMetaDataTest extends SchemaLoader
         }
     }
 
-    private static CFMetaData withoutThriftIncompatible(CFMetaData cfm)
-    {
-        CFMetaData result = cfm.clone();
-
-        // This is a nasty hack to work around the fact that in thrift we exposes:
-        //   - neither definition with a non-nulll componentIndex
-        //   - nor non REGULAR definitions.
-        Iterator<ColumnDefinition> iter = result.allColumns().iterator();
-        while (iter.hasNext())
-        {
-            ColumnDefinition def = iter.next();
-            // Remove what we know is not thrift compatible
-            if (!def.isThriftCompatible())
-                iter.remove();
-        }
-        return result;
-    }
-
     private void checkInverses(CFMetaData cfm) throws Exception
     {
         DecoratedKey k = StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(cfm.ksName));
 
         // Test thrift conversion
-        CFMetaData before = withoutThriftIncompatible(cfm);
-        CFMetaData after = withoutThriftIncompatible(CFMetaData.fromThrift(before.toThrift()));
+        CFMetaData before = cfm;
+        CFMetaData after = CFMetaData.fromThrift(before.toThrift());
         assert before.equals(after) : String.format("\n%s\n!=\n%s", before, after);
 
         // Test schema conversion
@@ -145,7 +131,7 @@ public class CFMetaDataTest extends SchemaLoader
         ColumnFamily serializedCf = rm.getColumnFamily(Schema.instance.getId(Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF));
         ColumnFamily serializedCD = rm.getColumnFamily(Schema.instance.getId(Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_COLUMNS_CF));
         UntypedResultSet.Row result = QueryProcessor.resultify("SELECT * FROM system.schema_columnfamilies", new Row(k, serializedCf)).one();
-        CFMetaData newCfm = CFMetaData.addColumnDefinitionsFromSchema(CFMetaData.fromSchemaNoColumnsNoTriggers(result), new Row(k, serializedCD));
+        CFMetaData newCfm = CFMetaData.fromSchemaNoTriggers(result, ColumnDefinition.resultify(new Row(k, serializedCD)));
         assert cfm.equals(newCfm) : String.format("\n%s\n!=\n%s", cfm, newCfm);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java b/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
index d9f5a4f..8cc543d 100644
--- a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
+++ b/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
@@ -33,7 +33,7 @@ public class ColumnDefinitionTest
     @Test
     public void testSerializeDeserialize() throws Exception
     {
-        CFMetaData cfm = new CFMetaData("ks", "cf", ColumnFamilyType.Standard, UTF8Type.instance);
+        CFMetaData cfm = CFMetaData.denseCFMetaData("ks", "cf", UTF8Type.instance);
 
         ColumnDefinition cd0 = ColumnDefinition.regularDef(cfm, ByteBufferUtil.bytes("TestColumnDefinitionName0"), BytesType.instance, null)
                                                .setIndex("random index name 0", IndexType.KEYS, null);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/config/DefsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/DefsTest.java b/test/unit/org/apache/cassandra/config/DefsTest.java
index 754c029..fd2cb82 100644
--- a/test/unit/org/apache/cassandra/config/DefsTest.java
+++ b/test/unit/org/apache/cassandra/config/DefsTest.java
@@ -28,6 +28,7 @@ import org.apache.cassandra.OrderedJUnit4ClassRunner;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.UTF8Type;
@@ -40,6 +41,7 @@ import org.apache.cassandra.locator.OldNetworkTopologyStrategy;
 import org.apache.cassandra.locator.SimpleStrategy;
 import org.apache.cassandra.service.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import static org.apache.cassandra.Util.cellname;
 
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -53,7 +55,7 @@ public class DefsTest extends SchemaLoader
         CFMetaData cfm = new CFMetaData("Keyspace1",
                                         "TestApplyCFM_CF",
                                         ColumnFamilyType.Standard,
-                                        BytesType.instance);
+                                        new SimpleDenseCellNameType(BytesType.instance));
 
         for (int i = 0; i < 5; i++)
         {
@@ -65,7 +67,6 @@ public class DefsTest extends SchemaLoader
            .readRepairChance(0.5)
            .replicateOnWrite(false)
            .gcGraceSeconds(100000)
-           .defaultValidator(null)
            .minCompactionThreshold(500)
            .maxCompactionThreshold(500);
 
@@ -169,17 +170,18 @@ public class DefsTest extends SchemaLoader
         assert Schema.instance.getKSMetaData(ks).cfMetaData().get(newCf.cfName).equals(newCf);
 
         // now read and write to it.
+        CellName col0 = cellname("col0");
         DecoratedKey dk = Util.dk("key0");
         RowMutation rm = new RowMutation(ks, dk.key);
-        rm.add(cf, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(cf, col0, ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Keyspace.open(ks).getColumnFamilyStore(cf);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, cf, ByteBufferUtil.bytes("col0"), System.currentTimeMillis()));
-        assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        ColumnFamily cfam = store.getColumnFamily(Util.namesQueryFilter(store, dk, col0));
+        assert cfam.getColumn(col0) != null;
+        Column col = cfam.getColumn(col0);
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -196,7 +198,7 @@ public class DefsTest extends SchemaLoader
         // write some data, force a flush, then verify that files exist on disk.
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, cellname("col" + i), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
         ColumnFamilyStore store = Keyspace.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
         assert store != null;
@@ -212,7 +214,7 @@ public class DefsTest extends SchemaLoader
         boolean success = true;
         try
         {
-            rm.add("Standard1", ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
+            rm.add("Standard1", cellname("col0"), ByteBufferUtil.bytes("value0"), 1L);
             rm.apply();
         }
         catch (Throwable th)
@@ -243,16 +245,17 @@ public class DefsTest extends SchemaLoader
         assert Schema.instance.getKSMetaData(newCf.ksName).equals(newKs);
 
         // test reads and writes.
+        CellName col0 = cellname("col0");
         RowMutation rm = new RowMutation(newCf.ksName, dk.key);
-        rm.add(newCf.cfName, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(newCf.cfName, col0, ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Keyspace.open(newCf.ksName).getColumnFamilyStore(newCf.cfName);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, newCf.cfName, ByteBufferUtil.bytes("col0"), System.currentTimeMillis()));
-        assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        ColumnFamily cfam = store.getColumnFamily(Util.namesQueryFilter(store, dk, col0));
+        assert cfam.getColumn(col0) != null;
+        Column col = cfam.getColumn(col0);
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -269,7 +272,7 @@ public class DefsTest extends SchemaLoader
         // write some data, force a flush, then verify that files exist on disk.
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, cellname("col" + i), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
         ColumnFamilyStore store = Keyspace.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
         assert store != null;
@@ -285,7 +288,7 @@ public class DefsTest extends SchemaLoader
         boolean success = true;
         try
         {
-            rm.add("Standard1", ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
+            rm.add("Standard1", cellname("col0"), ByteBufferUtil.bytes("value0"), 1L);
             rm.apply();
         }
         catch (Throwable th)
@@ -320,7 +323,7 @@ public class DefsTest extends SchemaLoader
         // write some data
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, cellname("col" + i), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
 
         MigrationManager.announceKeyspaceDrop(ks.name);
@@ -350,17 +353,18 @@ public class DefsTest extends SchemaLoader
         assert Schema.instance.getKSMetaData(newKs.name).cfMetaData().get(newCf.cfName).equals(newCf);
 
         // now read and write to it.
+        CellName col0 = cellname("col0");
         DecoratedKey dk = Util.dk("key0");
         RowMutation rm = new RowMutation(newKs.name, dk.key);
-        rm.add(newCf.cfName, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(newCf.cfName, col0, ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Keyspace.open(newKs.name).getColumnFamilyStore(newCf.cfName);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, newCf.cfName, ByteBufferUtil.bytes("col0"), System.currentTimeMillis()));
-        assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        ColumnFamily cfam = store.getColumnFamily(Util.namesQueryFilter(store, dk, col0));
+        assert cfam.getColumn(col0) != null;
+        Column col = cfam.getColumn(col0);
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -410,7 +414,6 @@ public class DefsTest extends SchemaLoader
 
         // updating certain fields should fail.
         CFMetaData newCfm = cf.clone();
-        newCfm.columnMetadata(new HashMap<ByteBuffer, ColumnDefinition>());
         newCfm.defaultValidator(BytesType.instance);
         newCfm.minCompactionThreshold(5);
         newCfm.maxCompactionThreshold(31);
@@ -483,7 +486,7 @@ public class DefsTest extends SchemaLoader
         catch (ConfigurationException expected) {}
 
         // Change comparator
-        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, TimeUUIDType.instance);
+        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, new SimpleDenseCellNameType(TimeUUIDType.instance));
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -498,13 +501,13 @@ public class DefsTest extends SchemaLoader
     {
         // persist keyspace definition in the system keyspace
         Schema.instance.getKSMetaData("Keyspace6").toSchema(System.currentTimeMillis()).apply();
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace6").getColumnFamilyStore("Indexed1");
 
         // insert some data.  save the sstable descriptor so we can make sure it's marked for delete after the drop
         RowMutation rm = new RowMutation("Keyspace6", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(1L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", cellname("notbirthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 0);
         rm.apply();
-        ColumnFamilyStore cfs = Keyspace.open("Keyspace6").getColumnFamilyStore("Indexed1");
         cfs.forceBlockingFlush();
         ColumnFamilyStore indexedCfs = cfs.indexManager.getIndexForColumn(ByteBufferUtil.bytes("birthdate")).getIndexCfs();
         Descriptor desc = indexedCfs.getSSTables().iterator().next().descriptor;
@@ -524,7 +527,7 @@ public class DefsTest extends SchemaLoader
 
     private CFMetaData addTestCF(String ks, String cf, String comment)
     {
-        CFMetaData newCFMD = new CFMetaData(ks, cf, ColumnFamilyType.Standard, UTF8Type.instance);
+        CFMetaData newCFMD = new CFMetaData(ks, cf, ColumnFamilyType.Standard, new SimpleDenseCellNameType(UTF8Type.instance));
         newCFMD.comment(comment)
                .readRepairChance(0.0);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/ArrayBackedSortedColumnsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ArrayBackedSortedColumnsTest.java b/test/unit/org/apache/cassandra/db/ArrayBackedSortedColumnsTest.java
index c720d28..9074cf1 100644
--- a/test/unit/org/apache/cassandra/db/ArrayBackedSortedColumnsTest.java
+++ b/test/unit/org/apache/cassandra/db/ArrayBackedSortedColumnsTest.java
@@ -33,7 +33,9 @@ import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.utils.HeapAllocator;
 
 public class ArrayBackedSortedColumnsTest extends SchemaLoader
@@ -52,16 +54,17 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
 
     private void testAddInternal(boolean reversed)
     {
+        CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);
         ColumnFamily map = ArrayBackedSortedColumns.factory.create(metadata(), reversed);
         int[] values = new int[]{ 1, 2, 2, 3 };
 
         for (int i = 0; i < values.length; ++i)
-            map.addColumn(new Column(ByteBufferUtil.bytes(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
+            map.addColumn(new Column(type.makeCellName(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
 
         Iterator<Column> iter = map.iterator();
-        assertEquals("1st column", 1, iter.next().name().getInt(0));
-        assertEquals("2nd column", 2, iter.next().name().getInt(0));
-        assertEquals("3rd column", 3, iter.next().name().getInt(0));
+        assertEquals("1st column", 1, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("2nd column", 2, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("3rd column", 3, iter.next().name().toByteBuffer().getInt(0));
     }
 
     @Test
@@ -73,6 +76,7 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
 
     private void testAddAllInternal(boolean reversed)
     {
+        CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);
         ColumnFamily map = ArrayBackedSortedColumns.factory.create(metadata(), reversed);
         ColumnFamily map2 = ArrayBackedSortedColumns.factory.create(metadata(), reversed);
 
@@ -80,20 +84,20 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
         int[] values2 = new int[]{ 2, 4, 5, 6 };
 
         for (int i = 0; i < values1.length; ++i)
-            map.addColumn(new Column(ByteBufferUtil.bytes(values1[reversed ? values1.length - 1 - i : i])), HeapAllocator.instance);
+            map.addColumn(new Column(type.makeCellName(values1[reversed ? values1.length - 1 - i : i])), HeapAllocator.instance);
 
         for (int i = 0; i < values2.length; ++i)
-            map2.addColumn(new Column(ByteBufferUtil.bytes(values2[reversed ? values2.length - 1 - i : i])), HeapAllocator.instance);
+            map2.addColumn(new Column(type.makeCellName(values2[reversed ? values2.length - 1 - i : i])), HeapAllocator.instance);
 
         map2.addAll(map, HeapAllocator.instance, Functions.<Column>identity());
 
         Iterator<Column> iter = map2.iterator();
-        assertEquals("1st column", 1, iter.next().name().getInt(0));
-        assertEquals("2nd column", 2, iter.next().name().getInt(0));
-        assertEquals("3rd column", 3, iter.next().name().getInt(0));
-        assertEquals("4st column", 4, iter.next().name().getInt(0));
-        assertEquals("5st column", 5, iter.next().name().getInt(0));
-        assertEquals("6st column", 6, iter.next().name().getInt(0));
+        assertEquals("1st column", 1, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("2nd column", 2, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("3rd column", 3, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("4st column", 4, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("5st column", 5, iter.next().name().toByteBuffer().getInt(0));
+        assertEquals("6st column", 6, iter.next().name().toByteBuffer().getInt(0));
     }
 
     @Test
@@ -105,17 +109,18 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
 
     private void testGetCollectionInternal(boolean reversed)
     {
+        CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);
         ColumnFamily map = ArrayBackedSortedColumns.factory.create(metadata(), reversed);
         int[] values = new int[]{ 1, 2, 3, 5, 9 };
 
         List<Column> sorted = new ArrayList<Column>();
         for (int v : values)
-            sorted.add(new Column(ByteBufferUtil.bytes(v)));
+            sorted.add(new Column(type.makeCellName(v)));
         List<Column> reverseSorted = new ArrayList<Column>(sorted);
         Collections.reverse(reverseSorted);
 
         for (int i = 0; i < values.length; ++i)
-            map.addColumn(new Column(ByteBufferUtil.bytes(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
+            map.addColumn(new Column(type.makeCellName(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
 
         assertSame(sorted, map.getSortedColumns());
         assertSame(reverseSorted, map.getReverseSortedColumns());
@@ -130,15 +135,16 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
 
     private void testIteratorInternal(boolean reversed)
     {
+        CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);
         ColumnFamily map = ArrayBackedSortedColumns.factory.create(metadata(), reversed);
 
         int[] values = new int[]{ 1, 2, 3, 5, 9 };
 
         for (int i = 0; i < values.length; ++i)
-            map.addColumn(new Column(ByteBufferUtil.bytes(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
+            map.addColumn(new Column(type.makeCellName(values[reversed ? values.length - 1 - i : i])), HeapAllocator.instance);
 
-        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(ByteBufferUtil.bytes(3), ByteBufferUtil.EMPTY_BYTE_BUFFER) }));
-        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(ByteBufferUtil.bytes(4), ByteBufferUtil.EMPTY_BYTE_BUFFER) }));
+        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(type.make(3), Composites.EMPTY) }));
+        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(type.make(4), Composites.EMPTY) }));
 
         assertSame(map.iterator(), map.iterator(ColumnSlice.ALL_COLUMNS_ARRAY));
     }
@@ -161,7 +167,7 @@ public class ArrayBackedSortedColumnsTest extends SchemaLoader
         for (int name : names)
         {
             assert iter.hasNext() : "Expected " + name + " but no more result";
-            int value = ByteBufferUtil.toInt(iter.next().name());
+            int value = ByteBufferUtil.toInt(iter.next().name().toByteBuffer());
             assert name == value : "Expected " + name + " but got " + value;
         }
     }
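
A small sketch of the CellNameType pattern this test now relies on, assuming the composites API introduced by the patch; the Int32Type cell value is just for illustration:

    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.composites.*;
    import org.apache.cassandra.db.marshal.Int32Type;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameTypeSketch
    {
        public static void main(String[] args)
        {
            CellNameType type = new SimpleDenseCellNameType(Int32Type.instance);

            // makeCellName wraps the raw value into a dense CellName.
            Column c = new Column(type.makeCellName(42), ByteBufferUtil.bytes("v"), 0L);

            // The cell name's bytes remain reachable through toByteBuffer().
            System.out.println(ByteBufferUtil.toInt(c.name().toByteBuffer())); // 42
        }
    }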

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/CleanupTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/CleanupTest.java b/test/unit/org/apache/cassandra/db/CleanupTest.java
index 8199275..29a538e 100644
--- a/test/unit/org/apache/cassandra/db/CleanupTest.java
+++ b/test/unit/org/apache/cassandra/db/CleanupTest.java
@@ -149,7 +149,7 @@ public class CleanupTest extends SchemaLoader
             // create a row and update the birthdate value, test that the index query fetches the new version
             RowMutation rm;
             rm = new RowMutation(KEYSPACE1, ByteBufferUtil.bytes(key));
-            rm.add(cfs.name, COLUMN, VALUE, System.currentTimeMillis());
+            rm.add(cfs.name, Util.cellname(COLUMN), VALUE, System.currentTimeMillis());
             rm.applyUnsafe();
         }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/CollationControllerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/CollationControllerTest.java b/test/unit/org/apache/cassandra/db/CollationControllerTest.java
index 8763a41..5b7197b 100644
--- a/test/unit/org/apache/cassandra/db/CollationControllerTest.java
+++ b/test/unit/org/apache/cassandra/db/CollationControllerTest.java
@@ -26,8 +26,10 @@ import java.util.concurrent.ExecutionException;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
 import org.junit.Test;
 
 import org.apache.cassandra.io.sstable.SSTableReader;
@@ -45,7 +47,7 @@ public class CollationControllerTest extends SchemaLoader
         
         // add data
         rm = new RowMutation(keyspace.getName(), dk.key);
-        rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
         cfs.forceBlockingFlush();
         
@@ -57,20 +59,20 @@ public class CollationControllerTest extends SchemaLoader
         // add another mutation because sstable maxtimestamp isn't set
         // correctly during flush if the most recent mutation is a row delete
         rm = new RowMutation(keyspace.getName(), Util.dk("key2").key);
-        rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
+        rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
         rm.apply();
         
         cfs.forceBlockingFlush();
 
         // add yet one more mutation
         rm = new RowMutation(keyspace.getName(), dk.key);
-        rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("foobar"), 30);
+        rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("foobar"), 30);
         rm.apply();
         cfs.forceBlockingFlush();
 
         // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
         // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
-        QueryFilter filter = QueryFilter.getNamesFilter(dk, cfs.name, ByteBufferUtil.bytes("Column1"), System.currentTimeMillis());
+        QueryFilter filter = Util.namesQueryFilter(cfs, dk, "Column1");
         CollationController controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
         controller.getTopLevelColumns();
         assertEquals(1, controller.getSstablesIterated());
@@ -94,7 +96,7 @@ public class CollationControllerTest extends SchemaLoader
 
         RowMutation rm;
         DecoratedKey dk = Util.dk("key1");
-        ByteBuffer cellName = ByteBufferUtil.bytes("Column1");
+        CellName cellName = Util.cellname("Column1");
 
         // add data
         rm = new RowMutation(keyspace.getName(), dk.key);
@@ -113,7 +115,7 @@ public class CollationControllerTest extends SchemaLoader
         long queryAt = System.currentTimeMillis() + 1000;
         int gcBefore = cfs.gcBefore(queryAt);
 
-        filter = QueryFilter.getNamesFilter(dk, cfs.name, cellName, queryAt);
+        filter = QueryFilter.getNamesFilter(dk, cfs.name, FBUtilities.singleton(cellName, cfs.getComparator()), queryAt);
         CollationController controller = new CollationController(cfs, filter, gcBefore);
         assert ColumnFamilyStore.removeDeleted(controller.getTopLevelColumns(), gcBefore) == null;
 


[10/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/CFRowAdder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CFRowAdder.java b/src/java/org/apache/cassandra/db/CFRowAdder.java
new file mode 100644
index 0000000..7c70cf6
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/CFRowAdder.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.MapType;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+/**
+ * Convenience object to populate a given CQL3 row in a ColumnFamily object.
+ *
+ * This is meant for when performance is not of the utmost importance. When
+ * performance matters, it may not be worth allocating such a builder.
+ */
+public class CFRowAdder
+{
+    public final ColumnFamily cf;
+    public final Composite prefix;
+    public final long timestamp;
+    private final int ldt;
+
+    public CFRowAdder(ColumnFamily cf, Composite prefix, long timestamp)
+    {
+        this.cf = cf;
+        this.prefix = prefix;
+        this.timestamp = timestamp;
+        this.ldt = (int) (System.currentTimeMillis() / 1000);
+
+        // If a CQL3 table, add the row marker
+        if (cf.metadata().isCQL3Table())
+            cf.addColumn(new Column(cf.getComparator().rowMarker(prefix), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp));
+    }
+
+    public CFRowAdder add(String cql3ColumnName, Object value)
+    {
+        ColumnDefinition def = getDefinition(cql3ColumnName);
+        return add(cf.getComparator().create(prefix, def.name), def, value);
+    }
+
+    public CFRowAdder addMapEntry(String cql3ColumnName, Object key, Object value)
+    {
+        ColumnDefinition def = getDefinition(cql3ColumnName);
+        assert def.type instanceof MapType;
+        MapType mt = (MapType)def.type;
+        CellName name = cf.getComparator().create(prefix, def.name, mt.keys.decompose(key));
+        return add(name, def, value);
+    }
+
+    private ColumnDefinition getDefinition(String name)
+    {
+        return cf.metadata().getColumnDefinition(new ColumnIdentifier(name, false));
+    }
+
+    private CFRowAdder add(CellName name, ColumnDefinition def, Object value)
+    {
+        if (value == null)
+            cf.addColumn(new DeletedColumn(name, ldt, timestamp));
+        else
+            cf.addColumn(new Column(name, ((AbstractType)def.type).decompose(value), timestamp));
+        return this;
+    }
+}
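
To make the intent of this class concrete, a hedged sketch of adding one CQL3 row with it; the column names ("value", "props") are hypothetical, and the ColumnFamily plus clustering prefix are assumed to come from elsewhere:

    import org.apache.cassandra.db.CFRowAdder;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.utils.FBUtilities;

    public class CFRowAdderSketch
    {
        // cf must belong to a CQL3 table; prefix is the clustering prefix of the row.
        public static void populateRow(ColumnFamily cf, Composite prefix)
        {
            new CFRowAdder(cf, prefix, FBUtilities.timestampMicros())
                .add("value", "some text")        // regular column (hypothetical name)
                .addMapEntry("props", "k", "v");  // map column (hypothetical name)
        }
    }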

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/CollationController.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CollationController.java b/src/java/org/apache/cassandra/db/CollationController.java
index d07e772..12f53db 100644
--- a/src/java/org/apache/cassandra/db/CollationController.java
+++ b/src/java/org/apache/cassandra/db/CollationController.java
@@ -17,13 +17,13 @@
  */
 package org.apache.cassandra.db;
 
-import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.collect.Iterables;
 
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.CounterColumnType;
@@ -94,7 +94,7 @@ public class CollationController
             // avoid changing the filter columns of the original filter
             // (reduceNameFilter removes columns that are known to be irrelevant)
             NamesQueryFilter namesFilter = (NamesQueryFilter) filter.filter;
-            TreeSet<ByteBuffer> filterColumns = new TreeSet<ByteBuffer>(namesFilter.columns);
+            TreeSet<CellName> filterColumns = new TreeSet<>(namesFilter.columns);
             QueryFilter reducedFilter = new QueryFilter(filter.key, filter.cfName, namesFilter.withUpdatedColumns(filterColumns), filter.timestamp);
 
             /* add the SSTables on disk */
@@ -173,9 +173,9 @@ public class CollationController
         if (container == null)
             return;
 
-        for (Iterator<ByteBuffer> iterator = ((NamesQueryFilter) filter.filter).columns.iterator(); iterator.hasNext(); )
+        for (Iterator<CellName> iterator = ((NamesQueryFilter) filter.filter).columns.iterator(); iterator.hasNext(); )
         {
-            ByteBuffer filterColumn = iterator.next();
+            CellName filterColumn = iterator.next();
             Column column = container.getColumn(filterColumn);
             if (column != null && column.timestamp() > sstableTimestamp)
                 iterator.remove();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/Column.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Column.java b/src/java/org/apache/cassandra/db/Column.java
index e103cd3..4be1eeb 100644
--- a/src/java/org/apache/cassandra/db/Column.java
+++ b/src/java/org/apache/cassandra/db/Column.java
@@ -20,16 +20,15 @@ package org.apache.cassandra.db;
 import java.io.DataInput;
 import java.io.IOError;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
-import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.List;
 
 import com.google.common.collect.AbstractIterator;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.util.DataOutputBuffer;
@@ -46,18 +45,16 @@ public class Column implements OnDiskAtom
 {
     public static final int MAX_NAME_LENGTH = FBUtilities.MAX_UNSIGNED_SHORT;
 
-    public static final ColumnSerializer serializer = new ColumnSerializer();
-
-    public static OnDiskAtom.Serializer onDiskSerializer()
-    {
-        return OnDiskAtom.Serializer.instance;
-    }
-
     /**
      * For 2.0-formatted sstables (where column count is not stored), @param count should be Integer.MAX_VALUE,
      * and we will look for the end-of-row column name marker instead of relying on that.
      */
-    public static Iterator<OnDiskAtom> onDiskIterator(final DataInput in, final int count, final ColumnSerializer.Flag flag, final int expireBefore, final Descriptor.Version version)
+    public static Iterator<OnDiskAtom> onDiskIterator(final DataInput in,
+                                                      final int count,
+                                                      final ColumnSerializer.Flag flag,
+                                                      final int expireBefore,
+                                                      final Descriptor.Version version,
+                                                      final CellNameType type)
     {
         return new AbstractIterator<OnDiskAtom>()
         {
@@ -71,7 +68,7 @@ public class Column implements OnDiskAtom
                 OnDiskAtom atom;
                 try
                 {
-                    atom = onDiskSerializer().deserializeFromSSTable(in, flag, expireBefore, version);
+                    atom = type.onDiskAtomSerializer().deserializeFromSSTable(in, flag, expireBefore, version);
                 }
                 catch (IOException e)
                 {
@@ -85,31 +82,30 @@ public class Column implements OnDiskAtom
         };
     }
 
-    protected final ByteBuffer name;
+    protected final CellName name;
     protected final ByteBuffer value;
     protected final long timestamp;
 
-    Column(ByteBuffer name)
+    Column(CellName name)
     {
         this(name, ByteBufferUtil.EMPTY_BYTE_BUFFER);
     }
 
-    public Column(ByteBuffer name, ByteBuffer value)
+    public Column(CellName name, ByteBuffer value)
     {
         this(name, value, 0);
     }
 
-    public Column(ByteBuffer name, ByteBuffer value, long timestamp)
+    public Column(CellName name, ByteBuffer value, long timestamp)
     {
         assert name != null;
         assert value != null;
-        assert name.remaining() <= Column.MAX_NAME_LENGTH;
         this.name = name;
         this.value = value;
         this.timestamp = timestamp;
     }
 
-    public Column withUpdatedName(ByteBuffer newName)
+    public Column withUpdatedName(CellName newName)
     {
         return new Column(newName, value, timestamp);
     }
@@ -119,7 +115,7 @@ public class Column implements OnDiskAtom
         return new Column(name, value, newTimestamp);
     }
 
-    public ByteBuffer name()
+    public CellName name()
     {
         return name;
     }
@@ -162,10 +158,10 @@ public class Column implements OnDiskAtom
 
     public int dataSize()
     {
-        return name().remaining() + value.remaining() + TypeSizes.NATIVE.sizeof(timestamp);
+        return name().dataSize() + value.remaining() + TypeSizes.NATIVE.sizeof(timestamp);
     }
 
-    public int serializedSize(TypeSizes typeSizes)
+    public int serializedSize(CellNameType type, TypeSizes typeSizes)
     {
         /*
          * Size of a column is =
@@ -175,14 +171,8 @@ public class Column implements OnDiskAtom
          * + 4 bytes which basically indicates the size of the byte array
          * + entire byte array.
         */
-        int nameSize = name.remaining();
         int valueSize = value.remaining();
-        return typeSizes.sizeof((short) nameSize) + nameSize + 1 + typeSizes.sizeof(timestamp) + typeSizes.sizeof(valueSize) + valueSize;
-    }
-
-    public long serializedSizeForSSTable()
-    {
-        return serializedSize(TypeSizes.NATIVE);
+        return ((int)type.cellSerializer().serializedSize(name, typeSizes)) + 1 + typeSizes.sizeof(timestamp) + typeSizes.sizeof(valueSize) + valueSize;
     }
 
     public int serializationFlags()
@@ -199,7 +189,7 @@ public class Column implements OnDiskAtom
 
     public void updateDigest(MessageDigest digest)
     {
-        digest.update(name.duplicate());
+        digest.update(name.toByteBuffer().duplicate());
         digest.update(value.duplicate());
 
         DataOutputBuffer buffer = new DataOutputBuffer();
@@ -273,10 +263,10 @@ public class Column implements OnDiskAtom
 
     public Column localCopy(ColumnFamilyStore cfs, Allocator allocator)
     {
-        return new Column(cfs.internOrCopy(name, allocator), allocator.clone(value), timestamp);
+        return new Column(name.copy(allocator), allocator.clone(value), timestamp);
     }
 
-    public String getString(AbstractType<?> comparator)
+    public String getString(CellNameType comparator)
     {
         StringBuilder sb = new StringBuilder();
         sb.append(comparator.getString(name));
@@ -298,7 +288,7 @@ public class Column implements OnDiskAtom
     {
         validateName(metadata);
 
-        AbstractType<?> valueValidator = metadata.getValueValidatorFromCellName(name());
+        AbstractType<?> valueValidator = metadata.getValueValidator(name());
         if (valueValidator != null)
             valueValidator.validate(value());
     }
@@ -308,7 +298,7 @@ public class Column implements OnDiskAtom
         return getLocalDeletionTime() < gcBefore;
     }
 
-    public static Column create(ByteBuffer name, ByteBuffer value, long timestamp, int ttl, CFMetaData metadata)
+    public static Column create(CellName name, ByteBuffer value, long timestamp, int ttl, CFMetaData metadata)
     {
         if (ttl <= 0)
             ttl = metadata.getDefaultTimeToLive();
@@ -317,53 +307,4 @@ public class Column implements OnDiskAtom
                ? new ExpiringColumn(name, value, timestamp, ttl)
                : new Column(name, value, timestamp);
     }
-
-    public static Column create(String value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), UTF8Type.instance.decompose(value), timestamp);
-    }
-
-    public static Column create(int value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), Int32Type.instance.decompose(value), timestamp);
-    }
-
-    public static Column create(boolean value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), BooleanType.instance.decompose(value), timestamp);
-    }
-
-    public static Column create(double value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), DoubleType.instance.decompose(value), timestamp);
-    }
-
-    public static Column create(ByteBuffer value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), value, timestamp);
-    }
-
-    public static Column create(InetAddress value, long timestamp, String... names)
-    {
-        return new Column(decomposeName(names), InetAddressType.instance.decompose(value), timestamp);
-    }
-
-    static ByteBuffer decomposeName(String... names)
-    {
-        assert names.length > 0;
-
-        if (names.length == 1)
-            return UTF8Type.instance.decompose(names[0]);
-
-        // not super performant.  at this time, only infrequently called schema code uses this.
-        List<AbstractType<?>> types = new ArrayList<AbstractType<?>>(names.length);
-        for (int i = 0; i < names.length; i++)
-            types.add(UTF8Type.instance);
-
-        CompositeType.Builder builder = new CompositeType.Builder(CompositeType.getInstance(types));
-        for (String name : names)
-            builder.add(UTF8Type.instance.decompose(name));
-        return builder.build();
-    }
 }
-
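
Note on the hunks above: the size of a cell's name is now delegated to the comparator's cell serializer instead of being computed as a short-length-prefixed ByteBuffer, and the static create(...)/decomposeName helpers go away. For reference, a minimal standalone sketch (plain Java, not Cassandra code) of the fixed layout the removed serializedSize() summed up for a simple, non-composite cell:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class SimpleCellSizeSketch
    {
        // 2-byte name length + name, 1 serialization-flags byte,
        // 8-byte timestamp, 4-byte value length + value
        static int serializedSize(ByteBuffer name, ByteBuffer value)
        {
            return 2 + name.remaining() + 1 + 8 + 4 + value.remaining();
        }

        public static void main(String[] args)
        {
            ByteBuffer name = ByteBuffer.wrap("col".getBytes(StandardCharsets.UTF_8));
            ByteBuffer value = ByteBuffer.wrap(new byte[]{ 1, 2, 3, 4 });
            System.out.println(serializedSize(name, value)); // 2 + 3 + 1 + 8 + 4 + 4 = 22
        }
    }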

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ColumnFamily.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamily.java b/src/java/org/apache/cassandra/db/ColumnFamily.java
index 2c00071..c2134c2 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamily.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamily.java
@@ -36,8 +36,10 @@ import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.cassandra.cache.IRowCacheEntry;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.io.sstable.ColumnNameHelper;
 import org.apache.cassandra.io.sstable.ColumnStats;
 import org.apache.cassandra.io.sstable.SSTable;
@@ -116,29 +118,29 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
         addColumn(column, HeapAllocator.instance);
     }
 
-    public void addColumn(ByteBuffer name, ByteBuffer value, long timestamp)
+    public void addColumn(CellName name, ByteBuffer value, long timestamp)
     {
         addColumn(name, value, timestamp, 0);
     }
 
-    public void addColumn(ByteBuffer name, ByteBuffer value, long timestamp, int timeToLive)
+    public void addColumn(CellName name, ByteBuffer value, long timestamp, int timeToLive)
     {
         assert !metadata().getDefaultValidator().isCommutative();
         Column column = Column.create(name, value, timestamp, timeToLive, metadata());
         addColumn(column);
     }
 
-    public void addCounter(ByteBuffer name, long value)
+    public void addCounter(CellName name, long value)
     {
         addColumn(new CounterUpdateColumn(name, value, System.currentTimeMillis()));
     }
 
-    public void addTombstone(ByteBuffer name, ByteBuffer localDeletionTime, long timestamp)
+    public void addTombstone(CellName name, ByteBuffer localDeletionTime, long timestamp)
     {
         addColumn(new DeletedColumn(name, localDeletionTime, timestamp));
     }
 
-    public void addTombstone(ByteBuffer name, int localDeletionTime, long timestamp)
+    public void addTombstone(CellName name, int localDeletionTime, long timestamp)
     {
         addColumn(new DeletedColumn(name, localDeletionTime, timestamp));
     }
@@ -220,13 +222,13 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
      * Get a column given its name, returning null if the column is not
      * present.
      */
-    public abstract Column getColumn(ByteBuffer name);
+    public abstract Column getColumn(CellName name);
 
     /**
      * Returns an iterable with the names of columns in this column map in the same order
      * as the underlying columns themselves.
      */
-    public abstract Iterable<ByteBuffer> getColumnNames();
+    public abstract Iterable<CellName> getColumnNames();
 
     /**
      * Returns the columns of this column map as a collection.
@@ -300,7 +302,7 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
         // takes care of those for us.)
         for (Column columnExternal : cfComposite)
         {
-            ByteBuffer cName = columnExternal.name();
+            CellName cName = columnExternal.name();
             Column columnInternal = getColumn(cName);
             if (columnInternal == null)
             {
@@ -372,7 +374,7 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
         if (isMarkedForDelete())
             sb.append(" -").append(deletionInfo()).append("-");
 
-        sb.append(" [").append(getComparator().getColumnsString(this)).append("])");
+        sb.append(" [").append(CellNames.getColumnsString(getComparator(), this)).append("])");
         return sb.toString();
     }
 
@@ -440,7 +442,7 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
     /**
      * @return the comparator whose sorting order the contained columns conform to
      */
-    public AbstractType<?> getComparator()
+    public CellNameType getComparator()
     {
         return metadata.comparator;
     }
@@ -478,9 +480,9 @@ public abstract class ColumnFamily implements Iterable<Column>, IRowCacheEntry
         return false;
     }
 
-    public Map<ByteBuffer, ByteBuffer> asMap()
+    public Map<CellName, ByteBuffer> asMap()
     {
-        ImmutableMap.Builder<ByteBuffer, ByteBuffer> builder = ImmutableMap.builder();
+        ImmutableMap.Builder<CellName, ByteBuffer> builder = ImmutableMap.builder();
         for (Column column : this)
             builder.put(column.name, column.value);
         return builder.build();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ColumnFamilySerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilySerializer.java b/src/java/org/apache/cassandra/db/ColumnFamilySerializer.java
index 3d832b2..13ec6fc 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilySerializer.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilySerializer.java
@@ -67,8 +67,8 @@ public class ColumnFamilySerializer implements IVersionedSerializer<ColumnFamily
                 return;
             }
 
-            DeletionInfo.serializer().serialize(cf.deletionInfo(), out, version);
-            ColumnSerializer columnSerializer = Column.serializer;
+            cf.getComparator().deletionInfoSerializer().serialize(cf.deletionInfo(), out, version);
+            ColumnSerializer columnSerializer = cf.getComparator().columnSerializer();
             int count = cf.getColumnCount();
             out.writeInt(count);
             int written = 0;
@@ -108,9 +108,9 @@ public class ColumnFamilySerializer implements IVersionedSerializer<ColumnFamily
         }
         else
         {
-            cf.delete(DeletionInfo.serializer().deserialize(in, version, cf.getComparator()));
+            cf.delete(cf.getComparator().deletionInfoSerializer().deserialize(in, version));
 
-            ColumnSerializer columnSerializer = Column.serializer;
+            ColumnSerializer columnSerializer = cf.getComparator().columnSerializer();
             int size = in.readInt();
             for (int i = 0; i < size; ++i)
                 cf.addColumn(columnSerializer.deserialize(in, flag));
@@ -128,10 +128,11 @@ public class ColumnFamilySerializer implements IVersionedSerializer<ColumnFamily
         }
         else
         {
-            size += DeletionInfo.serializer().serializedSize(cf.deletionInfo(), typeSizes, version);
+            size += cf.getComparator().deletionInfoSerializer().serializedSize(cf.deletionInfo(), typeSizes, version);
             size += typeSizes.sizeof(cf.getColumnCount());
+            ColumnSerializer columnSerializer = cf.getComparator().columnSerializer();
             for (Column column : cf)
-                size += column.serializedSize(typeSizes);
+                size += columnSerializer.serializedSize(column, typeSizes);
         }
         return size;
     }
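
With the serializers now fetched from the column family's comparator, the framing itself is unchanged: deletion info, a 4-byte cell count, then each cell through the comparator's own column serializer. A small generic sketch of that shape (hypothetical interfaces, not the real ones):

    import java.util.List;

    interface CellSerializerSketch<C>
    {
        long serializedSize(C cell);
    }

    interface ComparatorSketch<C>
    {
        CellSerializerSketch<C> columnSerializer();
    }

    public class CfSizeSketch
    {
        // deletion-info size + 4-byte cell count + per-cell sizes from the
        // comparator's serializer (instead of one static Column.serializer)
        static <C> long serializedSize(long deletionInfoSize, ComparatorSketch<C> comparator, List<C> cells)
        {
            long size = deletionInfoSize + 4;
            CellSerializerSketch<C> serializer = comparator.columnSerializer();
            for (C cell : cells)
                size += serializer.serializedSize(cell);
            return size;
        }

        public static void main(String[] args)
        {
            // toy comparator whose "cells" are strings sized as 2-byte length + bytes
            ComparatorSketch<String> comparator = () -> cell -> 2 + cell.length();
            System.out.println(serializedSize(0, comparator, List.of("a", "bb"))); // 11
        }
    }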

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index a98e30b..f00e281 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -33,7 +33,6 @@ import com.google.common.base.Function;
 import com.google.common.collect.*;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.Uninterruptibles;
-import org.cliffc.high_scale_lib.NonBlockingHashMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,10 +50,10 @@ import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.ReplayPosition;
 import org.apache.cassandra.db.compaction.*;
 import org.apache.cassandra.db.filter.*;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -94,9 +93,6 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
     public final SecondaryIndexManager indexManager;
 
-    private static final int INTERN_CUTOFF = 256;
-    public final ConcurrentMap<ByteBuffer, ByteBuffer> internedNames = new NonBlockingHashMap<ByteBuffer, ByteBuffer>();
-
     /* These are locally held copies to be changed from the config during runtime */
     private volatile DefaultInteger minCompactionThreshold;
     private volatile DefaultInteger maxCompactionThreshold;
@@ -932,8 +928,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     // 2. if it has been re-added since then, this particular column was inserted before the last drop
     private static boolean isDroppedColumn(Column c, CFMetaData meta)
     {
-        ByteBuffer cql3ColumnName = ((CompositeType) meta.comparator).extractLastComponent(c.name());
-        Long droppedAt = meta.getDroppedColumns().get(meta.getColumnDefinition(cql3ColumnName).name);
+        Long droppedAt = meta.getDroppedColumns().get(c.name().cql3ColumnName());
         return droppedAt != null && c.timestamp() <= droppedAt;
     }
 
@@ -1230,8 +1225,8 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     }
 
     public ColumnFamily getColumnFamily(DecoratedKey key,
-                                        ByteBuffer start,
-                                        ByteBuffer finish,
+                                        Composite start,
+                                        Composite finish,
                                         boolean reversed,
                                         int limit,
                                         long timestamp)
@@ -1617,8 +1612,8 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
      */
     public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> keyRange,
                                              SliceQueryFilter columnRange,
-                                             ByteBuffer columnStart,
-                                             ByteBuffer columnStop,
+                                             Composite columnStart,
+                                             Composite columnStop,
                                              List<IndexExpression> rowFilter,
                                              int maxResults,
                                              long now)
@@ -1756,7 +1751,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         }
     }
 
-    public AbstractType<?> getComparator()
+    public CellNameType getComparator()
     {
         return metadata.comparator;
     }
@@ -2244,35 +2239,6 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         return partitioner instanceof LocalPartitioner;
     }
 
-    private ByteBuffer intern(ByteBuffer name)
-    {
-        ByteBuffer internedName = internedNames.get(name);
-        if (internedName == null)
-        {
-            internedName = ByteBufferUtil.clone(name);
-            ByteBuffer concurrentName = internedNames.putIfAbsent(internedName, internedName);
-            if (concurrentName != null)
-                internedName = concurrentName;
-        }
-        return internedName;
-    }
-
-    public ByteBuffer internOrCopy(ByteBuffer name, Allocator allocator)
-    {
-        if (internedNames.size() >= INTERN_CUTOFF)
-            return allocator.clone(name);
-
-        return intern(name);
-    }
-
-    public ByteBuffer maybeIntern(ByteBuffer name)
-    {
-        if (internedNames.size() >= INTERN_CUTOFF)
-            return null;
-
-        return intern(name);
-    }
-
     public Iterable<ColumnFamilyStore> concatWithIndexes()
     {
         return Iterables.concat(indexManager.getIndexesBackedByCfs(), Collections.singleton(this));
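
For context, the interning scheme removed above, as a standalone sketch in plain Java (ConcurrentHashMap standing in for the NonBlockingHashMap dependency): a bounded putIfAbsent cache that de-duplicated identical column names so hot names shared one buffer. CellName instances now carry their own copy(allocator), so the per-CFS cache is no longer needed.

    import java.nio.ByteBuffer;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class NameInternCacheSketch
    {
        private static final int INTERN_CUTOFF = 256;
        private final ConcurrentMap<ByteBuffer, ByteBuffer> internedNames = new ConcurrentHashMap<>();

        public ByteBuffer internOrCopy(ByteBuffer name)
        {
            if (internedNames.size() >= INTERN_CUTOFF)
                return clone(name);                       // cache full: plain copy
            ByteBuffer interned = internedNames.get(name);
            if (interned != null)
                return interned;                          // already interned: share it
            ByteBuffer copy = clone(name);
            ByteBuffer raced = internedNames.putIfAbsent(copy, copy);
            return raced != null ? raced : copy;          // lose a race gracefully
        }

        private static ByteBuffer clone(ByteBuffer buffer)
        {
            ByteBuffer copy = ByteBuffer.allocate(buffer.remaining());
            copy.put(buffer.duplicate());
            copy.flip();
            return copy;
        }
    }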

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ColumnIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnIndex.java b/src/java/org/apache/cassandra/db/ColumnIndex.java
index cd1baca..3e6d55d 100644
--- a/src/java/org/apache/cassandra/db/ColumnIndex.java
+++ b/src/java/org/apache/cassandra/db/ColumnIndex.java
@@ -25,6 +25,7 @@ import java.util.*;
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.sstable.IndexHelper;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -53,8 +54,6 @@ public class ColumnIndex
      */
     public static class Builder
     {
-        private static final OnDiskAtom.Serializer atomSerializer = Column.onDiskSerializer();
-
         private final ColumnIndex result;
         private final long indexOffset;
         private long startPosition = -1;
@@ -69,6 +68,8 @@ public class ColumnIndex
         private final ByteBuffer key;
         private final DeletionInfo deletionInfo; // only used for serializing and calculating row header size
 
+        private final OnDiskAtom.Serializer atomSerializer;
+
         public Builder(ColumnFamily cf,
                        ByteBuffer key,
                        DataOutput output)
@@ -83,6 +84,7 @@ public class ColumnIndex
             this.result = new ColumnIndex(new ArrayList<IndexHelper.IndexInfo>());
             this.output = output;
             this.tombstoneTracker = new RangeTombstone.Tracker(cf.getComparator());
+            this.atomSerializer = cf.getComparator().onDiskAtomSerializer();
         }
 
         /**
@@ -119,7 +121,7 @@ public class ColumnIndex
         public ColumnIndex build(ColumnFamily cf) throws IOException
         {
             // cf has disentangled the columns and range tombstones, we need to re-interleave them in comparator order
-            Comparator<ByteBuffer> comparator = cf.getComparator();
+            Comparator<Composite> comparator = cf.getComparator();
             DeletionInfo.InOrderTester tester = cf.deletionInfo().inOrderTester();
             Iterator<RangeTombstone> rangeIter = cf.deletionInfo().rangeIterator();
             RangeTombstone tombstone = rangeIter.hasNext() ? rangeIter.next() : null;
@@ -183,7 +185,7 @@ public class ColumnIndex
                                // where we wouldn't make any progress because a block is filled by said marker
             }
 
-            long size = column.serializedSizeForSSTable();
+            long size = atomSerializer.serializedSizeForSSTable(column);
             endPosition += size;
             blockSize += size;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ColumnSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnSerializer.java b/src/java/org/apache/cassandra/db/ColumnSerializer.java
index fb38b5f..a1c6ebd 100644
--- a/src/java/org/apache/cassandra/db/ColumnSerializer.java
+++ b/src/java/org/apache/cassandra/db/ColumnSerializer.java
@@ -22,9 +22,12 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 public class ColumnSerializer implements ISerializer<Column>
@@ -51,10 +54,17 @@ public class ColumnSerializer implements ISerializer<Column>
         LOCAL, FROM_REMOTE, PRESERVE_SIZE;
     }
 
+    private final CellNameType type;
+
+    public ColumnSerializer(CellNameType type)
+    {
+        this.type = type;
+    }
+
     public void serialize(Column column, DataOutput out) throws IOException
     {
-        assert column.name().remaining() > 0;
-        ByteBufferUtil.writeWithShortLength(column.name(), out);
+        assert !column.name().isEmpty();
+        type.cellSerializer().serialize(column.name(), out);
         try
         {
             out.writeByte(column.serializationFlags());
@@ -93,15 +103,13 @@ public class ColumnSerializer implements ISerializer<Column>
 
     public Column deserialize(DataInput in, ColumnSerializer.Flag flag, int expireBefore) throws IOException
     {
-        ByteBuffer name = ByteBufferUtil.readWithShortLength(in);
-        if (name.remaining() <= 0)
-            throw CorruptColumnException.create(in, name);
+        CellName name = type.cellSerializer().deserialize(in);
 
         int b = in.readUnsignedByte();
         return deserializeColumnBody(in, name, b, flag, expireBefore);
     }
 
-    Column deserializeColumnBody(DataInput in, ByteBuffer name, int mask, ColumnSerializer.Flag flag, int expireBefore) throws IOException
+    Column deserializeColumnBody(DataInput in, CellName name, int mask, ColumnSerializer.Flag flag, int expireBefore) throws IOException
     {
         if ((mask & COUNTER_MASK) != 0)
         {
@@ -130,9 +138,22 @@ public class ColumnSerializer implements ISerializer<Column>
         }
     }
 
-    public long serializedSize(Column column, TypeSizes type)
+    void skipColumnBody(DataInput in, int mask) throws IOException
+    {
+        if ((mask & COUNTER_MASK) != 0)
+            FileUtils.skipBytesFully(in, 16);
+        else if ((mask & EXPIRATION_MASK) != 0)
+            FileUtils.skipBytesFully(in, 16);
+        else
+            FileUtils.skipBytesFully(in, 8);
+
+        int length = in.readInt();
+        FileUtils.skipBytesFully(in, length);
+    }
+
+    public long serializedSize(Column column, TypeSizes typeSizes)
     {
-        return column.serializedSize(type);
+        return column.serializedSize(type, typeSizes);
     }
 
     public static class CorruptColumnException extends IOException
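
The new skipColumnBody walks past a cell without materializing it: a flag-dependent fixed-size header, then a 4-byte length-prefixed value. A standalone sketch of the same pattern in plain Java (the mask constants here are placeholders, not the real serialization flags):

    import java.io.DataInput;
    import java.io.IOException;

    public class SkipCellBodySketch
    {
        // placeholder flag bits, standing in for ColumnSerializer's masks
        static final int COUNTER_MASK = 0x04;
        static final int EXPIRATION_MASK = 0x02;

        static void skipBody(DataInput in, int mask) throws IOException
        {
            // counters and expiring cells carry 16 fixed bytes before the value,
            // plain cells only the 8-byte timestamp
            int fixed = (mask & (COUNTER_MASK | EXPIRATION_MASK)) != 0 ? 16 : 8;
            skipFully(in, fixed);
            int valueLength = in.readInt();
            skipFully(in, valueLength);
        }

        static void skipFully(DataInput in, int bytes) throws IOException
        {
            while (bytes > 0)
            {
                int skipped = in.skipBytes(bytes);
                if (skipped <= 0)
                    throw new IOException("unexpected end of input while skipping");
                bytes -= skipped;
            }
        }
    }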

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/CounterColumn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CounterColumn.java b/src/java/org/apache/cassandra/db/CounterColumn.java
index b470c5a..ac2c88e 100644
--- a/src/java/org/apache/cassandra/db/CounterColumn.java
+++ b/src/java/org/apache/cassandra/db/CounterColumn.java
@@ -31,9 +31,10 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.context.IContext.ContextRelationship;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.exceptions.OverloadedException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.io.util.DataOutputBuffer;
@@ -53,28 +54,28 @@ public class CounterColumn extends Column
 
     private final long timestampOfLastDelete;
 
-    public CounterColumn(ByteBuffer name, long value, long timestamp)
+    public CounterColumn(CellName name, long value, long timestamp)
     {
         this(name, contextManager.create(value, HeapAllocator.instance), timestamp);
     }
 
-    public CounterColumn(ByteBuffer name, long value, long timestamp, long timestampOfLastDelete)
+    public CounterColumn(CellName name, long value, long timestamp, long timestampOfLastDelete)
     {
         this(name, contextManager.create(value, HeapAllocator.instance), timestamp, timestampOfLastDelete);
     }
 
-    public CounterColumn(ByteBuffer name, ByteBuffer value, long timestamp)
+    public CounterColumn(CellName name, ByteBuffer value, long timestamp)
     {
         this(name, value, timestamp, Long.MIN_VALUE);
     }
 
-    public CounterColumn(ByteBuffer name, ByteBuffer value, long timestamp, long timestampOfLastDelete)
+    public CounterColumn(CellName name, ByteBuffer value, long timestamp, long timestampOfLastDelete)
     {
         super(name, value, timestamp);
         this.timestampOfLastDelete = timestampOfLastDelete;
     }
 
-    public static CounterColumn create(ByteBuffer name, ByteBuffer value, long timestamp, long timestampOfLastDelete, ColumnSerializer.Flag flag)
+    public static CounterColumn create(CellName name, ByteBuffer value, long timestamp, long timestampOfLastDelete, ColumnSerializer.Flag flag)
     {
         // #elt being negative means we have to clean delta
         short count = value.getShort(value.position());
@@ -84,7 +85,7 @@ public class CounterColumn extends Column
     }
 
     @Override
-    public Column withUpdatedName(ByteBuffer newName)
+    public Column withUpdatedName(CellName newName)
     {
         return new CounterColumn(newName, value, timestamp, timestampOfLastDelete);
     }
@@ -110,9 +111,9 @@ public class CounterColumn extends Column
     }
 
     @Override
-    public int serializedSize(TypeSizes typeSizes)
+    public int serializedSize(CellNameType type, TypeSizes typeSizes)
     {
-        return super.serializedSize(typeSizes) + typeSizes.sizeof(timestampOfLastDelete);
+        return super.serializedSize(type, typeSizes) + typeSizes.sizeof(timestampOfLastDelete);
     }
 
     @Override
@@ -147,7 +148,7 @@ public class CounterColumn extends Column
     @Override
     public void updateDigest(MessageDigest digest)
     {
-        digest.update(name.duplicate());
+        digest.update(name.toByteBuffer().duplicate());
         // We don't take the deltas into account in a digest
         contextManager.updateDigest(digest, value);
         DataOutputBuffer buffer = new DataOutputBuffer();
@@ -217,17 +218,17 @@ public class CounterColumn extends Column
     @Override
     public Column localCopy(ColumnFamilyStore cfs)
     {
-        return new CounterColumn(cfs.internOrCopy(name, HeapAllocator.instance), ByteBufferUtil.clone(value), timestamp, timestampOfLastDelete);
+        return localCopy(cfs, HeapAllocator.instance);
     }
 
     @Override
     public Column localCopy(ColumnFamilyStore cfs, Allocator allocator)
     {
-        return new CounterColumn(cfs.internOrCopy(name, allocator), allocator.clone(value), timestamp, timestampOfLastDelete);
+        return new CounterColumn(name.copy(allocator), allocator.clone(value), timestamp, timestampOfLastDelete);
     }
 
     @Override
-    public String getString(AbstractType<?> comparator)
+    public String getString(CellNameType comparator)
     {
         StringBuilder sb = new StringBuilder();
         sb.append(comparator.getString(name));
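
Since CellName is no longer a raw ByteBuffer, digesting a cell now flattens the name back to bytes first (name.toByteBuffer()). The underlying idiom is unchanged; a small standalone illustration in plain Java:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    public class DigestOverBuffersSketch
    {
        public static void main(String[] args) throws Exception
        {
            ByteBuffer name = ByteBuffer.wrap("counter1".getBytes(StandardCharsets.UTF_8));
            ByteBuffer value = ByteBuffer.wrap(new byte[]{ 0, 0, 0, 42 });

            MessageDigest digest = MessageDigest.getInstance("MD5");
            // duplicate() gives an independent position/limit, so the originals
            // are still fully readable after the digest consumes them
            digest.update(name.duplicate());
            digest.update(value.duplicate());

            System.out.println(digest.digest().length + "-byte digest; name/value untouched: "
                               + name.remaining() + "/" + value.remaining());
        }
    }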

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/CounterMutation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CounterMutation.java b/src/java/org/apache/cassandra/db/CounterMutation.java
index fb363c2..eae8e12 100644
--- a/src/java/org/apache/cassandra/db/CounterMutation.java
+++ b/src/java/org/apache/cassandra/db/CounterMutation.java
@@ -30,11 +30,11 @@ import java.util.UUID;
 
 import com.google.common.collect.Iterables;
 
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.net.MessageOut;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.HeapAllocator;
 
@@ -109,7 +109,7 @@ public class CounterMutation implements IMutation
 
     private void addReadCommandFromColumnFamily(String keyspaceName, ByteBuffer key, ColumnFamily columnFamily, long timestamp, List<ReadCommand> commands)
     {
-        SortedSet<ByteBuffer> s = new TreeSet<ByteBuffer>(columnFamily.metadata().comparator);
+        SortedSet<CellName> s = new TreeSet<>(columnFamily.metadata().comparator);
         Iterables.addAll(s, columnFamily.getColumnNames());
         commands.add(new SliceByNamesReadCommand(keyspaceName, key, columnFamily.metadata().cfName, timestamp, new NamesQueryFilter(s)));
     }
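
The counter read-before-write now collects the cell names to read into a TreeSet keyed by the table's comparator (a CellNameType) rather than a raw ByteBuffer comparator. The idiom itself, in a trivial standalone form with a stand-in comparator:

    import java.util.Comparator;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class SortedNamesSketch
    {
        public static void main(String[] args)
        {
            // stand-in for the table comparator; the patch uses the CellNameType
            Comparator<String> comparator = Comparator.naturalOrder();
            SortedSet<String> names = new TreeSet<>(comparator);
            names.add("c");
            names.add("a");
            names.add("b");
            System.out.println(names); // [a, b, c] -- comparator order for the names filter
        }
    }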

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/CounterUpdateColumn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/CounterUpdateColumn.java b/src/java/org/apache/cassandra/db/CounterUpdateColumn.java
index 1ae7dd7..aaf3307 100644
--- a/src/java/org/apache/cassandra/db/CounterUpdateColumn.java
+++ b/src/java/org/apache/cassandra/db/CounterUpdateColumn.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db;
 
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.utils.Allocator;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -33,12 +34,12 @@ import org.apache.cassandra.utils.HeapAllocator;
  */
 public class CounterUpdateColumn extends Column
 {
-    public CounterUpdateColumn(ByteBuffer name, long value, long timestamp)
+    public CounterUpdateColumn(CellName name, long value, long timestamp)
     {
         this(name, ByteBufferUtil.bytes(value), timestamp);
     }
 
-    public CounterUpdateColumn(ByteBuffer name, ByteBuffer value, long timestamp)
+    public CounterUpdateColumn(CellName name, ByteBuffer value, long timestamp)
     {
         super(name, value, timestamp);
     }
@@ -81,7 +82,7 @@ public class CounterUpdateColumn extends Column
     @Override
     public CounterColumn localCopy(ColumnFamilyStore cfs)
     {
-        return new CounterColumn(cfs.internOrCopy(name, HeapAllocator.instance),
+        return new CounterColumn(name.copy(HeapAllocator.instance),
                                  CounterContext.instance().create(delta(), HeapAllocator.instance),
                                  timestamp(),
                                  Long.MIN_VALUE);
@@ -90,7 +91,7 @@ public class CounterUpdateColumn extends Column
     @Override
     public Column localCopy(ColumnFamilyStore cfs, Allocator allocator)
     {
-        return new CounterColumn(cfs.internOrCopy(name, allocator),
+        return new CounterColumn(name.copy(allocator),
                                  CounterContext.instance().create(delta(), allocator),
                                  timestamp(),
                                  Long.MIN_VALUE);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/DataRange.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DataRange.java b/src/java/org/apache/cassandra/db/DataRange.java
index 713027c..453b16a 100644
--- a/src/java/org/apache/cassandra/db/DataRange.java
+++ b/src/java/org/apache/cassandra/db/DataRange.java
@@ -23,8 +23,9 @@ import java.util.Comparator;
 import java.util.List;
 
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.*;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.*;
 
 /**
@@ -57,8 +58,8 @@ public class DataRange
     public static boolean isFullRowSlice(SliceQueryFilter filter)
     {
         return filter.slices.length == 1
-            && filter.start().remaining() == 0
-            && filter.finish().remaining() == 0
+            && filter.start().isEmpty()
+            && filter.finish().isEmpty()
             && filter.count == Integer.MAX_VALUE;
     }
 
@@ -124,11 +125,11 @@ public class DataRange
     public static class Paging extends DataRange
     {
         private final SliceQueryFilter sliceFilter;
-        private final Comparator<ByteBuffer> comparator;
-        private final ByteBuffer columnStart;
-        private final ByteBuffer columnFinish;
+        private final Comparator<Composite> comparator;
+        private final Composite columnStart;
+        private final Composite columnFinish;
 
-        private Paging(AbstractBounds<RowPosition> range, SliceQueryFilter filter, ByteBuffer columnStart, ByteBuffer columnFinish, Comparator<ByteBuffer> comparator)
+        private Paging(AbstractBounds<RowPosition> range, SliceQueryFilter filter, Composite columnStart, Composite columnFinish, Comparator<Composite> comparator)
         {
             super(range, filter);
 
@@ -142,9 +143,9 @@ public class DataRange
             this.columnFinish = columnFinish;
         }
 
-        public Paging(AbstractBounds<RowPosition> range, SliceQueryFilter filter, ByteBuffer columnStart, ByteBuffer columnFinish, AbstractType<?> comparator)
+        public Paging(AbstractBounds<RowPosition> range, SliceQueryFilter filter, Composite columnStart, Composite columnFinish, CellNameType comparator)
         {
-            this(range, filter, columnStart, columnFinish, filter.isReversed() ? comparator.reverseComparator : comparator);
+            this(range, filter, columnStart, columnFinish, filter.isReversed() ? comparator.reverseComparator() : comparator);
         }
 
         @Override
@@ -184,8 +185,8 @@ public class DataRange
         private ColumnSlice[] slicesForKey(ByteBuffer key)
         {
             // We don't call that until it's necessary, so assume we have to do some hard work
-            ByteBuffer newStart = equals(startKey(), key) ? columnStart : null;
-            ByteBuffer newFinish = equals(stopKey(), key) ? columnFinish : null;
+            Composite newStart = equals(startKey(), key) ? columnStart : null;
+            Composite newFinish = equals(stopKey(), key) ? columnFinish : null;
 
             List<ColumnSlice> newSlices = new ArrayList<ColumnSlice>(sliceFilter.slices.length); // in the common case, we'll have the same number of slices
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/DefsTables.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DefsTables.java b/src/java/org/apache/cassandra/db/DefsTables.java
index 828981e..f30e256 100644
--- a/src/java/org/apache/cassandra/db/DefsTables.java
+++ b/src/java/org/apache/cassandra/db/DefsTables.java
@@ -136,19 +136,6 @@ public class DefsTables
         return keyspaces;
     }
 
-    public static ByteBuffer searchComposite(String name, boolean start)
-    {
-        assert name != null;
-        ByteBuffer nameBytes = UTF8Type.instance.decompose(name);
-        int length = nameBytes.remaining();
-        byte[] bytes = new byte[2 + length + 1];
-        bytes[0] = (byte)((length >> 8) & 0xFF);
-        bytes[1] = (byte)(length & 0xFF);
-        ByteBufferUtil.arrayCopy(nameBytes, 0, bytes, 2, length);
-        bytes[bytes.length - 1] = (byte)(start ? 0 : 1);
-        return ByteBuffer.wrap(bytes);
-    }
-
     private static Row serializedColumnFamilies(DecoratedKey ksNameKey)
     {
         ColumnFamilyStore cfsStore = SystemKeyspace.schemaCFS(SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF);
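
The removed searchComposite() hand-rolled a single-component composite slice bound; the Composite/CType layer now builds these instead of schema code assembling raw bytes. What it produced, as a standalone sketch: a 2-byte big-endian length, the UTF-8 component, and a trailing end-of-component byte (0 for a slice start, 1 for a slice end).

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class SearchCompositeSketch
    {
        static ByteBuffer searchComposite(String name, boolean start)
        {
            byte[] component = name.getBytes(StandardCharsets.UTF_8);
            ByteBuffer out = ByteBuffer.allocate(2 + component.length + 1);
            out.putShort((short) component.length);   // 2-byte big-endian length
            out.put(component);                       // the UTF-8 component itself
            out.put((byte) (start ? 0 : 1));          // end-of-component marker
            out.flip();
            return out;
        }

        public static void main(String[] args)
        {
            System.out.println(searchComposite("users", true).remaining()); // 2 + 5 + 1 = 8 bytes
        }
    }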

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/DeletedColumn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DeletedColumn.java b/src/java/org/apache/cassandra/db/DeletedColumn.java
index 377df27..ec88015 100644
--- a/src/java/org/apache/cassandra/db/DeletedColumn.java
+++ b/src/java/org/apache/cassandra/db/DeletedColumn.java
@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.Allocator;
@@ -30,18 +31,18 @@ import org.apache.cassandra.utils.HeapAllocator;
 
 public class DeletedColumn extends Column
 {
-    public DeletedColumn(ByteBuffer name, int localDeletionTime, long timestamp)
+    public DeletedColumn(CellName name, int localDeletionTime, long timestamp)
     {
         this(name, ByteBufferUtil.bytes(localDeletionTime), timestamp);
     }
 
-    public DeletedColumn(ByteBuffer name, ByteBuffer value, long timestamp)
+    public DeletedColumn(CellName name, ByteBuffer value, long timestamp)
     {
         super(name, value, timestamp);
     }
 
     @Override
-    public Column withUpdatedName(ByteBuffer newName)
+    public Column withUpdatedName(CellName newName)
     {
         return new DeletedColumn(newName, value, timestamp);
     }
@@ -67,7 +68,7 @@ public class DeletedColumn extends Column
     @Override
     public void updateDigest(MessageDigest digest)
     {
-        digest.update(name.duplicate());
+        digest.update(name.toByteBuffer().duplicate());
 
         DataOutputBuffer buffer = new DataOutputBuffer();
         try
@@ -99,13 +100,13 @@ public class DeletedColumn extends Column
     @Override
     public Column localCopy(ColumnFamilyStore cfs)
     {
-        return new DeletedColumn(cfs.internOrCopy(name, HeapAllocator.instance), ByteBufferUtil.clone(value), timestamp);
+        return new DeletedColumn(name.copy(HeapAllocator.instance), ByteBufferUtil.clone(value), timestamp);
     }
 
     @Override
     public Column localCopy(ColumnFamilyStore cfs, Allocator allocator)
     {
-        return new DeletedColumn(cfs.internOrCopy(name, allocator), allocator.clone(value), timestamp);
+        return new DeletedColumn(name.copy(allocator), allocator.clone(value), timestamp);
     }
 
     @Override
@@ -123,9 +124,4 @@ public class DeletedColumn extends Column
         if (getLocalDeletionTime() < 0)
             throw new MarshalException("The local deletion time should not be negative");
     }
-
-    public static DeletedColumn create(int localDeletionTime, long timestamp, String... names)
-    {
-        return new DeletedColumn(decomposeName(names), localDeletionTime, timestamp);
-    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/DeletionInfo.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DeletionInfo.java b/src/java/org/apache/cassandra/db/DeletionInfo.java
index 13fc824..0bd0635 100644
--- a/src/java/org/apache/cassandra/db/DeletionInfo.java
+++ b/src/java/org/apache/cassandra/db/DeletionInfo.java
@@ -20,13 +20,13 @@ package org.apache.cassandra.db;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Iterators;
 
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.IVersionedSerializer;
 
 /**
@@ -35,8 +35,6 @@ import org.apache.cassandra.io.IVersionedSerializer;
  */
 public class DeletionInfo
 {
-    private static final Serializer serializer = new Serializer();
-
     /**
      * This represents a deletion of the entire row.  We can't represent this within the RangeTombstoneList, so it's
      * kept separately.  This also slightly optimizes the common case of a full row deletion.
@@ -67,13 +65,13 @@ public class DeletionInfo
         this(topLevel, null);
     }
 
-    public DeletionInfo(ByteBuffer start, ByteBuffer end, Comparator<ByteBuffer> comparator, long markedForDeleteAt, int localDeletionTime)
+    public DeletionInfo(Composite start, Composite end, Comparator<Composite> comparator, long markedForDeleteAt, int localDeletionTime)
     {
         this(DeletionTime.LIVE, new RangeTombstoneList(comparator, 1));
         ranges.add(start, end, markedForDeleteAt, localDeletionTime);
     }
 
-    public DeletionInfo(RangeTombstone rangeTombstone, Comparator<ByteBuffer> comparator)
+    public DeletionInfo(RangeTombstone rangeTombstone, Comparator<Composite> comparator)
     {
         this(rangeTombstone.min, rangeTombstone.max, comparator, rangeTombstone.data.markedForDeleteAt, rangeTombstone.data.localDeletionTime);
     }
@@ -92,11 +90,6 @@ public class DeletionInfo
         return new DeletionInfo(DeletionTime.LIVE);
     }
 
-    public static Serializer serializer()
-    {
-        return serializer;
-    }
-
     public DeletionInfo copy()
     {
         return new DeletionInfo(topLevel, ranges == null ? null : ranges.copy());
@@ -123,7 +116,7 @@ public class DeletionInfo
         return isDeleted(column.name(), column.timestamp());
     }
 
-    public boolean isDeleted(ByteBuffer name, long timestamp)
+    public boolean isDeleted(Composite name, long timestamp)
     {
         // We do rely on this test: if topLevel.markedForDeleteAt is MIN_VALUE, we should not
         // consider the column deleted even if timestamp=MIN_VALUE, otherwise this break QueryFilter.isRelevant
@@ -194,7 +187,7 @@ public class DeletionInfo
             topLevel = newInfo;
     }
 
-    public void add(RangeTombstone tombstone, Comparator<ByteBuffer> comparator)
+    public void add(RangeTombstone tombstone, Comparator<Composite> comparator)
     {
         if (ranges == null)
             ranges = new RangeTombstoneList(comparator, 1);
@@ -255,7 +248,7 @@ public class DeletionInfo
         return ranges == null ? Iterators.<RangeTombstone>emptyIterator() : ranges.iterator();
     }
 
-    public DeletionTime rangeCovering(ByteBuffer name)
+    public DeletionTime rangeCovering(Composite name)
     {
         return ranges == null ? null : ranges.search(name);
     }
@@ -284,15 +277,15 @@ public class DeletionInfo
     {
         assert !ranges.isEmpty();
         StringBuilder sb = new StringBuilder();
-        AbstractType at = (AbstractType)ranges.comparator();
-        assert at != null;
+        CType type = (CType)ranges.comparator();
+        assert type != null;
         Iterator<RangeTombstone> iter = rangeIterator();
         while (iter.hasNext())
         {
             RangeTombstone i = iter.next();
             sb.append("[");
-            sb.append(at.getString(i.min)).append("-");
-            sb.append(at.getString(i.max)).append(", ");
+            sb.append(type.getString(i.min)).append("-");
+            sb.append(type.getString(i.max)).append(", ");
             sb.append(i.data);
             sb.append("]");
         }
@@ -326,32 +319,30 @@ public class DeletionInfo
 
     public static class Serializer implements IVersionedSerializer<DeletionInfo>
     {
-        public void serialize(DeletionInfo info, DataOutput out, int version) throws IOException
+        private final RangeTombstoneList.Serializer rtlSerializer;
+
+        public Serializer(CType type)
         {
-            DeletionTime.serializer.serialize(info.topLevel, out);
-            RangeTombstoneList.serializer.serialize(info.ranges, out, version);
+            this.rtlSerializer = new RangeTombstoneList.Serializer(type);
         }
 
-        /*
-         * Range tombstones internally depend on the column family serializer, but it is not serialized.
-         * Thus deserialize(DataInput, int, Comparator<ByteBuffer>) should be used instead of this method.
-         */
-        public DeletionInfo deserialize(DataInput in, int version) throws IOException
+        public void serialize(DeletionInfo info, DataOutput out, int version) throws IOException
         {
-            throw new UnsupportedOperationException();
+            DeletionTime.serializer.serialize(info.topLevel, out);
+            rtlSerializer.serialize(info.ranges, out, version);
         }
 
-        public DeletionInfo deserialize(DataInput in, int version, Comparator<ByteBuffer> comparator) throws IOException
+        public DeletionInfo deserialize(DataInput in, int version) throws IOException
         {
             DeletionTime topLevel = DeletionTime.serializer.deserialize(in);
-            RangeTombstoneList ranges = RangeTombstoneList.serializer.deserialize(in, version, comparator);
+            RangeTombstoneList ranges = rtlSerializer.deserialize(in, version);
             return new DeletionInfo(topLevel, ranges);
         }
 
         public long serializedSize(DeletionInfo info, TypeSizes typeSizes, int version)
         {
             long size = DeletionTime.serializer.serializedSize(info.topLevel, typeSizes);
-            return size + RangeTombstoneList.serializer.serializedSize(info.ranges, typeSizes, version);
+            return size + rtlSerializer.serializedSize(info.ranges, typeSizes, version);
         }
 
         public long serializedSize(DeletionInfo info, int version)
@@ -389,7 +380,7 @@ public class DeletionInfo
             return isDeleted(column.name(), column.timestamp());
         }
 
-        public boolean isDeleted(ByteBuffer name, long timestamp)
+        public boolean isDeleted(Composite name, long timestamp)
         {
             if (timestamp <= topLevel.markedForDeleteAt)
                 return true;
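
DeletionInfo's serializer stops being a static singleton that must be handed a comparator at deserialize time; each CType now owns a serializer instance already wired with the matching RangeTombstoneList serializer. The shape of that change, sketched with hypothetical stand-in types:

    import java.io.DataInput;
    import java.io.IOException;

    // stand-in for RangeTombstoneList.Serializer, bound to one cell-name layout
    interface RangeListSerializerSketch<R>
    {
        R deserialize(DataInput in) throws IOException;
    }

    public class DeletionInfoSerializerSketch<R>
    {
        private final RangeListSerializerSketch<R> rangeSerializer;

        public DeletionInfoSerializerSketch(RangeListSerializerSketch<R> rangeSerializer)
        {
            // the layout is fixed at construction, so deserialize() no longer
            // needs a comparator argument and the unusable overload disappears
            this.rangeSerializer = rangeSerializer;
        }

        public R deserializeRanges(DataInput in) throws IOException
        {
            return rangeSerializer.deserialize(in);
        }
    }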

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/DeletionTime.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DeletionTime.java b/src/java/org/apache/cassandra/db/DeletionTime.java
index 7952fcb..c60b423 100644
--- a/src/java/org/apache/cassandra/db/DeletionTime.java
+++ b/src/java/org/apache/cassandra/db/DeletionTime.java
@@ -25,6 +25,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;
 
 import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.ObjectSizes;
 
 /**
@@ -50,7 +51,7 @@ public class DeletionTime implements Comparable<DeletionTime>
      */
     public final int localDeletionTime;
 
-    public static final ISerializer<DeletionTime> serializer = new Serializer();
+    public static final Serializer serializer = new Serializer();
 
     @VisibleForTesting
     public DeletionTime(long markedForDeleteAt, int localDeletionTime)
@@ -110,7 +111,7 @@ public class DeletionTime implements Comparable<DeletionTime>
         return ObjectSizes.getFieldSize(fields);
     }
 
-    private static class Serializer implements ISerializer<DeletionTime>
+    public static class Serializer implements ISerializer<DeletionTime>
     {
         public void serialize(DeletionTime delTime, DataOutput out) throws IOException
         {
@@ -128,6 +129,11 @@ public class DeletionTime implements Comparable<DeletionTime>
                 return new DeletionTime(mfda, ldt);
         }
 
+        public void skip(DataInput in) throws IOException
+        {
+            FileUtils.skipBytesFully(in, 4 + 8);
+        }
+
         public long serializedSize(DeletionTime delTime, TypeSizes typeSizes)
         {
             return typeSizes.sizeof(delTime.localDeletionTime)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/EmptyColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/EmptyColumns.java b/src/java/org/apache/cassandra/db/EmptyColumns.java
index 129ddc5..782ffc9 100644
--- a/src/java/org/apache/cassandra/db/EmptyColumns.java
+++ b/src/java/org/apache/cassandra/db/EmptyColumns.java
@@ -21,12 +21,12 @@ package org.apache.cassandra.db;
  */
 
 
-import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.utils.Allocator;
 
@@ -78,12 +78,12 @@ public class EmptyColumns extends AbstractThreadUnsafeSortedColumns
         throw new UnsupportedOperationException();
     }
 
-    public Column getColumn(ByteBuffer name)
+    public Column getColumn(CellName name)
     {
         throw new UnsupportedOperationException();
     }
 
-    public Iterable<ByteBuffer> getColumnNames()
+    public Iterable<CellName> getColumnNames()
     {
         return Collections.emptyList();
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/ExpiringColumn.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ExpiringColumn.java b/src/java/org/apache/cassandra/db/ExpiringColumn.java
index e11567f..92c8306 100644
--- a/src/java/org/apache/cassandra/db/ExpiringColumn.java
+++ b/src/java/org/apache/cassandra/db/ExpiringColumn.java
@@ -22,11 +22,11 @@ import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.Allocator;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.HeapAllocator;
 
 /**
@@ -46,12 +46,12 @@ public class ExpiringColumn extends Column
     private final int localExpirationTime;
     private final int timeToLive;
 
-    public ExpiringColumn(ByteBuffer name, ByteBuffer value, long timestamp, int timeToLive)
+    public ExpiringColumn(CellName name, ByteBuffer value, long timestamp, int timeToLive)
     {
       this(name, value, timestamp, timeToLive, (int) (System.currentTimeMillis() / 1000) + timeToLive);
     }
 
-    public ExpiringColumn(ByteBuffer name, ByteBuffer value, long timestamp, int timeToLive, int localExpirationTime)
+    public ExpiringColumn(CellName name, ByteBuffer value, long timestamp, int timeToLive, int localExpirationTime)
     {
         super(name, value, timestamp);
         assert timeToLive > 0 : timeToLive;
@@ -61,7 +61,7 @@ public class ExpiringColumn extends Column
     }
 
     /** @return Either a DeletedColumn, or an ExpiringColumn. */
-    public static Column create(ByteBuffer name, ByteBuffer value, long timestamp, int timeToLive, int localExpirationTime, int expireBefore, ColumnSerializer.Flag flag)
+    public static Column create(CellName name, ByteBuffer value, long timestamp, int timeToLive, int localExpirationTime, int expireBefore, ColumnSerializer.Flag flag)
     {
         if (localExpirationTime >= expireBefore || flag == ColumnSerializer.Flag.PRESERVE_SIZE)
             return new ExpiringColumn(name, value, timestamp, timeToLive, localExpirationTime);
@@ -78,7 +78,7 @@ public class ExpiringColumn extends Column
     }
 
     @Override
-    public Column withUpdatedName(ByteBuffer newName)
+    public Column withUpdatedName(CellName newName)
     {
         return new ExpiringColumn(newName, value, timestamp, timeToLive, localExpirationTime);
     }
@@ -96,20 +96,20 @@ public class ExpiringColumn extends Column
     }
 
     @Override
-    public int serializedSize(TypeSizes typeSizes)
+    public int serializedSize(CellNameType type, TypeSizes typeSizes)
     {
         /*
          * An expired column adds to a Column :
          *    4 bytes for the localExpirationTime
          *  + 4 bytes for the timeToLive
         */
-        return super.serializedSize(typeSizes) + typeSizes.sizeof(localExpirationTime) + typeSizes.sizeof(timeToLive);
+        return super.serializedSize(type, typeSizes) + typeSizes.sizeof(localExpirationTime) + typeSizes.sizeof(timeToLive);
     }
 
     @Override
     public void updateDigest(MessageDigest digest)
     {
-        digest.update(name.duplicate());
+        digest.update(name.toByteBuffer().duplicate());
         digest.update(value.duplicate());
 
         DataOutputBuffer buffer = new DataOutputBuffer();
@@ -135,20 +135,17 @@ public class ExpiringColumn extends Column
     @Override
     public Column localCopy(ColumnFamilyStore cfs)
     {
-        return new ExpiringColumn(cfs.internOrCopy(name, HeapAllocator.instance), ByteBufferUtil.clone(value), timestamp, timeToLive, localExpirationTime);
+        return localCopy(cfs, HeapAllocator.instance);
     }
 
     @Override
     public Column localCopy(ColumnFamilyStore cfs, Allocator allocator)
     {
-        ByteBuffer clonedName = cfs.maybeIntern(name);
-        if (clonedName == null)
-            clonedName = allocator.clone(name);
-        return new ExpiringColumn(clonedName, allocator.clone(value), timestamp, timeToLive, localExpirationTime);
+        return new ExpiringColumn(name.copy(allocator), allocator.clone(value), timestamp, timeToLive, localExpirationTime);
     }
 
     @Override
-    public String getString(AbstractType<?> comparator)
+    public String getString(CellNameType comparator)
     {
         StringBuilder sb = new StringBuilder();
         sb.append(super.getString(comparator));
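
An expiring cell stores both its time-to-live and an absolute local expiration time in seconds, computed at write time as now + TTL; once that time falls before the compaction-supplied cutoff, the cell is rewritten as a tombstone (see create() above). The arithmetic, as a standalone sketch in plain Java:

    public class ExpirationSketch
    {
        // absolute expiration point in seconds since the epoch, set at write time
        static int localExpirationTime(int timeToLiveSeconds)
        {
            return (int) (System.currentTimeMillis() / 1000) + timeToLiveSeconds;
        }

        // expired relative to a cutoff such as "expireBefore" during deserialization
        static boolean expired(int localExpirationTime, int expireBeforeSeconds)
        {
            return localExpirationTime < expireBeforeSeconds;
        }

        public static void main(String[] args)
        {
            int expiresAt = localExpirationTime(3600); // one hour from now
            int now = (int) (System.currentTimeMillis() / 1000);
            System.out.println("expired yet? " + expired(expiresAt, now)); // false
        }
    }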

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/HintedHandOffManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
index 39afd12..8868286 100644
--- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java
+++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
@@ -41,12 +41,14 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
+import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.filter.*;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.dht.IPartitioner;
@@ -102,7 +104,6 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
 
     private volatile boolean hintedHandOffPaused = false;
 
-    static final CompositeType comparator = CompositeType.getInstance(Arrays.<AbstractType<?>>asList(UUIDType.instance, Int32Type.instance));
     static final int maxHintTTL = Integer.parseInt(System.getProperty("cassandra.maxHintTTL", String.valueOf(Integer.MAX_VALUE)));
 
     private final NonBlockingHashSet<InetAddress> queuedDeliveries = new NonBlockingHashSet<InetAddress>();
@@ -133,7 +134,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
 
         UUID hintId = UUIDGen.getTimeUUID();
         // serialize the hint with id and version as a composite column name
-        ByteBuffer name = comparator.decompose(hintId, MessagingService.current_version);
+        CellName name = CFMetaData.HintsCf.comparator.makeCellName(hintId, MessagingService.current_version);
         ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, RowMutation.serializer, MessagingService.current_version));
         ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Schema.instance.getCFMetaData(Keyspace.SYSTEM_KS, SystemKeyspace.HINTS_CF));
         cf.addColumn(name, value, System.currentTimeMillis(), ttl);
@@ -178,7 +179,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
         StorageService.optionalTasks.scheduleWithFixedDelay(runnable, 10, 10, TimeUnit.MINUTES);
     }
 
-    private static void deleteHint(ByteBuffer tokenBytes, ByteBuffer columnName, long timestamp)
+    private static void deleteHint(ByteBuffer tokenBytes, CellName columnName, long timestamp)
     {
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, tokenBytes);
         rm.delete(SystemKeyspace.HINTS_CF, columnName, timestamp);
@@ -238,11 +239,11 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
         return CompactionManager.instance.submitUserDefined(hintStore, descriptors, (int) (System.currentTimeMillis() / 1000));
     }
 
-    private static boolean pagingFinished(ColumnFamily hintColumnFamily, ByteBuffer startColumn)
+    private static boolean pagingFinished(ColumnFamily hintColumnFamily, Composite startColumn)
     {
         // done if no hints found or the start column (same as last column processed in previous iteration) is the only one
         return hintColumnFamily == null
-               || (hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn(startColumn) != null);
+               || (hintColumnFamily.getSortedColumns().size() == 1 && hintColumnFamily.getColumn((CellName)startColumn) != null);
     }
 
     private int waitForSchemaAgreement(InetAddress endpoint) throws TimeoutException
@@ -325,7 +326,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
         DecoratedKey epkey =  StorageService.getPartitioner().decorateKey(hostIdBytes);
 
         final AtomicInteger rowsReplayed = new AtomicInteger(0);
-        ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
+        Composite startColumn = Composites.EMPTY;
 
         int pageSize = calculatePageSize();
         logger.debug("Using pageSize of {}", pageSize);
@@ -343,7 +344,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
             QueryFilter filter = QueryFilter.getSliceFilter(epkey,
                                                             SystemKeyspace.HINTS_CF,
                                                             startColumn,
-                                                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                            Composites.EMPTY,
                                                             false,
                                                             pageSize,
                                                             now);
@@ -381,8 +382,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
 
                 startColumn = hint.name();
 
-                ByteBuffer[] components = comparator.split(hint.name());
-                int version = Int32Type.instance.compose(components[1]);
+                int version = Int32Type.instance.compose(hint.name().get(1));
                 DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
                 RowMutation rm;
                 try
@@ -493,7 +493,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
         IPartitioner p = StorageService.getPartitioner();
         RowPosition minPos = p.getMinimumToken().minKeyBound();
         Range<RowPosition> range = new Range<RowPosition>(minPos, minPos, p);
-        IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<ByteBuffer>of());
+        IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of());
         List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis());
         for (Row row : rows)
         {
@@ -564,8 +564,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
     private List<Row> getHintsSlice(int columnCount)
     {
         // Get count # of columns...
-        SliceQueryFilter predicate = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                          ByteBufferUtil.EMPTY_BYTE_BUFFER,
+        SliceQueryFilter predicate = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY,
                                                           false,
                                                           columnCount);
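
The net effect in the hints path: a hint's cell name is produced by the hints
table's comparator and decoded by component index, instead of being a
hand-built CompositeType ByteBuffer. A minimal sketch of the round trip,
pieced together from the hunks above (decoding component 0 with
UUIDType.instance is an assumption, suggested by the comparator definition
this patch removes):

    // store: name the hint cell with (hint id, messaging version)
    UUID hintId = UUIDGen.getTimeUUID();
    CellName name = CFMetaData.HintsCf.comparator.makeCellName(hintId, MessagingService.current_version);

    // replay: read the components back by position
    UUID id = UUIDType.instance.compose(name.get(0));
    int version = Int32Type.instance.compose(name.get(1));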
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index 785f0c2..ffbdd37 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -32,6 +32,7 @@ import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.concurrent.NamedThreadFactory;
 import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.commitlog.ReplayPosition;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -111,7 +112,7 @@ public class Memtable
     // Record the comparator of the CFS at the creation of the memtable. This
     // is only used when a user updates the CF comparator, to know if the
     // memtable was created with the new or old comparator.
-    public final AbstractType initialComparator;
+    public final CellNameType initialComparator;
 
     public Memtable(ColumnFamilyStore cfs)
     {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/OnDiskAtom.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/OnDiskAtom.java b/src/java/org/apache/cassandra/db/OnDiskAtom.java
index 2956d6b..bda46d5 100644
--- a/src/java/org/apache/cassandra/db/OnDiskAtom.java
+++ b/src/java/org/apache/cassandra/db/OnDiskAtom.java
@@ -18,18 +18,19 @@
 package org.apache.cassandra.db;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.ISSTableSerializer;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
 public interface OnDiskAtom
 {
-    public ByteBuffer name();
+    public Composite name();
 
     /**
      * For a standard column, this is the same as timestamp().
@@ -39,28 +40,28 @@ public interface OnDiskAtom
     public long maxTimestamp();
     public int getLocalDeletionTime(); // for tombstone GC, so int is sufficient granularity
 
-    public int serializedSize(TypeSizes typeSizes);
-    public long serializedSizeForSSTable();
-
     public void validateFields(CFMetaData metadata) throws MarshalException;
     public void updateDigest(MessageDigest digest);
 
     public static class Serializer implements ISSTableSerializer<OnDiskAtom>
     {
-        public static Serializer instance = new Serializer();
+        private final CellNameType type;
 
-        private Serializer() {}
+        public Serializer(CellNameType type)
+        {
+            this.type = type;
+        }
 
         public void serializeForSSTable(OnDiskAtom atom, DataOutput out) throws IOException
         {
             if (atom instanceof Column)
             {
-                Column.serializer.serialize((Column) atom, out);
+                type.columnSerializer().serialize((Column)atom, out);
             }
             else
             {
                 assert atom instanceof RangeTombstone;
-                RangeTombstone.serializer.serializeForSSTable((RangeTombstone)atom, out);
+                type.rangeTombstoneSerializer().serializeForSSTable((RangeTombstone)atom, out);
             }
         }
 
@@ -71,8 +72,8 @@ public interface OnDiskAtom
 
         public OnDiskAtom deserializeFromSSTable(DataInput in, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version) throws IOException
         {
-            ByteBuffer name = ByteBufferUtil.readWithShortLength(in);
-            if (name.remaining() <= 0)
+            Composite name = type.serializer().deserialize(in);
+            if (name.isEmpty())
             {
                 // SSTableWriter.END_OF_ROW
                 return null;
@@ -80,9 +81,22 @@ public interface OnDiskAtom
 
             int b = in.readUnsignedByte();
             if ((b & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0)
-                return RangeTombstone.serializer.deserializeBody(in, name, version);
+                return type.rangeTombstoneSerializer().deserializeBody(in, name, version);
             else
-                return Column.serializer.deserializeColumnBody(in, name, b, flag, expireBefore);
+                return type.columnSerializer().deserializeColumnBody(in, (CellName)name, b, flag, expireBefore);
+        }
+
+        public long serializedSizeForSSTable(OnDiskAtom atom)
+        {
+            if (atom instanceof Column)
+            {
+                return type.columnSerializer().serializedSize((Column)atom, TypeSizes.NATIVE);
+            }
+            else
+            {
+                assert atom instanceof RangeTombstone;
+                return type.rangeTombstoneSerializer().serializedSizeForSSTable((RangeTombstone)atom);
+            }
         }
     }
 }
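
The atom serializer is now created per table rather than used as a static
singleton, because cell names can only be (de)serialized with that table's
comparator. A hedged sketch of the new usage (obtaining the CellNameType from
CFMetaData.comparator is an assumption drawn from other hunks in this patch,
e.g. metadata.comparator.rowIndexEntrySerializer()):

    // one serializer per table, bound to that table's cell name layout
    OnDiskAtom.Serializer atomSerializer = new OnDiskAtom.Serializer(metadata.comparator);

    atomSerializer.serializeForSSTable(atom, out);                    // Column or RangeTombstone
    long onDiskSize = atomSerializer.serializedSizeForSSTable(atom);  // new per-type sizing hook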

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/PagedRangeCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/PagedRangeCommand.java b/src/java/org/apache/cassandra/db/PagedRangeCommand.java
index 0e24859..1a009d9 100644
--- a/src/java/org/apache/cassandra/db/PagedRangeCommand.java
+++ b/src/java/org/apache/cassandra/db/PagedRangeCommand.java
@@ -20,10 +20,12 @@ package org.apache.cassandra.db;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.io.IVersionedSerializer;
@@ -35,8 +37,8 @@ public class PagedRangeCommand extends AbstractRangeCommand
 {
     public static final IVersionedSerializer<PagedRangeCommand> serializer = new Serializer();
 
-    public final ByteBuffer start;
-    public final ByteBuffer stop;
+    public final Composite start;
+    public final Composite stop;
     public final int limit;
 
     public PagedRangeCommand(String keyspace,
@@ -44,8 +46,8 @@ public class PagedRangeCommand extends AbstractRangeCommand
                              long timestamp,
                              AbstractBounds<RowPosition> keyRange,
                              SliceQueryFilter predicate,
-                             ByteBuffer start,
-                             ByteBuffer stop,
+                             Composite start,
+                             Composite stop,
                              List<IndexExpression> rowFilter,
                              int limit)
     {
@@ -57,13 +59,13 @@ public class PagedRangeCommand extends AbstractRangeCommand
 
     public MessageOut<PagedRangeCommand> createMessage()
     {
-        return new MessageOut<PagedRangeCommand>(MessagingService.Verb.PAGED_RANGE, this, serializer);
+        return new MessageOut<>(MessagingService.Verb.PAGED_RANGE, this, serializer);
     }
 
     public AbstractRangeCommand forSubRange(AbstractBounds<RowPosition> subRange)
     {
-        ByteBuffer newStart = subRange.left.equals(keyRange.left) ? start : ((SliceQueryFilter)predicate).start();
-        ByteBuffer newStop = subRange.right.equals(keyRange.right) ? stop : ((SliceQueryFilter)predicate).finish();
+        Composite newStart = subRange.left.equals(keyRange.left) ? start : ((SliceQueryFilter)predicate).start();
+        Composite newStop = subRange.right.equals(keyRange.right) ? stop : ((SliceQueryFilter)predicate).finish();
         return new PagedRangeCommand(keyspace,
                                      columnFamily,
                                      timestamp,
@@ -125,13 +127,15 @@ public class PagedRangeCommand extends AbstractRangeCommand
 
             AbstractBounds.serializer.serialize(cmd.keyRange, out, version);
 
+            CFMetaData metadata = Schema.instance.getCFMetaData(cmd.keyspace, cmd.columnFamily);
+
             // SliceQueryFilter (the count is not used)
             SliceQueryFilter filter = (SliceQueryFilter)cmd.predicate;
-            SliceQueryFilter.serializer.serialize(filter, out, version);
+            metadata.comparator.sliceQueryFilterSerializer().serialize(filter, out, version);
 
             // The start and stop of the page
-            ByteBufferUtil.writeWithShortLength(cmd.start, out);
-            ByteBufferUtil.writeWithShortLength(cmd.stop, out);
+            metadata.comparator.serializer().serialize(cmd.start, out);
+            metadata.comparator.serializer().serialize(cmd.stop, out);
 
             out.writeInt(cmd.rowFilter.size());
             for (IndexExpression expr : cmd.rowFilter)
@@ -152,10 +156,12 @@ public class PagedRangeCommand extends AbstractRangeCommand
 
             AbstractBounds<RowPosition> keyRange = AbstractBounds.serializer.deserialize(in, version).toRowBounds();
 
-            SliceQueryFilter predicate = SliceQueryFilter.serializer.deserialize(in, version);
+            CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
+
+            SliceQueryFilter predicate = metadata.comparator.sliceQueryFilterSerializer().deserialize(in, version);
 
-            ByteBuffer start = ByteBufferUtil.readWithShortLength(in);
-            ByteBuffer stop = ByteBufferUtil.readWithShortLength(in);
+            Composite start = metadata.comparator.serializer().deserialize(in);
+            Composite stop =  metadata.comparator.serializer().deserialize(in);
 
             int filterCount = in.readInt();
             List<IndexExpression> rowFilter = new ArrayList<IndexExpression>(filterCount);
@@ -181,10 +187,12 @@ public class PagedRangeCommand extends AbstractRangeCommand
 
             size += AbstractBounds.serializer.serializedSize(cmd.keyRange, version);
 
-            size += SliceQueryFilter.serializer.serializedSize((SliceQueryFilter)cmd.predicate, version);
+            CFMetaData metadata = Schema.instance.getCFMetaData(cmd.keyspace, cmd.columnFamily);
+
+            size += metadata.comparator.sliceQueryFilterSerializer().serializedSize((SliceQueryFilter)cmd.predicate, version);
 
-            size += TypeSizes.NATIVE.sizeofWithShortLength(cmd.start);
-            size += TypeSizes.NATIVE.sizeofWithShortLength(cmd.stop);
+            size += metadata.comparator.serializer().serializedSize(cmd.start, TypeSizes.NATIVE);
+            size += metadata.comparator.serializer().serializedSize(cmd.stop, TypeSizes.NATIVE);
 
             size += TypeSizes.NATIVE.sizeof(cmd.rowFilter.size());
             for (IndexExpression expr : cmd.rowFilter)
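
The reason CFMetaData shows up in this serializer: Composite encoding depends
on the table's comparator, so both ends of the wire resolve the metadata from
the keyspace/column family the command already carries. Condensed from the
hunks above rather than adding new behaviour:

    CFMetaData metadata = Schema.instance.getCFMetaData(cmd.keyspace, cmd.columnFamily);
    metadata.comparator.serializer().serialize(cmd.start, out);   // page bounds
    metadata.comparator.serializer().serialize(cmd.stop, out);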

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/RangeSliceCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RangeSliceCommand.java b/src/java/org/apache/cassandra/db/RangeSliceCommand.java
index a9a48a0..b8f67ba 100644
--- a/src/java/org/apache/cassandra/db/RangeSliceCommand.java
+++ b/src/java/org/apache/cassandra/db/RangeSliceCommand.java
@@ -157,7 +157,9 @@ class RangeSliceCommandSerializer implements IVersionedSerializer<RangeSliceComm
         out.writeUTF(sliceCommand.columnFamily);
         out.writeLong(sliceCommand.timestamp);
 
-        IDiskAtomFilter.Serializer.instance.serialize(sliceCommand.predicate, out, version);
+        CFMetaData metadata = Schema.instance.getCFMetaData(sliceCommand.keyspace, sliceCommand.columnFamily);
+
+        metadata.comparator.diskAtomFilterSerializer().serialize(sliceCommand.predicate, out, version);
 
         if (sliceCommand.rowFilter == null)
         {
@@ -187,7 +189,7 @@ class RangeSliceCommandSerializer implements IVersionedSerializer<RangeSliceComm
 
         CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
 
-        IDiskAtomFilter predicate = IDiskAtomFilter.Serializer.instance.deserialize(in, version, metadata.comparator);
+        IDiskAtomFilter predicate = metadata.comparator.diskAtomFilterSerializer().deserialize(in, version);
 
         List<IndexExpression> rowFilter;
         int filterCount = in.readInt();
@@ -214,9 +216,11 @@ class RangeSliceCommandSerializer implements IVersionedSerializer<RangeSliceComm
         size += TypeSizes.NATIVE.sizeof(rsc.columnFamily);
         size += TypeSizes.NATIVE.sizeof(rsc.timestamp);
 
+        CFMetaData metadata = Schema.instance.getCFMetaData(rsc.keyspace, rsc.columnFamily);
+
         IDiskAtomFilter filter = rsc.predicate;
 
-        size += IDiskAtomFilter.Serializer.instance.serializedSize(filter, version);
+        size += metadata.comparator.diskAtomFilterSerializer().serializedSize(filter, version);
 
         if (rsc.rowFilter == null)
         {


http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
index 86348aa..c038b02 100644
--- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
@@ -31,12 +31,12 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.cassandra.cql3.statements.*;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.config.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.RequestValidationException;
-import org.apache.cassandra.io.compress.CompressionParameters;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.Pair;
@@ -190,14 +190,14 @@ public class CQLSSTableWriter
             throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d", boundNames.size(), values.size()));
 
         List<ByteBuffer> keys = insert.buildPartitionKeyNames(values);
-        ColumnNameBuilder clusteringPrefix = insert.createClusteringPrefixBuilder(values);
+        Composite clusteringPrefix = insert.createClusteringPrefix(values);
 
         long now = System.currentTimeMillis() * 1000;
         UpdateParameters params = new UpdateParameters(insert.cfm,
                                                        values,
                                                        insert.getTimestamp(now, values),
                                                        insert.getTimeToLive(values),
-                                                       Collections.<ByteBuffer, ColumnGroupMap>emptyMap());
+                                                       Collections.<ByteBuffer, CQL3Row>emptyMap());
 
         for (ByteBuffer key: keys)
         {
@@ -321,13 +321,20 @@ public class CQLSSTableWriter
 
                 // We need to register the keyspace/table metadata through Schema, otherwise we won't be able to properly
                 // build the insert statement in using().
-                KSMetaData ksm = KSMetaData.newKeyspace(this.schema.ksName,
-                                                        AbstractReplicationStrategy.getClass("org.apache.cassandra.locator.SimpleStrategy"),
-                                                        ImmutableMap.of("replication_factor", "1"),
-                                                        true,
-                                                        Collections.singleton(this.schema));
+                if (Schema.instance.getKSMetaData(this.schema.ksName) == null)
+                {
+                    KSMetaData ksm = KSMetaData.newKeyspace(this.schema.ksName,
+                                                            AbstractReplicationStrategy.getClass("org.apache.cassandra.locator.SimpleStrategy"),
+                                                            ImmutableMap.of("replication_factor", "1"),
+                                                            true,
+                                                            Collections.singleton(this.schema));
+                    Schema.instance.load(ksm);
+                }
+                else if (Schema.instance.getCFMetaData(this.schema.ksName, this.schema.cfName) == null)
+                {
+                    Schema.instance.load(this.schema);
+                }
 
-                Schema.instance.load(ksm);
                 return this;
             }
             catch (RequestValidationException e)
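
Previously the builder always registered a fresh KSMetaData holding only the
table being built, which did not play well with creating several writers in
the same keyspace; with the guarded load above they can coexist. A hedged
usage sketch (the builder methods inDirectory/forTable/using and addRow are
taken from the class's public API rather than from this patch, so treat them
as assumptions):

    String schema = "CREATE TABLE ks.t1 (k int PRIMARY KEY, v text)";
    String insert = "INSERT INTO ks.t1 (k, v) VALUES (?, ?)";
    CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                              .inDirectory(new File("/tmp/ks/t1"))
                                              .forTable(schema)
                                              .using(insert)
                                              .build();
    writer.addRow(1, "one");
    // a second builder for another table in ks now reuses the keyspace
    // registered above instead of re-loading it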

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java b/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
index aaed765..d390518 100644
--- a/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
+++ b/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
@@ -19,112 +19,91 @@ package org.apache.cassandra.io.sstable;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.ColumnToCollectionType;
-import org.apache.cassandra.db.marshal.CompositeType;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.minimalBufferFor;
 
 public class ColumnNameHelper
 {
+    private static List<ByteBuffer> maybeGrow(List<ByteBuffer> l, int size)
+    {
+        if (l.size() >= size)
+            return l;
+
+        List<ByteBuffer> nl = new ArrayList<>(size);
+        nl.addAll(l);
+        for (int i = l.size(); i < size; i++)
+            nl.add(null);
+        return nl;
+    }
+
+    private static List<ByteBuffer> getComponents(Composite prefix, int size)
+    {
+        List<ByteBuffer> l = new ArrayList<>(size);
+        for (int i = 0; i < size; i++)
+            l.add(prefix.get(i));
+        return l;
+    }
+
     /**
-     * finds the max column name(s)
-     *
-     * if comparator is of CompositeType, candidate will be split into its components, and each
-     * component is compared to the component on the same place in maxSeen, and then returning the list
-     * with the max columns.
-     *
-     * will collect at most the number of types in the comparator.
+     * finds the max cell name component(s)
      *
-     * if comparator is not CompositeType, maxSeen is assumed to be of size 1 and the item there is
-     * compared to the candidate.
+     * Note that this method *can modify maxSeen*.
      *
      * @param maxSeen the max columns seen so far
      * @param candidate the candidate column(s)
      * @param comparator the comparator to use
      * @return a list with the max column(s)
      */
-    public static List<ByteBuffer> maxComponents(List<ByteBuffer> maxSeen, ByteBuffer candidate, AbstractType<?> comparator)
+    public static List<ByteBuffer> maxComponents(List<ByteBuffer> maxSeen, Composite candidate, CellNameType comparator)
     {
-        if (comparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)comparator;
-            if (maxSeen.isEmpty())
-                return Arrays.asList(ct.split(candidate));
-
-            int typeCount = getTypeCount(ct);
-
-            List<ByteBuffer> components = Arrays.asList(ct.split(candidate));
-            List<ByteBuffer> biggest = maxSeen.size() > components.size() ? maxSeen : components;
-            // if typecount is less than both the components and maxseen, we only keep typecount columns.
-            int minSize = Math.min(typeCount, Math.min(components.size(), maxSeen.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(ColumnNameHelper.max(maxSeen.get(i), components.get(i), ct.types.get(i)));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(biggest.get(i));
-
-            return retList;
-        }
-        else
-        {
-            if (maxSeen.size() == 0)
-                return Collections.singletonList(candidate);
-            return Collections.singletonList(ColumnNameHelper.max(maxSeen.get(0), candidate, comparator));
-        }
+        // For a cell name, no reason to look more than the clustering prefix
+        // (and comparing the collection element would actually crash)
+        int size = candidate instanceof CellName ? ((CellName)candidate).clusteringSize() : candidate.size();
+
+        if (maxSeen.isEmpty())
+            return getComponents(candidate, size);
+
+        // In most cases maxSeen is big enough to hold the result, so update it in place in those cases
+        maxSeen = maybeGrow(maxSeen, size);
+
+        for (int i = 0; i < size; i++)
+            maxSeen.set(i, max(maxSeen.get(i), candidate.get(i), comparator.subtype(i)));
+
+        return maxSeen;
     }
+
     /**
-     * finds the min column name(s)
-     *
-     * if comparator is of CompositeType, candidate will be split into its components, and each
-     * component is compared to the component on the same place in minSeen, and then returning the list
-     * with the min columns.
+     * finds the min cell name component(s)
      *
-     * if comparator is not CompositeType, maxSeen is assumed to be of size 1 and the item there is
-     * compared to the candidate.
+     * Note that this method *can modify minSeen*.
      *
      * @param minSeen the min columns seen so far
      * @param candidate the candidate column(s)
      * @param comparator the comparator to use
      * @return a list with the min column(s)
      */
-    public static List<ByteBuffer> minComponents(List<ByteBuffer> minSeen, ByteBuffer candidate, AbstractType<?> comparator)
+    public static List<ByteBuffer> minComponents(List<ByteBuffer> minSeen, Composite candidate, CellNameType comparator)
     {
-        if (comparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)comparator;
-            if (minSeen.isEmpty())
-                return Arrays.asList(ct.split(candidate));
-
-            int typeCount = getTypeCount(ct);
-
-            List<ByteBuffer> components = Arrays.asList(ct.split(candidate));
-            List<ByteBuffer> biggest = minSeen.size() > components.size() ? minSeen : components;
-            // if typecount is less than both the components and maxseen, we only collect typecount columns.
-            int minSize = Math.min(typeCount, Math.min(components.size(), minSeen.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(ColumnNameHelper.min(minSeen.get(i), components.get(i), ct.types.get(i)));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(biggest.get(i));
-
-            return retList;
-        }
-        else
-        {
-            if (minSeen.size() == 0)
-                return Collections.singletonList(candidate);
-            return Collections.singletonList(ColumnNameHelper.min(minSeen.get(0), candidate, comparator));
-
-        }
+        // For a cell name, no reason to look more than the clustering prefix
+        // (and comparing the collection element would actually crash)
+        int size = candidate instanceof CellName ? ((CellName)candidate).clusteringSize() : candidate.size();
+
+        if (minSeen.isEmpty())
+            return getComponents(candidate, size);
+
+        // In most cases minSeen is big enough to hold the result, so update it in place in those cases
+        minSeen = maybeGrow(minSeen, size);
+
+        for (int i = 0; i < size; i++)
+            minSeen.set(i, min(minSeen.get(i), candidate.get(i), comparator.subtype(i)));
+
+        return minSeen;
     }
 
     /**
@@ -139,6 +118,11 @@ public class ColumnNameHelper
      */
     private static ByteBuffer min(ByteBuffer b1, ByteBuffer b2, AbstractType<?> comparator)
     {
+        if (b1 == null)
+            return b2;
+        if (b2 == null)
+            return b1;
+
         if (comparator.compare(b1, b2) >= 0)
             return b2;
         return b1;
@@ -156,24 +140,25 @@ public class ColumnNameHelper
      */
     private static ByteBuffer max(ByteBuffer b1, ByteBuffer b2, AbstractType<?> comparator)
     {
+        if (b1 == null)
+            return b2;
+        if (b2 == null)
+            return b1;
+
         if (comparator.compare(b1, b2) >= 0)
             return b1;
         return b2;
     }
 
     /**
-     * if columnNameComparator is CompositeType the columns are compared by components using the subcomparator
-     * on the same position.
-     *
-     * if comparator is not CompositeType, the lists are assumed to be of max size 1 and compared using the comparator
-     * directly.
+     * Merge 2 lists of min cell name components.
      *
      * @param minColumnNames lhs
      * @param candidates rhs
-     * @param columnNameComparator comparator to use
+     * @param comparator comparator to use
      * @return a list with smallest column names according to (sub)comparator
      */
-    public static List<ByteBuffer> mergeMin(List<ByteBuffer> minColumnNames, List<ByteBuffer> candidates, AbstractType<?> columnNameComparator)
+    public static List<ByteBuffer> mergeMin(List<ByteBuffer> minColumnNames, List<ByteBuffer> candidates, CellNameType comparator)
     {
         if (minColumnNames.isEmpty())
             return minimalBuffersFor(candidates);
@@ -181,27 +166,18 @@ public class ColumnNameHelper
         if (candidates.isEmpty())
             return minColumnNames;
 
-        if (columnNameComparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)columnNameComparator;
-            List<ByteBuffer> biggest = minColumnNames.size() > candidates.size() ? minColumnNames : candidates;
-            int typeCount = getTypeCount(ct);
-            int minSize = Math.min(typeCount, Math.min(minColumnNames.size(), candidates.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(minimalBufferFor(min(minColumnNames.get(i), candidates.get(i), ct.types.get(i))));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(minimalBufferFor(biggest.get(i)));
-
-            return retList;
-        }
-        else
-        {
-            return Collections.singletonList(minimalBufferFor(min(minColumnNames.get(0), candidates.get(0), columnNameComparator)));
-        }
+        List<ByteBuffer> biggest = minColumnNames.size() > candidates.size() ? minColumnNames : candidates;
+        List<ByteBuffer> smallest = minColumnNames.size() > candidates.size() ? candidates : minColumnNames;
+
+        // We want to always copy the smallest list, and maybeGrow does it only if it's actually smaller
+        List<ByteBuffer> retList = smallest.size() == biggest.size()
+                                 ? new ArrayList<>(smallest)
+                                 : maybeGrow(smallest, biggest.size());
+
+        for (int i = 0; i < biggest.size(); i++)
+            retList.set(i, minimalBufferFor(min(retList.get(i), biggest.get(i), comparator.subtype(i))));
+
+        return retList;
     }
 
     private static List<ByteBuffer> minimalBuffersFor(List<ByteBuffer> candidates)
@@ -213,18 +189,14 @@ public class ColumnNameHelper
     }
 
     /**
-     * if columnNameComparator is CompositeType the columns are compared by components using the subcomparator
-     * on the same position.
-     *
-     * if comparator is not CompositeType, the lists are assumed to be of max size 1 and compared using the comparator
-     * directly.
+     * Merge 2 lists of max cell name components.
      *
      * @param maxColumnNames lhs
      * @param candidates rhs
-     * @param columnNameComparator comparator to use
+     * @param comparator comparator to use
      * @return a list with biggest column names according to (sub)comparator
      */
-    public static List<ByteBuffer> mergeMax(List<ByteBuffer> maxColumnNames, List<ByteBuffer> candidates, AbstractType<?> columnNameComparator)
+    public static List<ByteBuffer> mergeMax(List<ByteBuffer> maxColumnNames, List<ByteBuffer> candidates, CellNameType comparator)
     {
         if (maxColumnNames.isEmpty())
             return minimalBuffersFor(candidates);
@@ -232,31 +204,17 @@ public class ColumnNameHelper
         if (candidates.isEmpty())
             return maxColumnNames;
 
-        if (columnNameComparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)columnNameComparator;
-            List<ByteBuffer> biggest = maxColumnNames.size() > candidates.size() ? maxColumnNames : candidates;
-            int typeCount = getTypeCount(ct);
-            int minSize = Math.min(typeCount, Math.min(maxColumnNames.size(), candidates.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(minimalBufferFor(max(maxColumnNames.get(i), candidates.get(i), ct.types.get(i))));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(minimalBufferFor(biggest.get(i)));
-
-            return retList;
-        }
-        else
-        {
-            return Collections.singletonList(minimalBufferFor(max(maxColumnNames.get(0), candidates.get(0), columnNameComparator)));
-        }
+        List<ByteBuffer> biggest = maxColumnNames.size() > candidates.size() ? maxColumnNames : candidates;
+        List<ByteBuffer> smallest = maxColumnNames.size() > candidates.size() ? candidates : maxColumnNames;
 
-    }
+        // We want to always copy the smallest list, and maybeGrow does it only if it's actually smaller
+        List<ByteBuffer> retList = smallest.size() == biggest.size()
+                                 ? new ArrayList<>(smallest)
+                                 : maybeGrow(smallest, biggest.size());
 
-    private static int getTypeCount(CompositeType ct)
-    {
-        return ct.types.get(ct.types.size() - 1) instanceof ColumnToCollectionType ? ct.types.size() - 1 : ct.types.size();
+        for (int i = 0; i < biggest.size(); i++)
+            retList.set(i, minimalBufferFor(max(retList.get(i), biggest.get(i), comparator.subtype(i))));
+
+        return retList;
     }
 }
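
What the collector-facing methods now do: track, per clustering component,
the smallest and largest value seen, using the comparator's subtype for that
position, and ignore any collection-element component of a CellName. A small
sketch of the intended use (MetadataCollector, updated later in this patch to
take a CellNameType, is the kind of caller; getting the CellNameType from
metadata.comparator is an assumption):

    CellNameType comparator = metadata.comparator;
    List<ByteBuffer> minSeen = Collections.emptyList();
    List<ByteBuffer> maxSeen = Collections.emptyList();

    for (OnDiskAtom atom : atoms)   // the atoms being written to the sstable
    {
        minSeen = ColumnNameHelper.minComponents(minSeen, atom.name(), comparator);
        maxSeen = ColumnNameHelper.maxComponents(maxSeen, atom.name(), comparator);
    }
    // minSeen.get(i)/maxSeen.get(i) now bound component i under comparator.subtype(i)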

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexHelper.java b/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
index d70ff19..e644974 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
@@ -18,14 +18,15 @@
 package org.apache.cassandra.io.sstable;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.TypeSizes;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.FileMark;
 import org.apache.cassandra.io.util.FileUtils;
@@ -67,21 +68,23 @@ public class IndexHelper
     /**
      * Deserialize the index into a structure and return it
      *
-     * @param in - input source
+     * @param in input source
+     * @param type the comparator type for the column family
      *
      * @return ArrayList<IndexInfo> - list of de-serialized indexes
      * @throws IOException if an I/O error occurs.
      */
-    public static List<IndexInfo> deserializeIndex(FileDataInput in) throws IOException
+    public static List<IndexInfo> deserializeIndex(FileDataInput in, CType type) throws IOException
     {
         int columnIndexSize = in.readInt();
         if (columnIndexSize == 0)
             return Collections.<IndexInfo>emptyList();
         ArrayList<IndexInfo> indexList = new ArrayList<IndexInfo>();
         FileMark mark = in.mark();
+        ISerializer<IndexInfo> serializer = type.indexSerializer();
         while (in.bytesPastMark(mark) < columnIndexSize)
         {
-            indexList.add(IndexInfo.deserialize(in));
+            indexList.add(serializer.deserialize(in));
         }
         assert in.bytesPastMark(mark) == columnIndexSize;
 
@@ -105,9 +108,9 @@ public class IndexHelper
      *
      * @return int index
      */
-    public static int indexFor(ByteBuffer name, List<IndexInfo> indexList, AbstractType<?> comparator, boolean reversed, int lastIndex)
+    public static int indexFor(Composite name, List<IndexInfo> indexList, CType comparator, boolean reversed, int lastIndex)
     {
-        if (name.remaining() == 0 && reversed)
+        if (name.isEmpty() && reversed)
             return indexList.size() - 1;
 
         if (lastIndex >= indexList.size())
@@ -145,19 +148,19 @@ public class IndexHelper
         return startIdx + (index < 0 ? -index - (reversed ? 2 : 1) : index);
     }
 
-    public static Comparator<IndexInfo> getComparator(final AbstractType<?> nameComparator, boolean reversed)
+    public static Comparator<IndexInfo> getComparator(final CType nameComparator, boolean reversed)
     {
-        return reversed ? nameComparator.indexReverseComparator : nameComparator.indexComparator;
+        return reversed ? nameComparator.indexReverseComparator() : nameComparator.indexComparator();
     }
 
     public static class IndexInfo
     {
         public final long width;
-        public final ByteBuffer lastName;
-        public final ByteBuffer firstName;
+        public final Composite lastName;
+        public final Composite firstName;
         public final long offset;
 
-        public IndexInfo(ByteBuffer firstName, ByteBuffer lastName, long offset, long width)
+        public IndexInfo(Composite firstName, Composite lastName, long offset, long width)
         {
             this.firstName = firstName;
             this.lastName = lastName;
@@ -165,26 +168,38 @@ public class IndexHelper
             this.width = width;
         }
 
-        public void serialize(DataOutput out) throws IOException
+        public static class Serializer implements ISerializer<IndexInfo>
         {
-            ByteBufferUtil.writeWithShortLength(firstName, out);
-            ByteBufferUtil.writeWithShortLength(lastName, out);
-            out.writeLong(offset);
-            out.writeLong(width);
-        }
+            private final CType type;
 
-        public int serializedSize(TypeSizes typeSizes)
-        {
-            int firstNameSize = firstName.remaining();
-            int lastNameSize = lastName.remaining();
-            return typeSizes.sizeof((short) firstNameSize) + firstNameSize +
-                   typeSizes.sizeof((short) lastNameSize) + lastNameSize +
-                   typeSizes.sizeof(offset) + typeSizes.sizeof(width);
-        }
+            public Serializer(CType type)
+            {
+                this.type = type;
+            }
 
-        public static IndexInfo deserialize(DataInput in) throws IOException
-        {
-            return new IndexInfo(ByteBufferUtil.readWithShortLength(in), ByteBufferUtil.readWithShortLength(in), in.readLong(), in.readLong());
+            public void serialize(IndexInfo info, DataOutput out) throws IOException
+            {
+                type.serializer().serialize(info.firstName, out);
+                type.serializer().serialize(info.lastName, out);
+                out.writeLong(info.offset);
+                out.writeLong(info.width);
+            }
+
+            public IndexInfo deserialize(DataInput in) throws IOException
+            {
+                return new IndexInfo(type.serializer().deserialize(in),
+                                     type.serializer().deserialize(in),
+                                     in.readLong(),
+                                     in.readLong());
+            }
+
+            public long serializedSize(IndexInfo info, TypeSizes typeSizes)
+            {
+                return type.serializer().serializedSize(info.firstName, typeSizes)
+                     + type.serializer().serializedSize(info.lastName, typeSizes)
+                     + typeSizes.sizeof(info.offset)
+                     + typeSizes.sizeof(info.width);
+            }
         }
 
         public long memorySize()
@@ -195,7 +210,7 @@ public class IndexHelper
                                             ObjectSizes.getReferenceSize() +
                                             TypeSizes.NATIVE.sizeof(offset) +
                                             TypeSizes.NATIVE.sizeof(width))
-                   + ObjectSizes.getSize(firstName) + ObjectSizes.getSize(lastName);
+                   + firstName.memorySize() + lastName.memorySize();
         }
     }
 }
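
IndexInfo now stores Composite bounds, so its serializer needs the table's
CType, and the helper methods take that type explicitly instead of relying on
length-prefixed ByteBuffers. Sketch of reading a row's column index under the
new signatures (obtaining the CType from the sstable's metadata.comparator is
an assumption based on the rest of the patch; "file" stands for the
FileDataInput positioned at the index):

    CType type = sstable.metadata.comparator;
    List<IndexHelper.IndexInfo> columnIndex = IndexHelper.deserializeIndex(file, type);
    // each entry's firstName/lastName is a Composite, (de)serialized via type.serializer()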

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
index 0c36f62..4d1c663 100644
--- a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
+++ b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
@@ -46,7 +46,7 @@ public class KeyIterator extends AbstractIterator<DecoratedKey> implements Close
             if (in.isEOF())
                 return endOfData();
             DecoratedKey key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(in));
-            RowIndexEntry.serializer.skip(in); // skip remainder of the entry
+            RowIndexEntry.Serializer.skip(in); // skip remainder of the entry
             return key;
         }
         catch (IOException e)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index c13c423..dfccf28 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -218,7 +218,7 @@ public abstract class SSTable
         while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
         {
             ByteBufferUtil.skipShortLength(ifile);
-            RowIndexEntry.serializer.skip(ifile);
+            RowIndexEntry.Serializer.skip(ifile);
             keys++;
         }
         assert keys > 0 && ifile.getFilePointer() > 0 && ifile.length() > 0 : "Unexpected empty index file: " + ifile;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
index 055f4b6..de877bc 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
@@ -563,7 +563,7 @@ public class SSTableReader extends SSTable implements Closeable
             while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
             {
                 ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
-                RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
+                RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
                 DecoratedKey decoratedKey = partitioner.decorateKey(key);
                 if (first == null)
                     first = decoratedKey;
@@ -731,7 +731,7 @@ public class SSTableReader extends SSTable implements Closeable
             while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
             {
                 summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
-                RowIndexEntry.serializer.skip(primaryIndex);
+                RowIndexEntry.Serializer.skip(primaryIndex);
             }
 
             return summaryBuilder.build(partitioner);
@@ -1168,7 +1168,7 @@ public class SSTableReader extends SSTable implements Closeable
                     if (opSatisfied)
                     {
                         // read data position from index entry
-                        RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(in, descriptor.version);
+                        RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(in, descriptor.version);
                         if (exactMatch && updateCacheAndStats)
                         {
                             assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
@@ -1193,7 +1193,7 @@ public class SSTableReader extends SSTable implements Closeable
                         return indexEntry;
                     }
 
-                    RowIndexEntry.serializer.skip(in);
+                    RowIndexEntry.Serializer.skip(in);
                 }
             }
             catch (IOException e)
@@ -1235,7 +1235,7 @@ public class SSTableReader extends SSTable implements Closeable
                     if (indexDecoratedKey.compareTo(token) > 0)
                         return indexDecoratedKey;
 
-                    RowIndexEntry.serializer.skip(in);
+                    RowIndexEntry.Serializer.skip(in);
                 }
             }
             catch (IOException e)
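
Same theme as elsewhere in the patch: RowIndexEntry.serializer stops being a
static field because deserializing the promoted index requires the table's
comparator, while skipping an entry does not. The two paths as they now
appear, mirroring the hunks above:

    RowIndexEntry entry = metadata.comparator.rowIndexEntrySerializer().deserialize(in, descriptor.version);
    // when the entry is not needed, skipping stays comparator-free:
    RowIndexEntry.Serializer.skip(in);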

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
index 703b6e9..3dc0ded 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
@@ -139,7 +139,7 @@ public class SSTableScanner implements ICompactionScanner
                 }
                 else
                 {
-                    RowIndexEntry.serializer.skip(ifile);
+                    sstable.metadata.comparator.rowIndexEntrySerializer().skip(ifile);
                 }
             }
         }
@@ -220,7 +220,7 @@ public class SSTableScanner implements ICompactionScanner
                             return endOfData();
 
                         currentKey = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
-                        currentEntry = RowIndexEntry.serializer.deserialize(ifile, sstable.descriptor.version);
+                        currentEntry = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(ifile, sstable.descriptor.version);
                     } while (!currentRange.contains(currentKey));
                 }
                 else
@@ -241,7 +241,7 @@ public class SSTableScanner implements ICompactionScanner
                 {
                     // we need the position of the start of the next key, regardless of whether it falls in the current range
                     nextKey = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
-                    nextEntry = RowIndexEntry.serializer.deserialize(ifile, sstable.descriptor.version);
+                    nextEntry = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(ifile, sstable.descriptor.version);
                     readEnd = nextEntry.position;
 
                     if (!currentRange.contains(nextKey))

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
index 6b39024..c881395 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
@@ -77,7 +77,7 @@ public class SSTableSimpleUnsortedWriter extends AbstractSSTableSimpleWriter
                                        int bufferSizeInMB,
                                        CompressionParameters compressParameters)
     {
-        this(directory, new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator).compressionParameters(compressParameters), partitioner, bufferSizeInMB);
+        this(directory, CFMetaData.denseCFMetaData(keyspace, columnFamily, comparator, subComparator).compressionParameters(compressParameters), partitioner, bufferSizeInMB);
     }
 
     public SSTableSimpleUnsortedWriter(File directory,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
index 9b584f0..054d780 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
@@ -56,8 +56,7 @@ public class SSTableSimpleWriter extends AbstractSSTableSimpleWriter
                                AbstractType<?> comparator,
                                AbstractType<?> subComparator)
     {
-        this(directory,
-             new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator), partitioner);
+        this(directory, CFMetaData.denseCFMetaData(keyspace, columnFamily, comparator, subComparator), partitioner);
     }
 
     public SSTableSimpleWriter(File directory, CFMetaData metadata, IPartitioner partitioner)
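
Both simple writers now build their table definition through the new dense
factory. A one-line sketch (the internals of denseCFMetaData are not shown in
this patch, but the replaced constructor suggests a null subComparator still
yields a Standard rather than Super table):

    CFMetaData cfm = CFMetaData.denseCFMetaData("ks", "cf", BytesType.instance, null);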

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index 60bb8d1..17d9b0e 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -433,7 +433,7 @@ public class SSTableWriter extends SSTable
             try
             {
                 ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
-                RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
+                metadata.comparator.rowIndexEntrySerializer().serialize(indexEntry, indexFile.stream);
             }
             catch (IOException e)
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
index 4b9329f..c125a98 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
@@ -24,7 +24,7 @@ import java.util.*;
 import com.google.common.collect.Maps;
 
 import org.apache.cassandra.db.commitlog.ReplayPosition;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.utils.EstimatedHistogram;
 import org.apache.cassandra.utils.StreamingHistogram;
@@ -77,14 +77,14 @@ public class MetadataCollector
     protected int sstableLevel;
     protected List<ByteBuffer> minColumnNames = Collections.emptyList();
     protected List<ByteBuffer> maxColumnNames = Collections.emptyList();
-    private final AbstractType<?> columnNameComparator;
+    private final CellNameType columnNameComparator;
 
-    public MetadataCollector(AbstractType<?> columnNameComparator)
+    public MetadataCollector(CellNameType columnNameComparator)
     {
         this.columnNameComparator = columnNameComparator;
     }
 
-    public MetadataCollector(Collection<SSTableReader> sstables, AbstractType<?> columnNameComparator, int level)
+    public MetadataCollector(Collection<SSTableReader> sstables, CellNameType columnNameComparator, int level)
     {
         this(columnNameComparator);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java b/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
index 786d312..f397ddc 100644
--- a/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
+++ b/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
@@ -150,9 +150,10 @@ public class MappedFileDataInput extends AbstractDataInput implements FileDataIn
     }
 
     @Override
-    public final void readFully(byte[] buffer) throws IOException
+    public final void readFully(byte[] bytes) throws IOException
     {
-        throw new UnsupportedOperationException("use readBytes instead");
+        ByteBufferUtil.arrayCopy(buffer, buffer.position() + position, bytes, 0, bytes.length);
+        position += bytes.length;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/CacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index f626e17..4fe7109 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -39,7 +39,9 @@ import org.apache.cassandra.cache.*;
 import org.apache.cassandra.cache.AutoSavingCache.CacheSerializer;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
+import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -342,7 +344,8 @@ public class CacheService implements CacheServiceMBean
             Descriptor desc = key.desc;
             out.writeInt(desc.generation);
             out.writeBoolean(true);
-            RowIndexEntry.serializer.serialize(entry, out);
+            CFMetaData cfm = Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
+            cfm.comparator.rowIndexEntrySerializer().serialize(entry, out);
         }
 
         public Future<Pair<KeyCacheKey, RowIndexEntry>> deserialize(DataInputStream input, ColumnFamilyStore cfs) throws IOException
@@ -359,10 +362,10 @@ public class CacheService implements CacheServiceMBean
             input.readBoolean(); // backwards compatibility for "promoted indexes" boolean
             if (reader == null)
             {
-                RowIndexEntry.serializer.skipPromotedIndex(input);
+                RowIndexEntry.Serializer.skipPromotedIndex(input);
                 return null;
             }
-            RowIndexEntry entry = RowIndexEntry.serializer.deserialize(input, reader.descriptor.version);
+            RowIndexEntry entry = reader.metadata.comparator.rowIndexEntrySerializer().deserialize(input, reader.descriptor.version);
             return Futures.immediateFuture(Pair.create(new KeyCacheKey(reader.descriptor, key), entry));
         }
 

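The key-cache serializer hunks above replace the static RowIndexEntry.serializer with one obtained from the table's comparator, so promoted index entries are written and read with knowledge of the table's cell-name layout. A hedged fragment of the two directions, reusing only the calls visible in the patch (entry, out, input and reader are assumed to be in scope, as they are in the surrounding methods):

    // write side: resolve the table from the key-cache key, then let its comparator pick the serializer
    CFMetaData cfm = Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
    cfm.comparator.rowIndexEntrySerializer().serialize(entry, out);

    // read side: an open sstable already carries its metadata, hence the matching comparator
    RowIndexEntry restored = reader.metadata.comparator.rowIndexEntrySerializer()
                                                       .deserialize(input, reader.descriptor.version);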
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index 55125e6..cf53865 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -41,9 +41,10 @@ import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndex;
@@ -208,7 +209,7 @@ public class StorageProxy implements StorageProxyMBean
     public static ColumnFamily cas(String keyspaceName,
                                    String cfName,
                                    ByteBuffer key,
-                                   ColumnNameBuilder prefix,
+                                   Composite prefix,
                                    ColumnFamily expected,
                                    ColumnFamily updates,
                                    ConsistencyLevel consistencyForPaxos,
@@ -238,8 +239,8 @@ public class StorageProxy implements StorageProxyMBean
             if (expected == null || expected.isEmpty())
             {
                 SliceQueryFilter filter = prefix == null
-                                        ? new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1)
-                                        : new SliceQueryFilter(prefix.build(), prefix.buildAsEndOfRange(), false, 1, prefix.componentCount());
+                                        ? new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, 1)
+                                        : new SliceQueryFilter(prefix.slice(), false, 1, prefix.size());
                 readCommand = new SliceFromReadCommand(keyspaceName, key, cfName, timestamp, filter);
             }
             else
@@ -1474,7 +1475,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         else
         {
-            if (cfs.metadata.isDense())
+            if (cfs.metadata.comparator.isDense())
             {
                 // one storage row per result row, so use key estimate directly
                 resultRowsPerRange = cfs.estimateKeys();

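In the CAS path above, the prefix argument is now a Composite rather than a ColumnNameBuilder, and the read backing the compare phase is built directly from it: prefix.slice() supplies the ColumnSlice covering the prefix, and prefix.size() replaces the old prefix.componentCount(). A hedged fragment of the two branches, with names as in the hunk:

    // expected == null or empty: fetch at most one live cell, either anywhere in the row
    // (no prefix) or anywhere under the given Composite prefix
    SliceQueryFilter filter = prefix == null
                            ? new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, 1)
                            : new SliceQueryFilter(prefix.slice(), false, 1, prefix.size());
    ReadCommand readCommand = new SliceFromReadCommand(keyspaceName, key, cfName, timestamp, filter);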
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
index 9372665..d4748b0 100644
--- a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -26,6 +25,7 @@ import java.util.Iterator;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.ColumnCounter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -289,12 +289,12 @@ abstract class AbstractQueryPager implements QueryPager
         return Math.min(liveCount, toDiscard);
     }
 
-    protected static ByteBuffer firstName(ColumnFamily cf)
+    protected static CellName firstName(ColumnFamily cf)
     {
         return cf.iterator().next().name();
     }
 
-    protected static ByteBuffer lastName(ColumnFamily cf)
+    protected static CellName lastName(ColumnFamily cf)
     {
         return cf.getReverseSortedColumns().iterator().next().name();
     }

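With the change above, the page-boundary helpers hand back CellNames instead of raw ByteBuffers; as the pager hunks that follow show, flattening to bytes now happens only when a PagingState is built for the client. A short hedged fragment (cf assumed to be a non-empty ColumnFamily):

    CellName first = firstName(cf);                   // previously a raw ByteBuffer
    CellName last  = lastName(cf);
    ByteBuffer forPagingState = last.toByteBuffer();  // flattened only at the PagingState boundary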
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java b/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
index 1f4ba78..9b07163 100644
--- a/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
@@ -17,10 +17,11 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -37,7 +38,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
 {
     private final RangeSliceCommand command;
     private volatile DecoratedKey lastReturnedKey;
-    private volatile ByteBuffer lastReturnedName;
+    private volatile CellName lastReturnedName;
 
     // Don't use directly, use QueryPagers method instead
     RangeSliceQueryPager(RangeSliceCommand command, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -54,7 +55,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
         if (state != null)
         {
             lastReturnedKey = StorageService.getPartitioner().decorateKey(state.partitionKey);
-            lastReturnedName = state.cellName;
+            lastReturnedName = cfm.comparator.cellFromByteBuffer(state.cellName);
             restoreState(state.remaining, true);
         }
     }
@@ -63,7 +64,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
     {
         return lastReturnedKey == null
              ? null
-             : new PagingState(lastReturnedKey.key, lastReturnedName, maxRemaining());
+             : new PagingState(lastReturnedKey.key, lastReturnedName.toByteBuffer(), maxRemaining());
     }
 
     protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -71,7 +72,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
     {
         SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
         AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
-        ByteBuffer start = lastReturnedName == null ? sf.start() : lastReturnedName;
+        Composite start = lastReturnedName == null ? sf.start() : lastReturnedName;
         PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                           command.columnFamily,
                                                           command.timestamp,

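The paging-state hunks above show the round trip a cell name now makes: it leaves the node flattened via toByteBuffer() and is rebuilt against the table's comparator when the next page is requested. Note also that assigning lastReturnedName to a Composite local only type-checks because a CellName is itself a Composite in the new hierarchy. A hedged fragment of the round trip (cfm and state assumed in scope, as in the constructor above):

    // outbound: flatten the last returned cell name into the opaque PagingState blob
    PagingState out = new PagingState(lastReturnedKey.key, lastReturnedName.toByteBuffer(), maxRemaining());

    // inbound: rebuild a CellName from the blob using the table's comparator
    CellName resumed = cfm.comparator.cellFromByteBuffer(state.cellName);

    // the rebuilt CellName can stand in directly wherever a Composite slice bound is expected
    Composite start = resumed;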
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java b/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
index e3825a9..bafb43e 100644
--- a/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
@@ -17,11 +17,11 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -34,7 +34,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
 {
     private final SliceFromReadCommand command;
 
-    private volatile ByteBuffer lastReturned;
+    private volatile CellName lastReturned;
 
     // Don't use directly, use QueryPagers method instead
     SliceQueryPager(SliceFromReadCommand command, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -49,7 +49,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
 
         if (state != null)
         {
-            lastReturned = state.cellName;
+            lastReturned = cfm.comparator.cellFromByteBuffer(state.cellName);
             restoreState(state.remaining, true);
         }
     }
@@ -58,7 +58,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
     {
         return lastReturned == null
              ? null
-             : new PagingState(null, lastReturned, maxRemaining());
+             : new PagingState(null, lastReturned.toByteBuffer(), maxRemaining());
     }
 
     protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index 0b8836d..4705513 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -46,11 +46,11 @@ import org.apache.cassandra.cql.CQLStatement;
 import org.apache.cassandra.cql.QueryProcessor;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.exceptions.*;
@@ -136,7 +136,7 @@ public class CassandraServer implements Cassandra.Iface
             if (column.isMarkedForDelete(now))
                 continue;
 
-            thriftColumns.add(thriftifyColumnWithName(column, column.name()));
+            thriftColumns.add(thriftifyColumnWithName(column, column.name().toByteBuffer()));
         }
 
         // we have to do the reversing here, since internally we pass results around in ColumnFamily
@@ -159,7 +159,7 @@ public class CassandraServer implements Cassandra.Iface
     {
         assert !(column instanceof org.apache.cassandra.db.CounterColumn);
 
-        Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
+        Column thrift_column = new Column(column.name().toByteBuffer()).setValue(column.value()).setTimestamp(column.timestamp());
         if (column instanceof ExpiringColumn)
         {
             thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
@@ -183,7 +183,7 @@ public class CassandraServer implements Cassandra.Iface
     private CounterColumn thriftifySubCounter(org.apache.cassandra.db.Column column)
     {
         assert column instanceof org.apache.cassandra.db.CounterColumn;
-        return new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
+        return new CounterColumn(column.name().toByteBuffer(), CounterContext.instance().total(column.value()));
     }
 
     private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns,
@@ -377,28 +377,34 @@ public class CassandraServer implements Cassandra.Iface
 
     private SliceQueryFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SliceRange range)
     {
-        SliceQueryFilter filter = new SliceQueryFilter(range.start, range.finish, range.reversed, range.count);
+        Composite start = metadata.comparator.fromByteBuffer(range.start);
+        Composite finish = metadata.comparator.fromByteBuffer(range.finish);
+        SliceQueryFilter filter = new SliceQueryFilter(start, finish, range.reversed, range.count);
         if (metadata.isSuper())
-            filter = SuperColumns.fromSCSliceFilter((CompositeType)metadata.comparator, parent.bufferForSuper_column(), filter);
+            filter = SuperColumns.fromSCSliceFilter(metadata.comparator, parent.bufferForSuper_column(), filter);
         return filter;
     }
 
     private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
     {
         IDiskAtomFilter filter;
+        CellNameType columnType = metadata.isSuper()
+                                ? new SimpleDenseCellNameType(metadata.comparator.subtype(parent.isSetSuper_column() ? 1 : 0))
+                                : metadata.comparator;
         if (predicate.column_names != null)
         {
             if (metadata.isSuper())
             {
-                CompositeType type = (CompositeType)metadata.comparator;
-                SortedSet s = new TreeSet<ByteBuffer>(parent.isSetSuper_column() ? type.types.get(1) : type.types.get(0));
-                s.addAll(predicate.column_names);
-                filter = SuperColumns.fromSCNamesFilter(type, parent.bufferForSuper_column(), new NamesQueryFilter(s));
+                SortedSet<CellName> s = new TreeSet<CellName>(columnType);
+                for (ByteBuffer bb : predicate.column_names)
+                    s.add(columnType.cellFromByteBuffer(bb));
+                filter = SuperColumns.fromSCNamesFilter(metadata.comparator, parent.bufferForSuper_column(), new NamesQueryFilter(s));
             }
             else
             {
-                SortedSet s = new TreeSet<ByteBuffer>(metadata.comparator);
-                s.addAll(predicate.column_names);
+                SortedSet<CellName> s = new TreeSet<CellName>(metadata.comparator);
+                for (ByteBuffer bb : predicate.column_names)
+                    s.add(metadata.comparator.cellFromByteBuffer(bb));
                 filter = new NamesQueryFilter(s);
             }
         }
@@ -469,15 +475,15 @@ public class CassandraServer implements Cassandra.Iface
             IDiskAtomFilter filter;
             if (metadata.isSuper())
             {
-                CompositeType type = (CompositeType)metadata.comparator;
-                SortedSet names = new TreeSet<ByteBuffer>(column_path.column == null ? type.types.get(0) : type.types.get(1));
+                CellNameType type = metadata.comparator;
+                SortedSet names = new TreeSet<ByteBuffer>(column_path.column == null ? type.subtype(0) : type.subtype(1));
                 names.add(column_path.column == null ? column_path.super_column : column_path.column);
                 filter = SuperColumns.fromSCNamesFilter(type, column_path.column == null ? null : column_path.bufferForSuper_column(), new NamesQueryFilter(names));
             }
             else
             {
-                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(metadata.comparator);
-                names.add(column_path.column);
+                SortedSet<CellName> names = new TreeSet<CellName>(metadata.comparator);
+                names.add(metadata.comparator.cellFromByteBuffer(column_path.column));
                 filter = new NamesQueryFilter(names);
             }
 
@@ -647,14 +653,14 @@ public class CassandraServer implements Cassandra.Iface
             throw new org.apache.cassandra.exceptions.InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
         }
         ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
-        ThriftValidation.validateColumnData(metadata, column, column_parent.super_column != null);
+        ThriftValidation.validateColumnData(metadata, column_parent.super_column, column);
 
         RowMutation rm;
         try
         {
-            ByteBuffer name = column.name;
-            if (metadata.isSuper())
-                name = CompositeType.build(column_parent.super_column, name);
+            CellName name = metadata.isSuper()
+                          ? metadata.comparator.makeCellName(column_parent.super_column, column.name)
+                          : metadata.comparator.cellFromByteBuffer(column.name);
 
             ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cState.getKeyspace(), column_parent.column_family);
             cf.addColumn(name, column.value, column.timestamp, column.ttl);
@@ -740,12 +746,12 @@ public class CassandraServer implements Cassandra.Iface
             });
             ThriftValidation.validateColumnNames(metadata, new ColumnParent(column_family), names);
             for (Column column : updates)
-                ThriftValidation.validateColumnData(metadata, column, false);
+                ThriftValidation.validateColumnData(metadata, null, column);
 
             CFMetaData cfm = Schema.instance.getCFMetaData(cState.getKeyspace(), column_family);
             UnsortedColumns cfUpdates = UnsortedColumns.factory.create(cfm);
             for (Column column : updates)
-                cfUpdates.addColumn(column.name, column.value, column.timestamp);
+                cfUpdates.addColumn(cfm.comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
 
             ColumnFamily cfExpected;
             if (expected.isEmpty())
@@ -756,7 +762,7 @@ public class CassandraServer implements Cassandra.Iface
             {
                 cfExpected = TreeMapBackedSortedColumns.factory.create(cfm);
                 for (Column column : expected)
-                    cfExpected.addColumn(column.name, column.value, column.timestamp);
+                    cfExpected.addColumn(cfm.comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
             }
 
             schedule(DatabaseDescriptor.getWriteRpcTimeout());
@@ -837,11 +843,11 @@ public class CassandraServer implements Cassandra.Iface
 
                     if (mutation.deletion != null)
                     {
-                        deleteColumnOrSuperColumn(rm, cfName, mutation.deletion);
+                        deleteColumnOrSuperColumn(rm, metadata, mutation.deletion);
                     }
                     if (mutation.column_or_supercolumn != null)
                     {
-                        addColumnOrSuperColumn(rm, cfName, mutation.column_or_supercolumn);
+                        addColumnOrSuperColumn(rm, metadata, mutation.column_or_supercolumn);
                     }
                 }
             }
@@ -860,67 +866,70 @@ public class CassandraServer implements Cassandra.Iface
         return rowMutations;
     }
 
-    private void addColumnOrSuperColumn(RowMutation rm, String cfName, ColumnOrSuperColumn cosc)
+    private void addColumnOrSuperColumn(RowMutation rm, CFMetaData cfm, ColumnOrSuperColumn cosc)
     {
         if (cosc.super_column != null)
         {
             for (Column column : cosc.super_column.columns)
             {
-                rm.add(cfName, CompositeType.build(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
+                rm.add(cfm.cfName, cfm.comparator.makeCellName(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
             }
         }
         else if (cosc.column != null)
         {
-            rm.add(cfName, cosc.column.name, cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
+            rm.add(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.column.name), cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
         }
         else if (cosc.counter_super_column != null)
         {
             for (CounterColumn column : cosc.counter_super_column.columns)
             {
-                rm.addCounter(cfName, CompositeType.build(cosc.counter_super_column.name, column.name), column.value);
+                rm.addCounter(cfm.cfName, cfm.comparator.makeCellName(cosc.counter_super_column.name, column.name), column.value);
             }
         }
         else // cosc.counter_column != null
         {
-            rm.addCounter(cfName, cosc.counter_column.name, cosc.counter_column.value);
+            rm.addCounter(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.counter_column.name), cosc.counter_column.value);
         }
     }
 
-    private void deleteColumnOrSuperColumn(RowMutation rm, String cfName, Deletion del)
+    private void deleteColumnOrSuperColumn(RowMutation rm, CFMetaData cfm, Deletion del)
     {
         if (del.predicate != null && del.predicate.column_names != null)
         {
             for (ByteBuffer c : del.predicate.column_names)
             {
-                if (del.super_column == null && Schema.instance.getColumnFamilyType(rm.getKeyspaceName(), cfName) == ColumnFamilyType.Super)
-                    rm.deleteRange(cfName, SuperColumns.startOf(c), SuperColumns.endOf(c), del.timestamp);
+                if (del.super_column == null && cfm.isSuper())
+                    rm.deleteRange(cfm.cfName, SuperColumns.startOf(c), SuperColumns.endOf(c), del.timestamp);
                 else if (del.super_column != null)
-                    rm.delete(cfName, CompositeType.build(del.super_column, c), del.timestamp);
+                    rm.delete(cfm.cfName, cfm.comparator.makeCellName(del.super_column, c), del.timestamp);
                 else
-                    rm.delete(cfName, c, del.timestamp);
+                    rm.delete(cfm.cfName, cfm.comparator.cellFromByteBuffer(c), del.timestamp);
             }
         }
         else if (del.predicate != null && del.predicate.slice_range != null)
         {
-            if (del.super_column == null && Schema.instance.getColumnFamilyType(rm.getKeyspaceName(), cfName) == ColumnFamilyType.Super)
-                rm.deleteRange(cfName,
+            if (del.super_column == null && cfm.isSuper())
+                rm.deleteRange(cfm.cfName,
                                SuperColumns.startOf(del.predicate.getSlice_range().start),
                                SuperColumns.startOf(del.predicate.getSlice_range().finish),
                                del.timestamp);
             else if (del.super_column != null)
-                rm.deleteRange(cfName,
-                               CompositeType.build(del.super_column, del.predicate.getSlice_range().start),
-                               CompositeType.build(del.super_column, del.predicate.getSlice_range().finish),
+                rm.deleteRange(cfm.cfName,
+                               cfm.comparator.makeCellName(del.super_column, del.predicate.getSlice_range().start),
+                               cfm.comparator.makeCellName(del.super_column, del.predicate.getSlice_range().finish),
                                del.timestamp);
             else
-                rm.deleteRange(cfName, del.predicate.getSlice_range().start, del.predicate.getSlice_range().finish, del.timestamp);
+                rm.deleteRange(cfm.cfName,
+                               cfm.comparator.cellFromByteBuffer(del.predicate.getSlice_range().start),
+                               cfm.comparator.cellFromByteBuffer(del.predicate.getSlice_range().finish),
+                               del.timestamp);
         }
         else
         {
             if (del.super_column != null)
-                rm.deleteRange(cfName, SuperColumns.startOf(del.super_column), SuperColumns.endOf(del.super_column), del.timestamp);
+                rm.deleteRange(cfm.cfName, SuperColumns.startOf(del.super_column), SuperColumns.endOf(del.super_column), del.timestamp);
             else
-                rm.delete(cfName, del.timestamp);
+                rm.delete(cfm.cfName, del.timestamp);
         }
     }
 
@@ -1007,11 +1016,11 @@ public class CassandraServer implements Cassandra.Iface
         if (column_path.super_column == null && column_path.column == null)
             rm.delete(column_path.column_family, timestamp);
         else if (column_path.super_column == null)
-            rm.delete(column_path.column_family, column_path.column, timestamp);
+            rm.delete(column_path.column_family, metadata.comparator.cellFromByteBuffer(column_path.column), timestamp);
         else if (column_path.column == null)
             rm.deleteRange(column_path.column_family, SuperColumns.startOf(column_path.super_column), SuperColumns.endOf(column_path.super_column), timestamp);
         else
-            rm.delete(column_path.column_family, CompositeType.build(column_path.super_column, column_path.column), timestamp);
+            rm.delete(column_path.column_family, metadata.comparator.makeCellName(column_path.super_column, column_path.column), timestamp);
 
         if (isCommutativeOp)
             doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, ThriftConversion.fromThrift(consistency_level))));
@@ -1768,9 +1777,9 @@ public class CassandraServer implements Cassandra.Iface
             try
             {
                 if (metadata.isSuper())
-                    rm.addCounter(column_parent.column_family, CompositeType.build(column_parent.super_column, column.name), column.value);
+                    rm.addCounter(column_parent.column_family, metadata.comparator.makeCellName(column_parent.super_column, column.name), column.value);
                 else
-                    rm.addCounter(column_parent.column_family, column.name, column.value);
+                    rm.addCounter(column_parent.column_family, metadata.comparator.cellFromByteBuffer(column.name), column.value);
             }
             catch (MarshalException e)
             {

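Across CassandraServer the recurring move is the same: a Thrift ByteBuffer name is promoted to a CellName by the table's comparator, with a super-column name, when present, folded in as the first component. A hedged fragment of that promotion as it appears in the write path (metadata, column_parent, column and rm assumed in scope, as in the hunks above):

    CellName name = metadata.isSuper()
                  ? metadata.comparator.makeCellName(column_parent.super_column, column.name) // (sc, sub-column)
                  : metadata.comparator.cellFromByteBuffer(column.name);                      // plain cell name
    rm.add(column_parent.column_family, name, column.value, column.timestamp, column.ttl);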
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/thrift/ThriftValidation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftValidation.java b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
index e68b070..b1eb50b 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftValidation.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
@@ -20,23 +20,23 @@ package org.apache.cassandra.thrift;
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import org.apache.cassandra.serializers.MarshalException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.ColumnToCollectionType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -229,29 +229,28 @@ public class ThriftValidation
             if (isCQL3Table)
             {
                 // CQL3 tables don't support having only part of their composite column names set
-                CompositeType composite = (CompositeType)comparator;
-                ByteBuffer[] components = composite.split(name);
-                int minComponents = composite.types.size() - (metadata.hasCollections() ? 1 : 0);
-                if (components.length < minComponents)
+                Composite composite = metadata.comparator.fromByteBuffer(name);
+
+                int minComponents = metadata.comparator.clusteringPrefixSize() + 1;
+                if (composite.size() < minComponents)
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Not enough components (found %d but %d expected) for column name since %s is a CQL3 table",
-                                                                                                    components.length, minComponents, metadata.cfName));
+                                                                                                    composite.size(), minComponents, metadata.cfName));
 
                 // Furthermore, the column name must be a declared one.
-                int columnIndex = composite.types.size() - (metadata.hasCollections() ? 2 : 1);
-                ByteBuffer CQL3ColumnName = components[columnIndex];
+                int columnIndex = metadata.comparator.clusteringPrefixSize();
+                ByteBuffer CQL3ColumnName = composite.get(columnIndex);
                 if (!CQL3ColumnName.hasRemaining())
                     continue; // Row marker, ok
 
-                ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, composite.types.get(columnIndex));
+                ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, metadata.comparator.subtype(columnIndex));
                 if (metadata.getColumnDefinition(columnId) == null)
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid cell for CQL3 table %s. The CQL3 column component (%s) does not correspond to a defined CQL3 column",
                                                                                                     metadata.cfName, columnId));
 
                 // On top of that, if we have a collection component, the (CQL3) column must be a collection
-                if (metadata.hasCollections() && components.length == composite.types.size())
+                if (metadata.comparator.hasCollections() && composite.size() == metadata.comparator.size())
                 {
-                    assert components.length >= 2;
-                    ColumnToCollectionType collectionType = (ColumnToCollectionType)composite.types.get(composite.types.size() - 1);
+                    ColumnToCollectionType collectionType = metadata.comparator.collectionType();
                     if (!collectionType.defined.containsKey(CQL3ColumnName))
                         throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid collection component, %s is not a collection", UTF8Type.instance.getString(CQL3ColumnName)));
                 }
@@ -310,7 +309,7 @@ public class ThriftValidation
 
             validateTtl(cosc.column);
             validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.column.name));
-            validateColumnData(metadata, cosc.column, false);
+            validateColumnData(metadata, null, cosc.column);
         }
 
         if (cosc.super_column != null)
@@ -321,7 +320,7 @@ public class ThriftValidation
             for (Column c : cosc.super_column.columns)
             {
                 validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.super_column.name).setColumn(c.name));
-                validateColumnData(metadata, c, true);
+                validateColumnData(metadata, cosc.super_column.name, c);
             }
         }
 
@@ -426,7 +425,7 @@ public class ThriftValidation
     /**
      * Validates the data part of the column (everything in the Column object but the name, which is assumed to be valid)
      */
-    public static void validateColumnData(CFMetaData metadata, Column column, boolean isSubColumn) throws org.apache.cassandra.exceptions.InvalidRequestException
+    public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
     {
         validateTtl(column);
         if (!column.isSetValue())
@@ -434,7 +433,10 @@ public class ThriftValidation
         if (!column.isSetTimestamp())
             throw new org.apache.cassandra.exceptions.InvalidRequestException("Column timestamp is required");
 
-        ColumnDefinition columnDef = metadata.getColumnDefinitionFromCellName(column.name);
+        CellName cn = scName == null
+                    ? metadata.comparator.cellFromByteBuffer(column.name)
+                    : metadata.comparator.makeCellName(scName, column.name);
+        ColumnDefinition columnDef = metadata.getColumnDefinition(cn);
         try
         {
             AbstractType<?> validator = metadata.getValueValidator(columnDef);
@@ -450,11 +452,11 @@ public class ThriftValidation
                                                                       me.getMessage(),
                                                                       metadata.ksName,
                                                                       metadata.cfName,
-                                                                      (SuperColumns.getComparatorFor(metadata, isSubColumn)).getString(column.name)));
+                                                                      (SuperColumns.getComparatorFor(metadata, scName != null)).getString(column.name)));
         }
 
         // Indexed column values cannot be larger than 64K.  See CASSANDRA-3057/4240 for more details
-        if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(column)))
+        if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(cn, column)))
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s",
                                                                               column.value.remaining(),
                                                                               columnDef.getIndexName(),
@@ -462,12 +464,12 @@ public class ThriftValidation
                                                                               metadata.ksName));
     }
 
-    private static org.apache.cassandra.db.Column asDBColumn(Column column)
+    private static org.apache.cassandra.db.Column asDBColumn(CellName name, Column column)
     {
         if (column.ttl <= 0)
-            return new org.apache.cassandra.db.Column(column.name, column.value, column.timestamp);
+            return new org.apache.cassandra.db.Column(name, column.value, column.timestamp);
         else
-            return new org.apache.cassandra.db.ExpiringColumn(column.name, column.value, column.timestamp, column.ttl);
+            return new org.apache.cassandra.db.ExpiringColumn(name, column.value, column.timestamp, column.ttl);
     }
 
     /**
@@ -588,7 +590,8 @@ public class ThriftValidation
             if (expression.value.remaining() > 0xFFFF)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("Index expression values may not be larger than 64K");
 
-            AbstractType<?> valueValidator = metadata.getValueValidatorFromCellName(expression.column_name);
+            CellName name = metadata.comparator.cellFromByteBuffer(expression.column_name);
+            AbstractType<?> valueValidator = metadata.getValueValidator(name);
             try
             {
                 valueValidator.validate(expression.value);
@@ -601,7 +604,7 @@ public class ThriftValidation
                                                                                   me.getMessage()));
             }
 
-            isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(expression.column_name);
+            isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(name);
         }
 
         return isIndexed;
@@ -631,23 +634,28 @@ public class ThriftValidation
     {
         SliceRange sr = sp.slice_range;
         IDiskAtomFilter filter;
+
+        CellNameType comparator = metadata.isSuper()
+                                ? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1))
+                                : metadata.comparator;
         if (sr == null)
         {
-            AbstractType<?> comparator = metadata.isSuper()
-                    ? ((CompositeType)metadata.comparator).types.get(superColumn == null ? 0 : 1)
-                    : metadata.comparator;
 
-            SortedSet<ByteBuffer> ss = new TreeSet<ByteBuffer>(comparator);
-            ss.addAll(sp.column_names);
+            SortedSet<CellName> ss = new TreeSet<CellName>(comparator);
+            for (ByteBuffer bb : sp.column_names)
+                ss.add(comparator.cellFromByteBuffer(bb));
             filter = new NamesQueryFilter(ss);
         }
         else
         {
-            filter = new SliceQueryFilter(sr.start, sr.finish, sr.reversed, sr.count);
+            filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start),
+                                          comparator.fromByteBuffer(sr.finish),
+                                          sr.reversed,
+                                          sr.count);
         }
 
         if (metadata.isSuper())
-            filter = SuperColumns.fromSCFilter((CompositeType)metadata.comparator, superColumn, filter);
+            filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter);
         return filter;
     }
 }

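One reasoning step worth spelling out for the CQL3 check above: with k clustering columns, metadata.comparator.clusteringPrefixSize() is taken here to be k, so a Thrift-supplied cell name must decompose into at least k + 1 components, the clustering prefix plus the component naming the CQL3 column. A hedged fragment of that decomposition, using only calls from the hunk (metadata and name assumed in scope):

    Composite composite = metadata.comparator.fromByteBuffer(name);

    int minComponents = metadata.comparator.clusteringPrefixSize() + 1;
    if (composite.size() < minComponents)
        throw new org.apache.cassandra.exceptions.InvalidRequestException("not enough components for a CQL3 table");

    // the component right after the clustering prefix names the CQL3 column (empty means the row marker)
    ByteBuffer cql3ColumnName = composite.get(metadata.comparator.clusteringPrefixSize());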
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tools/BulkLoader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index c89bb83..38c51ac 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -37,6 +37,7 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.SSTableLoader;
@@ -208,14 +209,24 @@ public class BulkLoader
                         }
                     }
 
-                    String query = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s'",
+                    String cfQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s'",
                                                  Keyspace.SYSTEM_KS,
                                                  SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF,
                                                  keyspace);
-                    CqlResult result = client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE);
-                    for (CqlRow row : result.rows)
+                    CqlResult cfRes = client.execute_cql3_query(ByteBufferUtil.bytes(cfQuery), Compression.NONE, ConsistencyLevel.ONE);
+
+
+                    for (CqlRow row : cfRes.rows)
                     {
-                        CFMetaData metadata = CFMetaData.fromThriftCqlRow(row);
+                        String columnFamily = UTF8Type.instance.getString(row.columns.get(1).bufferForName());
+                        String columnsQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s' AND columnfamily_name = '%s'",
+                                                            Keyspace.SYSTEM_KS,
+                                                            SystemKeyspace.SCHEMA_COLUMNS_CF,
+                                                            keyspace,
+                                                            columnFamily);
+                        CqlResult columnsRes = client.execute_cql3_query(ByteBufferUtil.bytes(columnsQuery), Compression.NONE, ConsistencyLevel.ONE);
+
+                        CFMetaData metadata = CFMetaData.fromThriftCqlRow(row, columnsRes);
                         knownCfs.put(metadata.cfName, metadata);
                     }
                     break;

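The BulkLoader change above turns one schema query into two: each row from schema_columnfamilies is now followed by a query against schema_columns for that table, and both results feed CFMetaData.fromThriftCqlRow(row, columnsRes), presumably because the column definitions needed to rebuild the table's comparator live in schema_columns. With hypothetical values ks1/tbl1 substituted, the statements sent over the Thrift CQL3 interface read roughly as in the comments below:

    //   SELECT * FROM system.schema_columnfamilies WHERE keyspace_name = 'ks1'
    //   SELECT * FROM system.schema_columns WHERE keyspace_name = 'ks1' AND columnfamily_name = 'tbl1'
    CFMetaData metadata = CFMetaData.fromThriftCqlRow(row, columnsRes);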
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tools/SSTableExport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index dab7760..c5fc970 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -146,18 +147,17 @@ public class SSTableExport
 
     private static List<Object> serializeAtom(OnDiskAtom atom, CFMetaData cfMetaData)
     {
-        AbstractType<?> comparator = cfMetaData.comparator;
         if (atom instanceof Column)
         {
-            return serializeColumn((Column) atom, comparator, cfMetaData);
+            return serializeColumn((Column) atom, cfMetaData);
         }
         else
         {
             assert atom instanceof RangeTombstone;
             RangeTombstone rt = (RangeTombstone) atom;
             ArrayList<Object> serializedColumn = new ArrayList<Object>();
-            serializedColumn.add(comparator.getString(rt.min));
-            serializedColumn.add(comparator.getString(rt.max));
+            serializedColumn.add(cfMetaData.comparator.getString(rt.min));
+            serializedColumn.add(cfMetaData.comparator.getString(rt.max));
             serializedColumn.add(rt.data.markedForDeleteAt);
             serializedColumn.add("t");
             serializedColumn.add(rt.data.localDeletionTime);
@@ -173,21 +173,21 @@ public class SSTableExport
      * @param cfMetaData Column Family metadata (to get validator)
      * @return column as serialized list
      */
-    private static List<Object> serializeColumn(Column column, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static List<Object> serializeColumn(Column column, CFMetaData cfMetaData)
     {
+        CellNameType comparator = cfMetaData.comparator;
         ArrayList<Object> serializedColumn = new ArrayList<Object>();
 
-        ByteBuffer name = ByteBufferUtil.clone(column.name());
         ByteBuffer value = ByteBufferUtil.clone(column.value());
 
-        serializedColumn.add(comparator.getString(name));
+        serializedColumn.add(comparator.getString(column.name()));
         if (column instanceof DeletedColumn)
         {
             serializedColumn.add(ByteBufferUtil.bytesToHex(value));
         }
         else
         {
-            AbstractType<?> validator = cfMetaData.getValueValidator(cfMetaData.getColumnDefinitionFromCellName(name));
+            AbstractType<?> validator = cfMetaData.getValueValidator(column.name());
             serializedColumn.add(validator.getString(value));
         }
         serializedColumn.add(column.timestamp());


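After the SSTableExport change above, both the printable cell name and the value validator are derived from the cell name itself through the table's CellNameType, rather than from a cloned name ByteBuffer. A hedged fragment of the resulting [name, value, timestamp] triple for a live column (column and the cloned value assumed in scope, as in serializeColumn; the DeletedColumn branch above hex-encodes the value instead):

    CellNameType comparator = cfMetaData.comparator;
    List<Object> serialized = new ArrayList<Object>();
    serialized.add(comparator.getString(column.name()));                          // human-readable cell name
    serialized.add(cfMetaData.getValueValidator(column.name()).getString(value)); // value rendered by its validator
    serialized.add(column.timestamp());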
[02/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/KeyspaceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/KeyspaceTest.java b/test/unit/org/apache/cassandra/db/KeyspaceTest.java
index f1fcb29..1c392c9 100644
--- a/test/unit/org/apache/cassandra/db/KeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyspaceTest.java
@@ -32,6 +32,7 @@ import org.junit.Test;
 import static org.junit.Assert.*;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.BytesType;
@@ -40,6 +41,8 @@ import org.apache.cassandra.db.marshal.IntegerType;
 import org.apache.cassandra.utils.WrappedRunnable;
 import static org.apache.cassandra.Util.column;
 import static org.apache.cassandra.Util.expiringColumn;
+import static org.apache.cassandra.Util.getBytes;
+import static org.apache.cassandra.Util.cellname;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -74,25 +77,13 @@ public class KeyspaceTest extends SchemaLoader
             {
                 ColumnFamily cf;
 
-                cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY,
-                                                                        "Standard3",
-                                                                        new TreeSet<ByteBuffer>(),
-                                                                        System.currentTimeMillis()));
+                cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, TEST_KEY));
                 assertColumns(cf);
 
-                cf = cfStore.getColumnFamily(QueryFilter.getSliceFilter(TEST_KEY,
-                                                                        "Standard3",
-                                                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                                        false,
-                                                                        0,
-                                                                        System.currentTimeMillis()));
+                cf = cfStore.getColumnFamily(QueryFilter.getSliceFilter(TEST_KEY, "Standard3", Composites.EMPTY, Composites.EMPTY, false, 0, System.currentTimeMillis()));
                 assertColumns(cf);
 
-                cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY,
-                                                                        "Standard3",
-                                                                        ByteBufferUtil.bytes("col99"),
-                                                                        System.currentTimeMillis()));
+                cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, TEST_KEY, "col99"));
                 assertColumns(cf);
             }
         };
@@ -118,16 +109,10 @@ public class KeyspaceTest extends SchemaLoader
             {
                 ColumnFamily cf;
 
-                cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY,
-                                                                        "Standard1",
-                                                                        ByteBufferUtil.bytes("col1"),
-                                                                        System.currentTimeMillis()));
+                cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, TEST_KEY, "col1"));
                 assertColumns(cf, "col1");
 
-                cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(TEST_KEY,
-                                                                        "Standard1",
-                                                                        ByteBufferUtil.bytes("col3"),
-                                                                        System.currentTimeMillis()));
+                cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, TEST_KEY, "col3"));
                 assertColumns(cf, "col3");
             }
         };
@@ -148,13 +133,13 @@ public class KeyspaceTest extends SchemaLoader
         RowMutation rm = new RowMutation("Keyspace1", key.key, cf);
         rm.apply();
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("b"), ByteBufferUtil.bytes("c"), false, 100, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("b"), cellname("c"), false, 100, System.currentTimeMillis());
         assertEquals(2, cf.getColumnCount());
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("b"), ByteBufferUtil.bytes("b"), false, 100, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("b"), cellname("b"), false, 100, System.currentTimeMillis());
         assertEquals(1, cf.getColumnCount());
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("b"), ByteBufferUtil.bytes("c"), false, 1, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("b"), cellname("c"), false, 1, System.currentTimeMillis());
         assertEquals(1, cf.getColumnCount());
     }
 
@@ -204,30 +189,30 @@ public class KeyspaceTest extends SchemaLoader
                 assert DatabaseDescriptor.getColumnIndexSize() == 4096 : "Unexpected column index size, block boundaries won't be where tests expect them.";
 
                 // test forward, spanning a segment.
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col096"), ByteBufferUtil.bytes("col099"), false, 4, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col096"), cellname("col099"), false, 4, System.currentTimeMillis());
                 assertColumns(cf, "col096", "col097", "col098", "col099");
 
                 // test reversed, spanning a segment.
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col099"), ByteBufferUtil.bytes("col096"), true, 4, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col099"), cellname("col096"), true, 4, System.currentTimeMillis());
                 assertColumns(cf, "col096", "col097", "col098", "col099");
 
                 // test forward, within a segment.
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col100"), ByteBufferUtil.bytes("col103"), false, 4, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col100"), cellname("col103"), false, 4, System.currentTimeMillis());
                 assertColumns(cf, "col100", "col101", "col102", "col103");
 
                 // test reversed, within a segment.
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col103"), ByteBufferUtil.bytes("col100"), true, 4, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col103"), cellname("col100"), true, 4, System.currentTimeMillis());
                 assertColumns(cf, "col100", "col101", "col102", "col103");
 
                 // test forward from beginning, spanning a segment.
                 String[] strCols = new String[100]; // col000-col099
                 for (int i = 0; i < 100; i++)
                     strCols[i] = "col" + fmt.format(i);
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes("col099"), false, 100, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, Composites.EMPTY, cellname("col099"), false, 100, System.currentTimeMillis());
                 assertColumns(cf, strCols);
 
                 // test reversed, from end, spanning a segment.
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes("col288"), true, 12, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, Composites.EMPTY, cellname("col288"), true, 12, System.currentTimeMillis());
                 assertColumns(cf, "col288", "col289", "col290", "col291", "col292", "col293", "col294", "col295", "col296", "col297", "col298", "col299");
             }
         };
@@ -245,7 +230,7 @@ public class KeyspaceTest extends SchemaLoader
         for (int i = 0; i < 10; i++)
         {
             ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "StandardLong1");
-            cf.addColumn(new Column(ByteBufferUtil.bytes((long)i), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0));
+            cf.addColumn(new Column(cellname((long)i), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0));
             RowMutation rm = new RowMutation("Keyspace1", ROW.key, cf);
             rm.apply();
         }
@@ -255,13 +240,13 @@ public class KeyspaceTest extends SchemaLoader
         for (int i = 10; i < 20; i++)
         {
             ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "StandardLong1");
-            cf.addColumn(new Column(ByteBufferUtil.bytes((long)i), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0));
+            cf.addColumn(new Column(cellname((long)i), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0));
             RowMutation rm = new RowMutation("Keyspace1", ROW.key, cf);
             rm.apply();
 
-            cf = cfs.getColumnFamily(ROW, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 1, System.currentTimeMillis());
+            cf = cfs.getColumnFamily(ROW, Composites.EMPTY, Composites.EMPTY, true, 1, System.currentTimeMillis());
             assertEquals(1, Iterables.size(cf.getColumnNames()));
-            assertEquals(i, cf.getColumnNames().iterator().next().getLong());
+            assertEquals(i, cf.getColumnNames().iterator().next().toByteBuffer().getLong());
         }
     }
 
@@ -271,11 +256,11 @@ public class KeyspaceTest extends SchemaLoader
         ColumnFamily cf;
 
         // key before the rows that exists
-        cf = cfStore.getColumnFamily(Util.dk("a"), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(Util.dk("a"), Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
         assertColumns(cf);
 
         // key after the rows that exist
-        cf = cfStore.getColumnFamily(Util.dk("z"), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(Util.dk("z"), Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
         assertColumns(cf);
     }
 
@@ -298,7 +283,7 @@ public class KeyspaceTest extends SchemaLoader
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ROW.key);
-        rm.delete("Standard1", ByteBufferUtil.bytes("col4"), 2L);
+        rm.delete("Standard1", cellname("col4"), 2L);
         rm.apply();
 
         Runnable verify = new WrappedRunnable()
@@ -307,26 +292,26 @@ public class KeyspaceTest extends SchemaLoader
             {
                 ColumnFamily cf;
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col5"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col5"), Composites.EMPTY, false, 2, System.currentTimeMillis());
                 assertColumns(cf, "col5", "col7");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col4"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col4"), Composites.EMPTY, false, 2, System.currentTimeMillis());
                 assertColumns(cf, "col4", "col5", "col7");
                 assertColumns(ColumnFamilyStore.removeDeleted(cf, Integer.MAX_VALUE), "col5", "col7");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col5"), ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col5"), Composites.EMPTY, true, 2, System.currentTimeMillis());
                 assertColumns(cf, "col3", "col4", "col5");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col6"), ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col6"), Composites.EMPTY, true, 2, System.currentTimeMillis());
                 assertColumns(cf, "col3", "col4", "col5");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, Composites.EMPTY, Composites.EMPTY, true, 2, System.currentTimeMillis());
                 assertColumns(cf, "col7", "col9");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col95"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col95"), Composites.EMPTY, false, 2, System.currentTimeMillis());
                 assertColumns(cf);
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col0"), Composites.EMPTY, true, 2, System.currentTimeMillis());
                 assertColumns(cf);
             }
         };
@@ -355,11 +340,11 @@ public class KeyspaceTest extends SchemaLoader
             {
                 ColumnFamily cf;
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 2, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, Composites.EMPTY, Composites.EMPTY, false, 2, System.currentTimeMillis());
                 assertColumns(cf, "col1", "col2");
                 assertColumns(ColumnFamilyStore.removeDeleted(cf, Integer.MAX_VALUE), "col1");
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col2"), Composites.EMPTY, false, 1, System.currentTimeMillis());
                 assertColumns(cf, "col2");
                 assertColumns(ColumnFamilyStore.removeDeleted(cf, Integer.MAX_VALUE));
             }
@@ -400,16 +385,16 @@ public class KeyspaceTest extends SchemaLoader
             {
                 ColumnFamily cf;
 
-                cf = cfStore.getColumnFamily(ROW, ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 3, System.currentTimeMillis());
+                cf = cfStore.getColumnFamily(ROW, cellname("col2"), Composites.EMPTY, false, 3, System.currentTimeMillis());
                 assertColumns(cf, "col2", "col3", "col4");
 
-                ByteBuffer col = cf.getColumn(ByteBufferUtil.bytes("col2")).value();
+                ByteBuffer col = cf.getColumn(cellname("col2")).value();
                 assertEquals(ByteBufferUtil.string(col), "valx");
 
-                col = cf.getColumn(ByteBufferUtil.bytes("col3")).value();
+                col = cf.getColumn(cellname("col3")).value();
                 assertEquals(ByteBufferUtil.string(col), "valx");
 
-                col = cf.getColumn(ByteBufferUtil.bytes("col4")).value();
+                col = cf.getColumn(cellname("col4")).value();
                 assertEquals(ByteBufferUtil.string(col), "val4");
             }
         };
@@ -465,32 +450,32 @@ public class KeyspaceTest extends SchemaLoader
             cfStore.forceBlockingFlush();
         }
         cfStore.metric.sstablesPerReadHistogram.clear();
-        ColumnFamily cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes(""), ByteBufferUtil.bytes("col1499"), false, 1000, System.currentTimeMillis());
+        ColumnFamily cf = cfStore.getColumnFamily(key, Composites.EMPTY, cellname("col1499"), false, 1000, System.currentTimeMillis());
         assertEquals(cfStore.metric.sstablesPerReadHistogram.max(), 5, 0.1);
         int i = 0;
         for (Column c : cf.getSortedColumns())
         {
-            assertEquals(ByteBufferUtil.string(c.name), "col" + (1000 + i++));
+            assertEquals(ByteBufferUtil.string(c.name.toByteBuffer()), "col" + (1000 + i++));
         }
         assertEquals(i, 500);
         cfStore.metric.sstablesPerReadHistogram.clear();
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col1500"), ByteBufferUtil.bytes("col2000"), false, 1000, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col1500"), cellname("col2000"), false, 1000, System.currentTimeMillis());
         assertEquals(cfStore.metric.sstablesPerReadHistogram.max(), 5, 0.1);
 
         for (Column c : cf.getSortedColumns())
         {
-            assertEquals(ByteBufferUtil.string(c.name), "col"+(1000 + i++));
+            assertEquals(ByteBufferUtil.string(c.name.toByteBuffer()), "col"+(1000 + i++));
         }
         assertEquals(i, 1000);
 
         // reverse
         cfStore.metric.sstablesPerReadHistogram.clear();
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col2000"), ByteBufferUtil.bytes("col1500"), true, 1000, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col2000"), cellname("col1500"), true, 1000, System.currentTimeMillis());
         assertEquals(cfStore.metric.sstablesPerReadHistogram.max(), 5, 0.1);
         i = 500;
         for (Column c : cf.getSortedColumns())
         {
-            assertEquals(ByteBufferUtil.string(c.name), "col"+(1000 + i++));
+            assertEquals(ByteBufferUtil.string(c.name.toByteBuffer()), "col"+(1000 + i++));
         }
         assertEquals(i, 1000);
 
@@ -518,21 +503,21 @@ public class KeyspaceTest extends SchemaLoader
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
         cfs.disableAutoCompaction();
 
-        CompositeType ct = CompositeType.getInstance(BytesType.instance, IntegerType.instance);
+        CellNameType type = cfs.getComparator();
         DecoratedKey key = Util.dk("k");
         for (int j = 0; j < 10; j++)
         {
             for (int i = 0; i < 10; i++)
             {
                 RowMutation rm = new RowMutation("Keyspace1", key.key);
-                ByteBuffer colName = ct.builder().add(ByteBufferUtil.bytes("a" + i)).add(ByteBufferUtil.bytes(j*10 + i)).build();
+                CellName colName = type.makeCellName(ByteBufferUtil.bytes("a" + i), ByteBufferUtil.bytes(j*10 + i));
                 rm.add("StandardComposite2", colName, ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
                 rm.apply();
             }
             cfs.forceBlockingFlush();
         }
-        ByteBuffer start = ct.builder().add(ByteBufferUtil.bytes("a5")).add(ByteBufferUtil.bytes(85)).build();
-        ByteBuffer finish = ct.builder().add(ByteBufferUtil.bytes("a5")).buildAsEndOfRange();
+        Composite start = type.builder().add(ByteBufferUtil.bytes("a5")).add(ByteBufferUtil.bytes(85)).build();
+        Composite finish = type.builder().add(ByteBufferUtil.bytes("a5")).build().end();
         cfs.metric.sstablesPerReadHistogram.clear();
         ColumnFamily cf = cfs.getColumnFamily(key, start, finish, false, 1000, System.currentTimeMillis());
         int colCount = 0;
@@ -546,60 +531,60 @@ public class KeyspaceTest extends SchemaLoader
     {
         DecoratedKey key = Util.dk("row3");
         ColumnFamily cf;
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col1000"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col1000"), Composites.EMPTY, false, 3, System.currentTimeMillis());
         assertColumns(cf, "col1000", "col1001", "col1002");
 
         ByteBuffer col;
-        col = cf.getColumn(ByteBufferUtil.bytes("col1000")).value();
+        col = cf.getColumn(cellname("col1000")).value();
         assertEquals(ByteBufferUtil.string(col), "v1000");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1001")).value();
+        col = cf.getColumn(cellname("col1001")).value();
         assertEquals(ByteBufferUtil.string(col), "v1001");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1002")).value();
+        col = cf.getColumn(cellname("col1002")).value();
         assertEquals(ByteBufferUtil.string(col), "v1002");
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col1195"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col1195"), Composites.EMPTY, false, 3, System.currentTimeMillis());
         assertColumns(cf, "col1195", "col1196", "col1197");
 
-        col = cf.getColumn(ByteBufferUtil.bytes("col1195")).value();
+        col = cf.getColumn(cellname("col1195")).value();
         assertEquals(ByteBufferUtil.string(col), "v1195");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1196")).value();
+        col = cf.getColumn(cellname("col1196")).value();
         assertEquals(ByteBufferUtil.string(col), "v1196");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1197")).value();
+        col = cf.getColumn(cellname("col1197")).value();
         assertEquals(ByteBufferUtil.string(col), "v1197");
 
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col1996"), ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 1000, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col1996"), Composites.EMPTY, true, 1000, System.currentTimeMillis());
         Column[] columns = cf.getSortedColumns().toArray(new Column[0]);
         for (int i = 1000; i < 1996; i++)
         {
             String expectedName = "col" + i;
             Column column = columns[i - 1000];
-            assertEquals(ByteBufferUtil.string(column.name()), expectedName);
+            assertEquals(ByteBufferUtil.string(column.name().toByteBuffer()), expectedName);
             assertEquals(ByteBufferUtil.string(column.value()), ("v" + i));
         }
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col1990"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col1990"), Composites.EMPTY, false, 3, System.currentTimeMillis());
         assertColumns(cf, "col1990", "col1991", "col1992");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1990")).value();
+        col = cf.getColumn(cellname("col1990")).value();
         assertEquals(ByteBufferUtil.string(col), "v1990");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1991")).value();
+        col = cf.getColumn(cellname("col1991")).value();
         assertEquals(ByteBufferUtil.string(col), "v1991");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1992")).value();
+        col = cf.getColumn(cellname("col1992")).value();
         assertEquals(ByteBufferUtil.string(col), "v1992");
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, Composites.EMPTY, Composites.EMPTY, true, 3, System.currentTimeMillis());
         assertColumns(cf, "col1997", "col1998", "col1999");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1997")).value();
+        col = cf.getColumn(cellname("col1997")).value();
         assertEquals(ByteBufferUtil.string(col), "v1997");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1998")).value();
+        col = cf.getColumn(cellname("col1998")).value();
         assertEquals(ByteBufferUtil.string(col), "v1998");
-        col = cf.getColumn(ByteBufferUtil.bytes("col1999")).value();
+        col = cf.getColumn(cellname("col1999")).value();
         assertEquals(ByteBufferUtil.string(col), "v1999");
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col9000"), ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col9000"), Composites.EMPTY, true, 3, System.currentTimeMillis());
         assertColumns(cf, "col1997", "col1998", "col1999");
 
-        cf = cfStore.getColumnFamily(key, ByteBufferUtil.bytes("col9000"), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 3, System.currentTimeMillis());
+        cf = cfStore.getColumnFamily(key, cellname("col9000"), Composites.EMPTY, false, 3, System.currentTimeMillis());
         assertColumns(cf);
     }
 
@@ -609,14 +594,7 @@ public class KeyspaceTest extends SchemaLoader
         List<String> L = new ArrayList<String>();
         for (Column column : columns)
         {
-            try
-            {
-                L.add(ByteBufferUtil.string(column.name()));
-            }
-            catch (CharacterCodingException e)
-            {
-                throw new AssertionError(e);
-            }
+            L.add(Util.string(column.name().toByteBuffer()));
         }
 
         List<String> names = new ArrayList<String>(columnNames.length);
@@ -628,13 +606,13 @@ public class KeyspaceTest extends SchemaLoader
 
         assert Arrays.equals(la, columnNames1)
                 : String.format("Columns [%s])] is not expected [%s]",
-                                ((container == null) ? "" : container.getComparator().getColumnsString(columns)),
+                                ((container == null) ? "" : CellNames.getColumnsString(container.getComparator(), columns)),
                                 StringUtils.join(columnNames1, ","));
     }
 
     public static void assertColumn(ColumnFamily cf, String name, String value, long timestamp)
     {
-        assertColumn(cf.getColumn(ByteBufferUtil.bytes(name)), value, timestamp);
+        assertColumn(cf.getColumn(cellname(name)), value, timestamp);
     }
 
     public static void assertColumn(Column column, String value, long timestamp)

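The KeyspaceTest hunks above capture the two substitutions this patch repeats throughout the tests: plain ByteBuffer column names become CellName objects (Util.cellname, or CellNames.simpleDense directly), and empty ByteBuffer slice bounds become Composites.EMPTY, with composite bounds built through the table's CellNameType. A minimal sketch of the new slice pattern, assuming the composites classes live under org.apache.cassandra.db.composites as the patch's imports suggest, and reusing the getColumnFamily signature shown above:

    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CompositeSliceSketch
    {
        // Slice all cells whose first composite component equals 'prefix' (sketch only).
        public static ColumnFamily sliceUnderPrefix(ColumnFamilyStore cfs, DecoratedKey key, String prefix)
        {
            // The table comparator is now a CellNameType rather than an AbstractType<ByteBuffer>.
            CellNameType type = cfs.getComparator();

            // Start bound: a one-component prefix built through the comparator.
            Composite start = type.builder().add(ByteBufferUtil.bytes(prefix)).build();

            // End bound: the same prefix marked as an end-of-range bound
            // (replaces the old CompositeType.Builder#buildAsEndOfRange()).
            Composite finish = type.builder().add(ByteBufferUtil.bytes(prefix)).build().end();

            // Bounds are Composites now; Composites.EMPTY stands in for the old empty ByteBuffer.
            return cfs.getColumnFamily(key, start, finish, false, 1000, System.currentTimeMillis());
        }
    }
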
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/NameSortTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/NameSortTest.java b/test/unit/org/apache/cassandra/db/NameSortTest.java
index 1a8905e..523a33a 100644
--- a/test/unit/org/apache/cassandra/db/NameSortTest.java
+++ b/test/unit/org/apache/cassandra/db/NameSortTest.java
@@ -67,7 +67,7 @@ public class NameSortTest extends SchemaLoader
             {
                 ByteBuffer bytes = j % 2 == 0 ? ByteBufferUtil.bytes("a") : ByteBufferUtil.bytes("b");
                 rm = new RowMutation("Keyspace1", key);
-                rm.add("Standard1", ByteBufferUtil.bytes(("Column-" + j)), bytes, j);
+                rm.add("Standard1", Util.cellname("Column-" + j), bytes, j);
                 rm.applyUnsafe();
             }
 
@@ -102,7 +102,7 @@ public class NameSortTest extends SchemaLoader
             Collection<Column> columns = cf.getSortedColumns();
             for (Column column : columns)
             {
-                String name = ByteBufferUtil.string(column.name());
+                String name = ByteBufferUtil.string(column.name().toByteBuffer());
                 int j = Integer.valueOf(name.substring(name.length() - 1));
                 byte[] bytes = j % 2 == 0 ? "a".getBytes() : "b".getBytes();
                 assertEquals(new String(bytes), ByteBufferUtil.string(column.value()));

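The NameSortTest change shows the round trip most of these tests now rely on: write with a cell name, read the raw bytes back through CellName.toByteBuffer(). Util.cellname(...) is a test helper added elsewhere in this patch; judging from how it is used interchangeably with CellNames.simpleDense in the other hunks, it presumably wraps a single value into a dense cell name, but that is an assumption. A small sketch of the round trip using the CellNames factory directly:

    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameRoundTripSketch
    {
        public static void main(String[] args) throws Exception
        {
            // Dense cell name wrapping a single ByteBuffer component.
            CellName name = CellNames.simpleDense(ByteBufferUtil.bytes("Column-7"));

            // Columns are keyed by CellName instead of ByteBuffer after this patch.
            Column column = new Column(name, ByteBufferUtil.bytes("a"), 0);

            // Decoding the textual name goes through toByteBuffer() first.
            String decoded = ByteBufferUtil.string(column.name().toByteBuffer());
            assert decoded.equals("Column-7");
        }
    }
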
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
index dc9f9c4..92a0b4a 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneListTest.java
@@ -24,12 +24,15 @@ import java.util.*;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.IntegerType;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 public class RangeTombstoneListTest
 {
-    private static final Comparator<ByteBuffer> cmp = IntegerType.instance;
+    private static final Comparator<Composite> cmp = new SimpleDenseCellNameType(IntegerType.instance);
 
     @Test
     public void sortedAdditionTest()
@@ -305,14 +308,14 @@ public class RangeTombstoneListTest
         return String.format("[%d, %d]@%d", i(rt.min), i(rt.max), rt.data.markedForDeleteAt);
     }
 
-    private static ByteBuffer b(int i)
+    private static Composite b(int i)
     {
-        return ByteBufferUtil.bytes(i);
+        return Util.cellname(i);
     }
 
-    private static int i(ByteBuffer bb)
+    private static int i(Composite c)
     {
-        return ByteBufferUtil.toInt(bb);
+        return ByteBufferUtil.toInt(c.toByteBuffer());
     }
 
     private static RangeTombstone rt(int start, int end, long tstamp)

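In RangeTombstoneListTest the tombstone bounds are now Composite values ordered by a SimpleDenseCellNameType instead of raw ByteBuffers ordered by IntegerType; the cell name type itself is the Comparator<Composite>. A short sketch of that comparison, mirroring the cmp and b(i) helpers above:

    import java.util.Comparator;

    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
    import org.apache.cassandra.db.marshal.IntegerType;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CompositeComparatorSketch
    {
        public static void main(String[] args)
        {
            // The dense cell name type doubles as the comparator over Composite bounds.
            Comparator<Composite> cmp = new SimpleDenseCellNameType(IntegerType.instance);

            Composite low = CellNames.simpleDense(ByteBufferUtil.bytes(1));
            Composite high = CellNames.simpleDense(ByteBufferUtil.bytes(42));

            // The wrapped values still compare according to IntegerType.
            assert cmp.compare(low, high) < 0;
        }
    }
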
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
index e324dd2..740eee4 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
@@ -28,6 +28,7 @@ import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.IndexType;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.columniterator.OnDiskAtomIterator;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
@@ -85,7 +86,7 @@ public class RangeTombstoneTest extends SchemaLoader
         // Queries by name
         int[] live = new int[]{ 4, 9, 11, 17, 28 };
         int[] dead = new int[]{ 12, 19, 21, 24, 27 };
-        SortedSet<ByteBuffer> columns = new TreeSet<>(cfs.getComparator());
+        SortedSet<CellName> columns = new TreeSet<CellName>(cfs.getComparator());
         for (int i : live)
             columns.add(b(i));
         for (int i : dead)
@@ -187,7 +188,7 @@ public class RangeTombstoneTest extends SchemaLoader
         cfs.forceBlockingFlush();
 
         // Get the last value of the row
-        cf = cfs.getColumnFamily(QueryFilter.getSliceFilter(dk(key), CFNAME, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 1, System.currentTimeMillis()));
+        cf = cfs.getColumnFamily(QueryFilter.getSliceFilter(dk(key), CFNAME, Composites.EMPTY, Composites.EMPTY, true, 1, System.currentTimeMillis()));
 
         assert !cf.isEmpty();
         int last = i(cf.getSortedColumns().iterator().next().name());
@@ -271,7 +272,7 @@ public class RangeTombstoneTest extends SchemaLoader
         cfs.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
         if (cfs.indexManager.getIndexForColumn(indexedColumnName) == null)
         {
-            ColumnDefinition cd = ColumnDefinition.regularDef(cfs.metadata, indexedColumnName, cfs.getComparator(), 0)
+            ColumnDefinition cd = ColumnDefinition.regularDef(cfs.metadata, indexedColumnName, cfs.getComparator().asAbstractType(), 0)
                                                   .setIndex("test_index", IndexType.CUSTOM, ImmutableMap.of(SecondaryIndex.CUSTOM_INDEX_OPTION_NAME, TestIndex.class.getName()));
             cfs.indexManager.addIndexedColumn(cd);
         }
@@ -310,7 +311,7 @@ public class RangeTombstoneTest extends SchemaLoader
         cfs.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
         if (cfs.indexManager.getIndexForColumn(indexedColumnName) == null)
         {
-            ColumnDefinition cd = ColumnDefinition.regularDef(cfs.metadata, indexedColumnName, cfs.getComparator(), 0)
+            ColumnDefinition cd = ColumnDefinition.regularDef(cfs.metadata, indexedColumnName, cfs.getComparator().asAbstractType(), 0)
                                                   .setIndex("test_index", IndexType.CUSTOM, ImmutableMap.of(SecondaryIndex.CUSTOM_INDEX_OPTION_NAME, TestIndex.class.getName()));
             cfs.indexManager.addIndexedColumn(cd);
         }
@@ -349,19 +350,19 @@ public class RangeTombstoneTest extends SchemaLoader
         return c != null && !c.isMarkedForDelete(System.currentTimeMillis()) && !cf.deletionInfo().isDeleted(c);
     }
 
-    private static ByteBuffer b(int i)
+    private static CellName b(int i)
     {
-        return ByteBufferUtil.bytes(i);
+        return CellNames.simpleDense(ByteBufferUtil.bytes(i));
     }
 
-    private static int i(ByteBuffer i)
+    private static int i(CellName i)
     {
-        return ByteBufferUtil.toInt(i);
+        return ByteBufferUtil.toInt(i.toByteBuffer());
     }
 
     private static void add(RowMutation rm, int value, long timestamp)
     {
-        rm.add(CFNAME, b(value), b(value), timestamp);
+        rm.add(CFNAME, b(value), ByteBufferUtil.bytes(value), timestamp);
     }
 
     private static void delete(ColumnFamily cf, int from, int to, long timestamp)
@@ -417,5 +418,7 @@ public class RangeTombstoneTest extends SchemaLoader
         public void invalidate(){}
 
         public void truncateBlocking(long truncatedAt) { }
+
+        public boolean indexes(CellName name) { return name.toByteBuffer().equals(ByteBufferUtil.bytes(1)); }
     }
 }

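RangeTombstoneTest funnels every column reference through CellName: names are wrapped with CellNames.simpleDense, unwrapped with toByteBuffer(), and the places that still want an AbstractType (ColumnDefinition.regularDef here) obtain one from the comparator via asAbstractType(). A sketch of those three conversions, mirroring the b(int) and i(CellName) helpers above:

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.marshal.AbstractType;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameConversionsSketch
    {
        // Wrap an int into a dense cell name (the test's b(int) helper).
        static CellName name(int i)
        {
            return CellNames.simpleDense(ByteBufferUtil.bytes(i));
        }

        // Unwrap it again (the test's i(CellName) helper).
        static int value(CellName name)
        {
            return ByteBufferUtil.toInt(name.toByteBuffer());
        }

        // Where an AbstractType is still required, ask the comparator for one.
        static AbstractType<?> comparatorAsType(ColumnFamilyStore cfs)
        {
            return cfs.getComparator().asAbstractType();
        }
    }
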
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/ReadMessageTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ReadMessageTest.java b/test/unit/org/apache/cassandra/db/ReadMessageTest.java
index d829dae..7af4256 100644
--- a/test/unit/org/apache/cassandra/db/ReadMessageTest.java
+++ b/test/unit/org/apache/cassandra/db/ReadMessageTest.java
@@ -30,11 +30,13 @@ import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
 
 
 public class ReadMessageTest extends SchemaLoader
@@ -42,9 +44,11 @@ public class ReadMessageTest extends SchemaLoader
     @Test
     public void testMakeReadMessage() throws IOException
     {
-        SortedSet<ByteBuffer> colList = new TreeSet<ByteBuffer>();
-        colList.add(ByteBufferUtil.bytes("col1"));
-        colList.add(ByteBufferUtil.bytes("col2"));
+        CellNameType type = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1").getComparator();
+
+        SortedSet<CellName> colList = new TreeSet<CellName>(type);
+        colList.add(Util.cellname("col1"));
+        colList.add(Util.cellname("col2"));
 
         ReadCommand rm, rm2;
         DecoratedKey dk = Util.dk("row1");
@@ -54,21 +58,13 @@ public class ReadMessageTest extends SchemaLoader
         rm2 = serializeAndDeserializeReadMessage(rm);
         assert rm2.toString().equals(rm.toString());
 
-        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2));
+        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(Composites.EMPTY, Composites.EMPTY, true, 2));
         rm2 = serializeAndDeserializeReadMessage(rm);
         assert rm2.toString().equals(rm.toString());
 
-        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(ByteBufferUtil.bytes("a"), ByteBufferUtil.bytes("z"), true, 5));
-        rm2 = serializeAndDeserializeReadMessage(rm);
-        assertEquals(rm2.toString(), rm.toString());
-
-        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, true, 2));
+        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(Util.cellname("a"), Util.cellname("z"), true, 5));
         rm2 = serializeAndDeserializeReadMessage(rm);
         assert rm2.toString().equals(rm.toString());
-
-        rm = new SliceFromReadCommand("Keyspace1", dk.key, "Standard1", ts, new SliceQueryFilter(ByteBufferUtil.bytes("a"), ByteBufferUtil.bytes("z"), true, 5));
-        rm2 = serializeAndDeserializeReadMessage(rm);
-        assertEquals(rm2.toString(), rm.toString());
     }
 
     private ReadCommand serializeAndDeserializeReadMessage(ReadCommand rm) throws IOException
@@ -86,30 +82,30 @@ public class ReadMessageTest extends SchemaLoader
     public void testGetColumn() throws IOException, ColumnFamilyNotDefinedException
     {
         Keyspace keyspace = Keyspace.open("Keyspace1");
+        CellNameType type = keyspace.getColumnFamilyStore("Standard1").getComparator();
         RowMutation rm;
         DecoratedKey dk = Util.dk("key1");
 
         // add data
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("abcd"), 0);
+        rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("abcd"), 0);
         rm.apply();
 
-        ReadCommand command = new SliceByNamesReadCommand("Keyspace1", dk.key, "Standard1", System.currentTimeMillis(), new NamesQueryFilter(ByteBufferUtil.bytes("Column1")));
+        ReadCommand command = new SliceByNamesReadCommand("Keyspace1", dk.key, "Standard1", System.currentTimeMillis(), new NamesQueryFilter(FBUtilities.singleton(Util.cellname("Column1"), type)));
         Row row = command.getRow(keyspace);
-        Column col = row.cf.getColumn(ByteBufferUtil.bytes("Column1"));
+        Column col = row.cf.getColumn(Util.cellname("Column1"));
         assertEquals(col.value(), ByteBuffer.wrap("abcd".getBytes()));
     }
 
     @Test
     public void testNoCommitLog() throws Exception
     {
-
         RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("row"));
-        rm.add("Standard1", ByteBufferUtil.bytes("commit1"), ByteBufferUtil.bytes("abcd"), 0);
+        rm.add("Standard1", Util.cellname("commit1"), ByteBufferUtil.bytes("abcd"), 0);
         rm.apply();
 
         rm = new RowMutation("NoCommitlogSpace", ByteBufferUtil.bytes("row"));
-        rm.add("Standard1", ByteBufferUtil.bytes("commit2"), ByteBufferUtil.bytes("abcd"), 0);
+        rm.add("Standard1", Util.cellname("commit2"), ByteBufferUtil.bytes("abcd"), 0);
         rm.apply();
 
         boolean commitLogMessageFound = false;

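ReadMessageTest shows how the two query filters change shape: NamesQueryFilter now takes a SortedSet<CellName> ordered by the table's comparator (FBUtilities.singleton covers the one-name case), and SliceQueryFilter takes Composite bounds. A condensed sketch of both constructions, with the keyspace and table names taken from the test:

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.Composites;
    import org.apache.cassandra.db.filter.NamesQueryFilter;
    import org.apache.cassandra.db.filter.SliceQueryFilter;
    import org.apache.cassandra.utils.FBUtilities;

    public class FilterConstructionSketch
    {
        public static void build()
        {
            // The table's CellNameType orders the name set; a plain TreeSet<ByteBuffer> no longer fits.
            CellNameType type = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1").getComparator();

            SortedSet<CellName> names = new TreeSet<CellName>(type);
            names.add(Util.cellname("col1"));
            names.add(Util.cellname("col2"));
            NamesQueryFilter byNames = new NamesQueryFilter(names);

            // Single-name case, as used for the "Column1" read above.
            NamesQueryFilter oneName = new NamesQueryFilter(FBUtilities.singleton(Util.cellname("Column1"), type));

            // Slice bounds are Composites; an unrestricted slice uses Composites.EMPTY on both ends.
            SliceQueryFilter unrestricted = new SliceQueryFilter(Composites.EMPTY, Composites.EMPTY, true, 2);
        }
    }
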
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
index c053211..323da7c 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerTest.java
@@ -34,6 +34,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.apache.cassandra.Util.column;
 import static org.apache.cassandra.db.KeyspaceTest.assertColumns;
+import static org.apache.cassandra.Util.cellname;
 
 public class RecoveryManagerTest extends SchemaLoader
 {
@@ -84,7 +85,7 @@ public class RecoveryManagerTest extends SchemaLoader
         for (int i = 0; i < 10; ++i)
         {
             cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Counter1");
-            cf.addColumn(new CounterColumn(ByteBufferUtil.bytes("col"), 1L, 1L));
+            cf.addColumn(new CounterColumn(cellname("col"), 1L, 1L));
             rm = new RowMutation("Keyspace1", dk.key, cf);
             rm.apply();
         }
@@ -97,7 +98,7 @@ public class RecoveryManagerTest extends SchemaLoader
         cf = Util.getColumnFamily(keyspace1, dk, "Counter1");
 
         assert cf.getColumnCount() == 1;
-        Column c = cf.getColumn(ByteBufferUtil.bytes("col"));
+        Column c = cf.getColumn(cellname("col"));
 
         assert c != null;
         assert ((CounterColumn)c).total() == 10L;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RecoveryManagerTruncateTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerTruncateTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerTruncateTest.java
index b98a8b0..28868ad 100644
--- a/test/unit/org/apache/cassandra/db/RecoveryManagerTruncateTest.java
+++ b/test/unit/org/apache/cassandra/db/RecoveryManagerTruncateTest.java
@@ -72,14 +72,11 @@ public class RecoveryManagerTruncateTest extends SchemaLoader
 		{
 			return null;
 		}
-		cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk(keyName),
-                                                                cfName,
-                                                                ByteBufferUtil.bytes(columnName),
-                                                                System.currentTimeMillis()));
+		cf = cfStore.getColumnFamily(Util.namesQueryFilter(cfStore, Util.dk(keyName), columnName));
 		if (cf == null)
 		{
 			return null;
 		}
-		return cf.getColumn(ByteBufferUtil.bytes(columnName));
+		return cf.getColumn(Util.cellname(columnName));
 	}
 }

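The truncate test swaps the explicit QueryFilter.getNamesFilter(...) call for a Util.namesQueryFilter(cfStore, key, columnName) helper whose body is not part of this hunk. The composites-related piece it must contain is the comparator-ordered name set; the sketch below reconstructs only that part and is an assumption, not the patch's actual Util code:

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.composites.CellName;

    public class NamesQueryHelperSketch
    {
        // Hypothetical core of Util.namesQueryFilter: order the requested names by the
        // table's CellNameType. Wrapping the set into a NamesQueryFilter/QueryFilter is
        // omitted because the exact factory signature is not visible in this diff.
        static SortedSet<CellName> namesFor(ColumnFamilyStore cfs, String... names)
        {
            SortedSet<CellName> set = new TreeSet<CellName>(cfs.getComparator());
            for (String name : names)
                set.add(Util.cellname(name));
            return set;
        }
    }
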
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RemoveColumnFamilyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyTest.java b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyTest.java
index e3db863..236ba10 100644
--- a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyTest.java
+++ b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyTest.java
@@ -40,7 +40,7 @@ public class RemoveColumnFamilyTest extends SchemaLoader
 
         // add data
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
 
         // remove
@@ -50,7 +50,7 @@ public class RemoveColumnFamilyTest extends SchemaLoader
 
         ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, "Standard1", System.currentTimeMillis()));
         assert retrieved.isMarkedForDelete();
-        assertNull(retrieved.getColumn(ByteBufferUtil.bytes("Column1")));
+        assertNull(retrieved.getColumn(Util.cellname("Column1")));
         assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush1Test.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush1Test.java b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush1Test.java
index 7c71dc9..1bfb140 100644
--- a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush1Test.java
+++ b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush1Test.java
@@ -40,8 +40,8 @@ public class RemoveColumnFamilyWithFlush1Test extends SchemaLoader
 
         // add data
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column2"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", Util.cellname("Column2"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
         store.forceBlockingFlush();
 
@@ -52,7 +52,7 @@ public class RemoveColumnFamilyWithFlush1Test extends SchemaLoader
 
         ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, "Standard1", System.currentTimeMillis()));
         assert retrieved.isMarkedForDelete();
-        assertNull(retrieved.getColumn(ByteBufferUtil.bytes("Column1")));
+        assertNull(retrieved.getColumn(Util.cellname("Column1")));
         assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush2Test.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush2Test.java b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush2Test.java
index f6fae5a..9c8aba1 100644
--- a/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush2Test.java
+++ b/test/unit/org/apache/cassandra/db/RemoveColumnFamilyWithFlush2Test.java
@@ -40,7 +40,7 @@ public class RemoveColumnFamilyWithFlush2Test extends SchemaLoader
 
         // add data
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
         // remove
         rm = new RowMutation("Keyspace1", dk.key);
@@ -50,7 +50,7 @@ public class RemoveColumnFamilyWithFlush2Test extends SchemaLoader
 
         ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, "Standard1", System.currentTimeMillis()));
         assert retrieved.isMarkedForDelete();
-        assertNull(retrieved.getColumn(ByteBufferUtil.bytes("Column1")));
+        assertNull(retrieved.getColumn(Util.cellname("Column1")));
         assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RemoveColumnTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RemoveColumnTest.java b/test/unit/org/apache/cassandra/db/RemoveColumnTest.java
index dfbf513..7ac66e6 100644
--- a/test/unit/org/apache/cassandra/db/RemoveColumnTest.java
+++ b/test/unit/org/apache/cassandra/db/RemoveColumnTest.java
@@ -42,20 +42,17 @@ public class RemoveColumnTest extends SchemaLoader
 
         // add data
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
         store.forceBlockingFlush();
 
         // remove
         rm = new RowMutation("Keyspace1", dk.key);
-        rm.delete("Standard1", ByteBufferUtil.bytes("Column1"), 1);
+        rm.delete("Standard1", Util.cellname("Column1"), 1);
         rm.apply();
 
-        ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getNamesFilter(dk,
-                                                                                  "Standard1",
-                                                                                  ByteBufferUtil.bytes("Column1"),
-                                                                                  System.currentTimeMillis()));
-        assert retrieved.getColumn(ByteBufferUtil.bytes("Column1")).isMarkedForDelete(System.currentTimeMillis());
+        ColumnFamily retrieved = store.getColumnFamily(Util.namesQueryFilter(store, dk, "Column1"));
+        assert retrieved.getColumn(Util.cellname("Column1")).isMarkedForDelete(System.currentTimeMillis());
         assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
         assertNull(Util.cloneAndRemoveDeleted(store.getColumnFamily(QueryFilter.getIdentityFilter(dk,
                                                                                                   "Standard1",
@@ -63,21 +60,26 @@ public class RemoveColumnTest extends SchemaLoader
                                               Integer.MAX_VALUE));
     }
 
+    private static DeletedColumn dc(String name, int ldt, long timestamp)
+    {
+        return new DeletedColumn(Util.cellname(name), ldt, timestamp);
+    }
+
     @Test
     public void deletedColumnShouldAlwaysBeMarkedForDelete()
     {
         // Check for bug in #4307
         long timestamp = System.currentTimeMillis();
         int localDeletionTime = (int) (timestamp / 1000);
-        Column c = DeletedColumn.create(localDeletionTime, timestamp, "dc1");
+        Column c = dc("dc1", localDeletionTime, timestamp);
         assertTrue("DeletedColumn was not marked for delete", c.isMarkedForDelete(timestamp));
 
         // Simulate a node that is 30 seconds behind
-        c = DeletedColumn.create(localDeletionTime + 30, timestamp + 30000, "dc2");
+        c = dc("dc2", localDeletionTime + 30, timestamp + 30000);
         assertTrue("DeletedColumn was not marked for delete", c.isMarkedForDelete(timestamp));
 
         // Simulate a node that is 30 seconds ahead
-        c = DeletedColumn.create(localDeletionTime - 30, timestamp - 30000, "dc3");
+        c = dc("dc3", localDeletionTime - 30, timestamp - 30000);
         assertTrue("DeletedColumn was not marked for delete", c.isMarkedForDelete(timestamp));
     }
 

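The tombstone checks above now build DeletedColumn instances directly from a CellName instead of using the old DeletedColumn.create(localDeletionTime, timestamp, name) convenience. A minimal sketch of the new construction, with the argument order taken from the dc(...) helper the patch adds:

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.DeletedColumn;

    public class DeletedColumnSketch
    {
        public static void main(String[] args)
        {
            long timestamp = System.currentTimeMillis();
            int localDeletionTime = (int) (timestamp / 1000);

            // name, local deletion time, timestamp: same order as the dc(...) helper in the test.
            Column tombstone = new DeletedColumn(Util.cellname("dc1"), localDeletionTime, timestamp);

            // A tombstone must always report itself as deleted at its own timestamp.
            assert tombstone.isMarkedForDelete(timestamp);
        }
    }
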
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RemoveSubColumnTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RemoveSubColumnTest.java b/test/unit/org/apache/cassandra/db/RemoveSubColumnTest.java
index 6d2be75..e112b1b 100644
--- a/test/unit/org/apache/cassandra/db/RemoveSubColumnTest.java
+++ b/test/unit/org/apache/cassandra/db/RemoveSubColumnTest.java
@@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 
 import static org.junit.Assert.assertNull;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.CompositeType;
 import static org.apache.cassandra.Util.getBytes;
@@ -50,7 +51,7 @@ public class RemoveSubColumnTest extends SchemaLoader
         rm.apply();
         store.forceBlockingFlush();
 
-        ByteBuffer cname = CompositeType.build(ByteBufferUtil.bytes("SC1"), getBytes(1L));
+        CellName cname = CellNames.compositeDense(ByteBufferUtil.bytes("SC1"), getBytes(1L));
         // remove
         rm = new RowMutation("Keyspace1", dk.key);
         rm.delete("Super1", cname, 1);
@@ -77,7 +78,7 @@ public class RemoveSubColumnTest extends SchemaLoader
 
         // remove the SC
         ByteBuffer scName = ByteBufferUtil.bytes("SC1");
-        ByteBuffer cname = CompositeType.build(scName, getBytes(1L));
+        CellName cname = CellNames.compositeDense(scName, getBytes(1L));
         rm = new RowMutation("Keyspace1", dk.key);
         rm.deleteRange("Super1", SuperColumns.startOf(scName), SuperColumns.endOf(scName), 1);
         rm.apply();

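For super column families, a subcolumn is now addressed by a two-component dense cell name built with CellNames.compositeDense (super column name first, subcolumn second), replacing CompositeType.build. A sketch reusing the "Super1"/"SC1" layout of the test:

    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.utils.ByteBufferUtil;
    import static org.apache.cassandra.Util.getBytes;

    public class SubColumnNameSketch
    {
        public static void deleteSubColumn(RowMutation rm)
        {
            // Two components: the super column name, then the subcolumn name.
            CellName cname = CellNames.compositeDense(ByteBufferUtil.bytes("SC1"), getBytes(1L));

            // Deletions address the cell by its CellName, exactly like additions.
            rm.delete("Super1", cname, 1);
        }
    }
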
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index 8934a27..6c3a620 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.dht.BytesToken;
 import org.apache.cassandra.locator.TokenMetadata;
@@ -67,28 +68,18 @@ public class RowCacheTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk("key" + i);
 
-            cachedStore.getColumnFamily(key,
-                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                        false,
-                                        1,
-                                        System.currentTimeMillis());
+            cachedStore.getColumnFamily(key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
             assert CacheService.instance.rowCache.size() == i + 1;
             assert cachedStore.containsCachedRow(key); // current key should be stored in the cache
 
             // checking if column is read correctly after cache
-            ColumnFamily cf = cachedStore.getColumnFamily(key,
-                                                          ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                          ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                          false,
-                                                          1,
-                                                          System.currentTimeMillis());
+            ColumnFamily cf = cachedStore.getColumnFamily(key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
             Collection<Column> columns = cf.getSortedColumns();
 
             Column column = columns.iterator().next();
 
             assert columns.size() == 1;
-            assert column.name().equals(ByteBufferUtil.bytes("col" + i));
+            assert column.name().toByteBuffer().equals(ByteBufferUtil.bytes("col" + i));
             assert column.value().equals(ByteBufferUtil.bytes("val" + i));
         }
 
@@ -99,27 +90,17 @@ public class RowCacheTest extends SchemaLoader
         {
             DecoratedKey key = Util.dk("key" + i);
 
-            cachedStore.getColumnFamily(key,
-                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                        false,
-                                        1,
-                                        System.currentTimeMillis());
+            cachedStore.getColumnFamily(key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
             assert cachedStore.containsCachedRow(key); // cache should be populated with the latest rows read (old ones should be popped)
 
             // checking if column is read correctly after cache
-            ColumnFamily cf = cachedStore.getColumnFamily(key,
-                                                          ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                          ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                          false,
-                                                          1,
-                                                          System.currentTimeMillis());
+            ColumnFamily cf = cachedStore.getColumnFamily(key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
             Collection<Column> columns = cf.getSortedColumns();
 
             Column column = columns.iterator().next();
 
             assert columns.size() == 1;
-            assert column.name().equals(ByteBufferUtil.bytes("col" + i));
+            assert column.name().toByteBuffer().equals(ByteBufferUtil.bytes("col" + i));
             assert column.value().equals(ByteBufferUtil.bytes("val" + i));
         }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RowIterationTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowIterationTest.java b/test/unit/org/apache/cassandra/db/RowIterationTest.java
index 6353a8c..9ec7bec 100644
--- a/test/unit/org/apache/cassandra/db/RowIterationTest.java
+++ b/test/unit/org/apache/cassandra/db/RowIterationTest.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.Util;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.utils.FBUtilities;
 import static org.junit.Assert.assertEquals;
@@ -52,7 +53,7 @@ public class RowIterationTest extends SchemaLoader
         for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
             DecoratedKey key = Util.dk(String.valueOf(i));
             RowMutation rm = new RowMutation(KEYSPACE1, key.key);
-            rm.add("Super3", CompositeType.build(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes(String.valueOf(i))), ByteBuffer.wrap(new byte[ROWS_PER_SSTABLE * 10 - i * 2]), i);
+            rm.add("Super3", CellNames.compositeDense(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes(String.valueOf(i))), ByteBuffer.wrap(new byte[ROWS_PER_SSTABLE * 10 - i * 2]), i);
             rm.apply();
             inserted.add(key);
         }
@@ -71,7 +72,7 @@ public class RowIterationTest extends SchemaLoader
         // Delete row in first sstable
         RowMutation rm = new RowMutation(KEYSPACE1, key.key);
         rm.delete(CF_NAME, 0);
-        rm.add(CF_NAME, ByteBufferUtil.bytes("c"), ByteBufferUtil.bytes("values"), 0L);
+        rm.add(CF_NAME, Util.cellname("c"), ByteBufferUtil.bytes("values"), 0L);
         DeletionInfo delInfo1 = rm.getColumnFamilies().iterator().next().deletionInfo();
         rm.apply();
         store.forceBlockingFlush();
@@ -79,7 +80,7 @@ public class RowIterationTest extends SchemaLoader
         // Delete row in second sstable with higher timestamp
         rm = new RowMutation(KEYSPACE1, key.key);
         rm.delete(CF_NAME, 1);
-        rm.add(CF_NAME, ByteBufferUtil.bytes("c"), ByteBufferUtil.bytes("values"), 1L);
+        rm.add(CF_NAME, Util.cellname("c"), ByteBufferUtil.bytes("values"), 1L);
         DeletionInfo delInfo2 = rm.getColumnFamilies().iterator().next().deletionInfo();
         assert delInfo2.getTopLevelDeletion().markedForDeleteAt == 1L;
         rm.apply();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/RowTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowTest.java b/test/unit/org/apache/cassandra/db/RowTest.java
index 2571fa5..d770a6c 100644
--- a/test/unit/org/apache/cassandra/db/RowTest.java
+++ b/test/unit/org/apache/cassandra/db/RowTest.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.apache.cassandra.Util.column;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import com.google.common.util.concurrent.Uninterruptibles;
@@ -59,14 +60,14 @@ public class RowTest extends SchemaLoader
         cf2.addColumn(column("two", "C", 1));
 
         cf1.resolve(cf2);
-        assert Arrays.equals(cf1.getColumn(ByteBufferUtil.bytes("one")).value().array(), "B".getBytes());
-        assert Arrays.equals(cf1.getColumn(ByteBufferUtil.bytes("two")).value().array(), "C".getBytes());
+        assert Arrays.equals(cf1.getColumn(CellNames.simpleDense(ByteBufferUtil.bytes("one"))).value().array(), "B".getBytes());
+        assert Arrays.equals(cf1.getColumn(CellNames.simpleDense(ByteBufferUtil.bytes("two"))).value().array(), "C".getBytes());
     }
 
     @Test
     public void testExpiringColumnExpiration()
     {
-        Column c = new ExpiringColumn(ByteBufferUtil.bytes("one"), ByteBufferUtil.bytes("A"), 0, 1);
+        Column c = new ExpiringColumn(CellNames.simpleDense(ByteBufferUtil.bytes("one")), ByteBufferUtil.bytes("A"), 0, 1);
         assert !c.isMarkedForDelete(System.currentTimeMillis());
 
         // Because we keep the local deletion time with a precision of a

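RowTest and the recovery test above show the constructor change across the column hierarchy: Column, ExpiringColumn and CounterColumn all take a CellName as their first argument now. A combined sketch of the constructions used in those hunks:

    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.CounterColumn;
    import org.apache.cassandra.db.ExpiringColumn;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class ColumnConstructionSketch
    {
        public static void build()
        {
            CellName one = CellNames.simpleDense(ByteBufferUtil.bytes("one"));

            // Regular cell: name, value, timestamp.
            Column regular = new Column(one, ByteBufferUtil.bytes("A"), 0);

            // Expiring cell: name, value, timestamp, TTL in seconds (as in testExpiringColumnExpiration).
            Column expiring = new ExpiringColumn(one, ByteBufferUtil.bytes("A"), 0, 1);

            // Counter cell, as in RecoveryManagerTest: name, count, timestamp
            // (that test applies ten of these and asserts a total of 10L).
            Column counter = new CounterColumn(CellNames.simpleDense(ByteBufferUtil.bytes("col")), 1L, 1L);
        }
    }
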
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/SecondaryIndexColumnSizeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SecondaryIndexColumnSizeTest.java b/test/unit/org/apache/cassandra/db/SecondaryIndexColumnSizeTest.java
index 51d9ee8..0dbc12a 100644
--- a/test/unit/org/apache/cassandra/db/SecondaryIndexColumnSizeTest.java
+++ b/test/unit/org/apache/cassandra/db/SecondaryIndexColumnSizeTest.java
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.junit.Test;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.index.PerColumnSecondaryIndex;
 import org.apache.cassandra.db.index.PerRowSecondaryIndex;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
@@ -47,7 +48,7 @@ public class SecondaryIndexColumnSizeTest
 
         // for read
         buffer.flip();
-        Column column = new Column(ByteBufferUtil.bytes("test"), buffer, 0);
+        Column column = new Column(CellNames.simpleDense(ByteBufferUtil.bytes("test")), buffer, 0);
 
         SecondaryIndexColumnSizeTest.MockRowIndex mockRowIndex = new SecondaryIndexColumnSizeTest.MockRowIndex();
         SecondaryIndexColumnSizeTest.MockColumnIndex mockColumnIndex = new SecondaryIndexColumnSizeTest.MockColumnIndex();
@@ -137,6 +138,11 @@ public class SecondaryIndexColumnSizeTest
         public void reload()
         {
         }
+
+        public boolean indexes(CellName name)
+        {
+            return true;
+        }
     }
 
 
@@ -215,5 +221,10 @@ public class SecondaryIndexColumnSizeTest
         public void reload()
         {
         }
+
+        public boolean indexes(CellName name)
+        {
+            return true;
+        }
     }
 }

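Both mock indexes above gain an indexes(CellName) override, the new hook a secondary index uses to declare which cells it covers; the TestIndex in RangeTombstoneTest keys the decision off the cell's bytes. A trimmed fragment of such an override (in the patch it lives inside the index implementation, not in a standalone class):

    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class IndexesPredicateSketch
    {
        // Fragment only: cover just the cell whose dense name encodes the int value 1,
        // mirroring the TestIndex override shown in the RangeTombstoneTest hunk.
        public boolean indexes(CellName name)
        {
            return name.toByteBuffer().equals(ByteBufferUtil.bytes(1));
        }
    }
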
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SerializationsTest.java b/test/unit/org/apache/cassandra/db/SerializationsTest.java
index 119b47a..7b7edfa 100644
--- a/test/unit/org/apache/cassandra/db/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/db/SerializationsTest.java
@@ -20,7 +20,9 @@ package org.apache.cassandra.db;
 
 import org.apache.cassandra.AbstractSerializationsTester;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.*;
+import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.AbstractBounds;
@@ -56,12 +58,12 @@ public class SerializationsTest extends AbstractSerializationsTester
 
     private ByteBuffer startCol = ByteBufferUtil.bytes("Start");
     private ByteBuffer stopCol = ByteBufferUtil.bytes("Stop");
-    private ByteBuffer emptyCol = ByteBufferUtil.bytes("");
+    private Composite emptyCol = Composites.EMPTY;
     public NamesQueryFilter namesPred = new NamesQueryFilter(statics.NamedCols);
     public NamesQueryFilter namesSCPred = new NamesQueryFilter(statics.NamedSCCols);
     public SliceQueryFilter emptyRangePred = new SliceQueryFilter(emptyCol, emptyCol, false, 100);
-    public SliceQueryFilter nonEmptyRangePred = new SliceQueryFilter(startCol, stopCol, true, 100);
-    public SliceQueryFilter nonEmptyRangeSCPred = new SliceQueryFilter(CompositeType.build(statics.SC, startCol), CompositeType.build(statics.SC, stopCol), true, 100);
+    public SliceQueryFilter nonEmptyRangePred = new SliceQueryFilter(CellNames.simpleDense(startCol), CellNames.simpleDense(stopCol), true, 100);
+    public SliceQueryFilter nonEmptyRangeSCPred = new SliceQueryFilter(CellNames.compositeDense(statics.SC, startCol), CellNames.compositeDense(statics.SC, stopCol), true, 100);
 
     private void testRangeSliceCommandWrite() throws IOException
     {
@@ -334,26 +336,32 @@ public class SerializationsTest extends AbstractSerializationsTester
         in.close();
     }
 
-    private static ByteBuffer bb(String s) {
+    private static ByteBuffer bb(String s)
+    {
         return ByteBufferUtil.bytes(s);
     }
 
+    private static CellName cn(String s)
+    {
+        return CellNames.simpleDense(ByteBufferUtil.bytes(s));
+    }
+
     private static class Statics
     {
         private final String KS = "Keyspace1";
         private final ByteBuffer Key = ByteBufferUtil.bytes("Key01");
-        private final SortedSet<ByteBuffer> NamedCols = new TreeSet<ByteBuffer>(BytesType.instance)
+        private final SortedSet<CellName> NamedCols = new TreeSet<CellName>(new SimpleDenseCellNameType(BytesType.instance))
         {{
-            add(ByteBufferUtil.bytes("AAA"));
-            add(ByteBufferUtil.bytes("BBB"));
-            add(ByteBufferUtil.bytes("CCC"));
+            add(CellNames.simpleDense(ByteBufferUtil.bytes("AAA")));
+            add(CellNames.simpleDense(ByteBufferUtil.bytes("BBB")));
+            add(CellNames.simpleDense(ByteBufferUtil.bytes("CCC")));
         }};
         private final ByteBuffer SC = ByteBufferUtil.bytes("SCName");
-        private final SortedSet<ByteBuffer> NamedSCCols = new TreeSet<ByteBuffer>(BytesType.instance)
+        private final SortedSet<CellName> NamedSCCols = new TreeSet<CellName>(new CompoundDenseCellNameType(Arrays.<AbstractType<?>>asList(BytesType.instance, BytesType.instance)))
         {{
-            add(CompositeType.build(SC, ByteBufferUtil.bytes("AAA")));
-            add(CompositeType.build(SC, ByteBufferUtil.bytes("BBB")));
-            add(CompositeType.build(SC, ByteBufferUtil.bytes("CCC")));
+            add(CellNames.compositeDense(SC, ByteBufferUtil.bytes("AAA")));
+            add(CellNames.compositeDense(SC, ByteBufferUtil.bytes("BBB")));
+            add(CellNames.compositeDense(SC, ByteBufferUtil.bytes("CCC")));
         }};
         private final String StandardCF = "Standard1";
         private final String SuperCF = "Super1";
@@ -369,21 +377,21 @@ public class SerializationsTest extends AbstractSerializationsTester
 
         private Statics()
         {
-            StandardCf.addColumn(new Column(bb("aaaa")));
-            StandardCf.addColumn(new Column(bb("bbbb"), bb("bbbbb-value")));
-            StandardCf.addColumn(new Column(bb("cccc"), bb("ccccc-value"), 1000L));
-            StandardCf.addColumn(new DeletedColumn(bb("dddd"), 500, 1000));
-            StandardCf.addColumn(new DeletedColumn(bb("eeee"), bb("eeee-value"), 1001));
-            StandardCf.addColumn(new ExpiringColumn(bb("ffff"), bb("ffff-value"), 2000, 1000));
-            StandardCf.addColumn(new ExpiringColumn(bb("gggg"), bb("gggg-value"), 2001, 1000, 2002));
-
-            SuperCf.addColumn(new Column(CompositeType.build(SC, bb("aaaa"))));
-            SuperCf.addColumn(new Column(CompositeType.build(SC, bb("bbbb")), bb("bbbbb-value")));
-            SuperCf.addColumn(new Column(CompositeType.build(SC, bb("cccc")), bb("ccccc-value"), 1000L));
-            SuperCf.addColumn(new DeletedColumn(CompositeType.build(SC, bb("dddd")), 500, 1000));
-            SuperCf.addColumn(new DeletedColumn(CompositeType.build(SC, bb("eeee")), bb("eeee-value"), 1001));
-            SuperCf.addColumn(new ExpiringColumn(CompositeType.build(SC, bb("ffff")), bb("ffff-value"), 2000, 1000));
-            SuperCf.addColumn(new ExpiringColumn(CompositeType.build(SC, bb("gggg")), bb("gggg-value"), 2001, 1000, 2002));
+            StandardCf.addColumn(new Column(cn("aaaa")));
+            StandardCf.addColumn(new Column(cn("bbbb"), bb("bbbbb-value")));
+            StandardCf.addColumn(new Column(cn("cccc"), bb("ccccc-value"), 1000L));
+            StandardCf.addColumn(new DeletedColumn(cn("dddd"), 500, 1000));
+            StandardCf.addColumn(new DeletedColumn(cn("eeee"), bb("eeee-value"), 1001));
+            StandardCf.addColumn(new ExpiringColumn(cn("ffff"), bb("ffff-value"), 2000, 1000));
+            StandardCf.addColumn(new ExpiringColumn(cn("gggg"), bb("gggg-value"), 2001, 1000, 2002));
+
+            SuperCf.addColumn(new Column(CellNames.compositeDense(SC, bb("aaaa"))));
+            SuperCf.addColumn(new Column(CellNames.compositeDense(SC, bb("bbbb")), bb("bbbbb-value")));
+            SuperCf.addColumn(new Column(CellNames.compositeDense(SC, bb("cccc")), bb("ccccc-value"), 1000L));
+            SuperCf.addColumn(new DeletedColumn(CellNames.compositeDense(SC, bb("dddd")), 500, 1000));
+            SuperCf.addColumn(new DeletedColumn(CellNames.compositeDense(SC, bb("eeee")), bb("eeee-value"), 1001));
+            SuperCf.addColumn(new ExpiringColumn(CellNames.compositeDense(SC, bb("ffff")), bb("ffff-value"), 2000, 1000));
+            SuperCf.addColumn(new ExpiringColumn(CellNames.compositeDense(SC, bb("gggg")), bb("gggg-value"), 2001, 1000, 2002));
         }
     }
 }
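
For readers following the API change in this test: column names are no longer raw ByteBuffers but CellName objects, built through the CellNames factory and ordered by a CellNameType. Below is a minimal sketch of that construction using only the types exercised above; the wrapper class and method names are illustrative and not part of the patch.

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNames;
    import org.apache.cassandra.db.composites.CompoundDenseCellNameType;
    import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
    import org.apache.cassandra.db.marshal.AbstractType;
    import org.apache.cassandra.db.marshal.BytesType;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class DenseCellNameSketch
    {
        public static void main(String[] args)
        {
            // Single-component ("Standard") cell names: one BytesType component each.
            SortedSet<CellName> names = new TreeSet<CellName>(new SimpleDenseCellNameType(BytesType.instance));
            names.add(CellNames.simpleDense(ByteBufferUtil.bytes("AAA")));
            names.add(CellNames.simpleDense(ByteBufferUtil.bytes("BBB")));

            // Two-component ("Super") cell names: (super column name, sub column name).
            ByteBuffer sc = ByteBufferUtil.bytes("SCName");
            SortedSet<CellName> superNames = new TreeSet<CellName>(
                new CompoundDenseCellNameType(Arrays.<AbstractType<?>>asList(BytesType.instance, BytesType.instance)));
            superNames.add(CellNames.compositeDense(sc, ByteBufferUtil.bytes("AAA")));
        }
    }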

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/TimeSortTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/TimeSortTest.java b/test/unit/org/apache/cassandra/db/TimeSortTest.java
index 37966de..a4daf4f 100644
--- a/test/unit/org/apache/cassandra/db/TimeSortTest.java
+++ b/test/unit/org/apache/cassandra/db/TimeSortTest.java
@@ -27,9 +27,11 @@ import org.junit.Test;
 import static org.junit.Assert.assertEquals;
 
 import org.apache.cassandra.SchemaLoader;
+import static org.apache.cassandra.Util.cellname;
 import static org.apache.cassandra.Util.getBytes;
 import org.apache.cassandra.Util;
 
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -46,15 +48,15 @@ public class TimeSortTest extends SchemaLoader
         DecoratedKey key = Util.dk("key0");
 
         rm = new RowMutation("Keyspace1", key.key);
-        rm.add("StandardLong1", getBytes(100), ByteBufferUtil.bytes("a"), 100);
+        rm.add("StandardLong1", cellname(100), ByteBufferUtil.bytes("a"), 100);
         rm.apply();
         cfStore.forceBlockingFlush();
 
         rm = new RowMutation("Keyspace1", key.key);
-        rm.add("StandardLong1", getBytes(0), ByteBufferUtil.bytes("b"), 0);
+        rm.add("StandardLong1", cellname(0), ByteBufferUtil.bytes("b"), 0);
         rm.apply();
 
-        ColumnFamily cf = cfStore.getColumnFamily(key, getBytes(10), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1000, System.currentTimeMillis());
+        ColumnFamily cf = cfStore.getColumnFamily(key, cellname(10), Composites.EMPTY, false, 1000, System.currentTimeMillis());
         Collection<Column> columns = cf.getSortedColumns();
         assert columns.size() == 1;
     }
@@ -70,7 +72,7 @@ public class TimeSortTest extends SchemaLoader
             RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes(Integer.toString(i)));
             for (int j = 0; j < 8; ++j)
             {
-                rm.add("StandardLong1", getBytes(j * 2), ByteBufferUtil.bytes("a"), j * 2);
+                rm.add("StandardLong1", cellname(j * 2), ByteBufferUtil.bytes("a"), j * 2);
             }
             rm.apply();
         }
@@ -85,17 +87,17 @@ public class TimeSortTest extends SchemaLoader
         RowMutation rm = new RowMutation("Keyspace1", key.key);
         for (int j = 0; j < 4; ++j)
         {
-            rm.add("StandardLong1", getBytes(j * 2 + 1), ByteBufferUtil.bytes("b"), j * 2 + 1);
+            rm.add("StandardLong1", cellname(j * 2 + 1), ByteBufferUtil.bytes("b"), j * 2 + 1);
         }
         rm.apply();
         // and some overwrites
         rm = new RowMutation("Keyspace1", key.key);
-        rm.add("StandardLong1", getBytes(0), ByteBufferUtil.bytes("c"), 100);
-        rm.add("StandardLong1", getBytes(10), ByteBufferUtil.bytes("c"), 100);
+        rm.add("StandardLong1", cellname(0), ByteBufferUtil.bytes("c"), 100);
+        rm.add("StandardLong1", cellname(10), ByteBufferUtil.bytes("c"), 100);
         rm.apply();
 
         // verify
-        ColumnFamily cf = cfStore.getColumnFamily(key, getBytes(0), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1000, System.currentTimeMillis());
+        ColumnFamily cf = cfStore.getColumnFamily(key, cellname(0), Composites.EMPTY, false, 1000, System.currentTimeMillis());
         Collection<Column> columns = cf.getSortedColumns();
         assertEquals(12, columns.size());
         Iterator<Column> iter = columns.iterator();
@@ -103,14 +105,14 @@ public class TimeSortTest extends SchemaLoader
         for (int j = 0; j < 8; j++)
         {
             column = iter.next();
-            assert column.name().equals(getBytes(j));
+            assert column.name().toByteBuffer().equals(getBytes(j));
         }
-        TreeSet<ByteBuffer> columnNames = new TreeSet<ByteBuffer>(LongType.instance);
-        columnNames.add(getBytes(10));
-        columnNames.add(getBytes(0));
+        TreeSet<CellName> columnNames = new TreeSet<CellName>(cfStore.getComparator());
+        columnNames.add(cellname(10));
+        columnNames.add(cellname(0));
         cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("900"), "StandardLong1", columnNames, System.currentTimeMillis()));
-        assert "c".equals(ByteBufferUtil.string(cf.getColumn(getBytes(0)).value()));
-        assert "c".equals(ByteBufferUtil.string(cf.getColumn(getBytes(10)).value()));
+        assert "c".equals(ByteBufferUtil.string(cf.getColumn(cellname(0)).value()));
+        assert "c".equals(ByteBufferUtil.string(cf.getColumn(cellname(10)).value()));
     }
 
     private void validateTimeSort(Keyspace keyspace) throws IOException
@@ -120,12 +122,8 @@ public class TimeSortTest extends SchemaLoader
             DecoratedKey key = Util.dk(Integer.toString(i));
             for (int j = 0; j < 8; j += 3)
             {
-                ColumnFamily cf = keyspace.getColumnFamilyStore("StandardLong1").getColumnFamily(key,
-                                                                                                 getBytes(j * 2),
-                                                                                                 ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                                                                 false,
-                                                                                                 1000,
-                                                                                                 System.currentTimeMillis());
+                ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardLong1");
+                ColumnFamily cf = cfs.getColumnFamily(key, cellname(j * 2), Composites.EMPTY, false, 1000, System.currentTimeMillis());
                 Collection<Column> columns = cf.getSortedColumns();
                 assert columns.size() == 8 - j;
                 int k = j;
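
The read-path changes in this test follow the same shape everywhere in the patch: slice bounds are Composite values (with Composites.EMPTY as the open end instead of an empty ByteBuffer), and by-name reads take a set of CellName ordered by the table's own comparator. A condensed sketch of both reads, assuming the same test harness as above (SchemaLoader-created Keyspace1/StandardLong1 schema and the Util.cellname helper); the wrapper class and method names are illustrative only.

    import java.util.TreeSet;

    import org.apache.cassandra.Util;
    import static org.apache.cassandra.Util.cellname;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.Composites;
    import org.apache.cassandra.db.filter.QueryFilter;

    public class SliceReadSketch
    {
        static void read() throws Exception
        {
            ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("StandardLong1");
            DecoratedKey key = Util.dk("key0");

            // Slice read: from cell name 10 to the end of the row (open finish), at most 1000 cells.
            ColumnFamily bySlice = cfs.getColumnFamily(key, cellname(10), Composites.EMPTY, false, 1000, System.currentTimeMillis());

            // By-name read: names are CellName instances, ordered by the column family's comparator.
            TreeSet<CellName> names = new TreeSet<CellName>(cfs.getComparator());
            names.add(cellname(0));
            names.add(cellname(10));
            ColumnFamily byName = cfs.getColumnFamily(QueryFilter.getNamesFilter(key, "StandardLong1", names, System.currentTimeMillis()));
        }
    }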

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
index e392a4b..d73ccfd 100644
--- a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
@@ -41,6 +41,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.apache.cassandra.Util.cellname;
 
 public class BlacklistingCompactionsTest extends SchemaLoader
 {
@@ -93,9 +94,7 @@ public class BlacklistingCompactionsTest extends SchemaLoader
                 DecoratedKey key = Util.dk(String.valueOf(i % 2));
                 RowMutation rm = new RowMutation(KEYSPACE, key.key);
                 long timestamp = j * ROWS_PER_SSTABLE + i;
-                rm.add("Standard1", ByteBufferUtil.bytes(String.valueOf(i / 2)),
-                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                       timestamp);
+                rm.add("Standard1", cellname(i / 2), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);
                 maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
                 rm.apply();
                 inserted.add(key);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
index bd814d0..c5f5483 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
@@ -44,6 +44,7 @@ import static org.junit.Assert.assertTrue;
 
 import static org.apache.cassandra.cql3.QueryProcessor.processInternal;
 
+import static org.apache.cassandra.Util.cellname;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 
@@ -68,7 +69,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         rm = new RowMutation(KEYSPACE1, key.key);
         for (int i = 0; i < 10; i++)
         {
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         }
         rm.apply();
         cfs.forceBlockingFlush();
@@ -77,14 +78,14 @@ public class CompactionsPurgeTest extends SchemaLoader
         for (int i = 0; i < 10; i++)
         {
             rm = new RowMutation(KEYSPACE1, key.key);
-            rm.delete(cfName, ByteBufferUtil.bytes(String.valueOf(i)), 1);
+            rm.delete(cfName, cellname(String.valueOf(i)), 1);
             rm.apply();
         }
         cfs.forceBlockingFlush();
 
         // resurrect one column
         rm = new RowMutation(KEYSPACE1, key.key);
-        rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(5)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
+        rm.add(cfName, cellname(String.valueOf(5)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
         rm.apply();
         cfs.forceBlockingFlush();
 
@@ -93,7 +94,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         cfs.invalidateCachedRow(key);
         ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis()));
         assertColumns(cf, "5");
-        assert cf.getColumn(ByteBufferUtil.bytes(String.valueOf(5))) != null;
+        assert cf.getColumn(cellname(String.valueOf(5))) != null;
     }
 
     @Test
@@ -113,7 +114,7 @@ public class CompactionsPurgeTest extends SchemaLoader
             rm = new RowMutation(KEYSPACE2, key.key);
             for (int i = 0; i < 10; i++)
             {
-                rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+                rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
             }
             rm.apply();
             cfs.forceBlockingFlush();
@@ -122,7 +123,7 @@ public class CompactionsPurgeTest extends SchemaLoader
             for (int i = 0; i < 10; i++)
             {
                 rm = new RowMutation(KEYSPACE2, key.key);
-                rm.delete(cfName, ByteBufferUtil.bytes(String.valueOf(i)), 1);
+                rm.delete(cfName, cellname(String.valueOf(i)), 1);
                 rm.apply();
             }
             cfs.forceBlockingFlush();
@@ -136,7 +137,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         cfs.forceBlockingFlush();
         Collection<SSTableReader> sstablesIncomplete = cfs.getSSTables();
         rm = new RowMutation(KEYSPACE2, key1.key);
-        rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(5)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
+        rm.add(cfName, cellname(String.valueOf(5)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
         rm.apply();
         cfs.forceBlockingFlush();
         cfs.getCompactionStrategy().getUserDefinedTask(sstablesIncomplete, Integer.MAX_VALUE).execute(null);
@@ -168,20 +169,20 @@ public class CompactionsPurgeTest extends SchemaLoader
 
         // inserts
         rm = new RowMutation(KEYSPACE2, key3.key);
-        rm.add(cfName, ByteBufferUtil.bytes("c1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8);
-        rm.add(cfName, ByteBufferUtil.bytes("c2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8);
+        rm.add(cfName, cellname("c1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8);
+        rm.add(cfName, cellname("c2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8);
         rm.apply();
         cfs.forceBlockingFlush();
         // delete c1
         rm = new RowMutation(KEYSPACE2, key3.key);
-        rm.delete(cfName, ByteBufferUtil.bytes("c1"), 10);
+        rm.delete(cfName, cellname("c1"), 10);
         rm.apply();
         cfs.forceBlockingFlush();
         Collection<SSTableReader> sstablesIncomplete = cfs.getSSTables();
 
         // delete c2 so we have a new delete in a different SSTable
         rm = new RowMutation(KEYSPACE2, key3.key);
-        rm.delete(cfName, ByteBufferUtil.bytes("c2"), 9);
+        rm.delete(cfName, cellname("c2"), 9);
         rm.apply();
         cfs.forceBlockingFlush();
 
@@ -191,7 +192,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         // We should have both the c1 and c2 tombstones still. Since the min timestamp in the c2 tombstone
         // sstable is older than the c1 tombstone, it is invalid to throw out the c1 tombstone.
         ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key3, cfName, System.currentTimeMillis()));
-        assertFalse(cf.getColumn(ByteBufferUtil.bytes("c2")).isLive(System.currentTimeMillis()));
+        assertFalse(cf.getColumn(cellname("c2")).isLive(System.currentTimeMillis()));
         assertEquals(2, cf.getColumnCount());
     }
 
@@ -211,7 +212,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         rm = new RowMutation(KEYSPACE1, key.key);
         for (int i = 0; i < 5; i++)
         {
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         }
         rm.apply();
 
@@ -219,7 +220,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         for (int i = 0; i < 5; i++)
         {
             rm = new RowMutation(KEYSPACE1, key.key);
-            rm.delete(cfName, ByteBufferUtil.bytes(String.valueOf(i)), 1);
+            rm.delete(cfName, cellname(String.valueOf(i)), 1);
             rm.apply();
         }
         cfs.forceBlockingFlush();
@@ -249,7 +250,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         rm = new RowMutation(keyspaceName, key.key);
         for (int i = 0; i < 10; i++)
         {
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         }
         rm.apply();
 
@@ -269,7 +270,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         rm = new RowMutation(keyspaceName, key.key);
         for (int i = 0; i < 10; i++)
         {
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         }
         rm.apply();
 
@@ -296,7 +297,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         // inserts
         rm = new RowMutation(keyspaceName, key.key);
         for (int i = 0; i < 10; i++)
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
         rm.apply();
 
         // deletes row with timestamp such that not all columns are deleted
@@ -314,7 +315,7 @@ public class CompactionsPurgeTest extends SchemaLoader
         // re-inserts with timestamp lower than delete
         rm = new RowMutation(keyspaceName, key.key);
         for (int i = 0; i < 5; i++)
-            rm.add(cfName, ByteBufferUtil.bytes(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
+            rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
         rm.apply();
 
         // Check that the second insert went in
@@ -379,4 +380,4 @@ public class CompactionsPurgeTest extends SchemaLoader
         result = processInternal(String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1));
         assertEquals(0, result.size());
     }
-}
\ No newline at end of file
+}
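
The mechanical change running through this whole test is that RowMutation.add and RowMutation.delete now take a CellName where they previously took a raw ByteBuffer. A minimal sketch of the new insert/tombstone pattern, assuming the Util.cellname helper and the Keyspace1/Standard1 schema these tests use; the wrapper names are illustrative.

    import static org.apache.cassandra.Util.cellname;

    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class MutationSketch
    {
        static void insertThenDelete() throws Exception
        {
            // insert a cell at timestamp 0
            RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
            rm.add("Standard1", cellname("c1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
            rm.apply();

            // shadow the same cell with a tombstone at timestamp 1
            rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
            rm.delete("Standard1", cellname("c1"), 1);
            rm.apply();
        }
    }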


[12/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/QueryProcessor.java b/src/java/org/apache/cassandra/cql/QueryProcessor.java
index 87f603e..12a0c22 100644
--- a/src/java/org/apache/cassandra/cql/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql/QueryProcessor.java
@@ -35,6 +35,9 @@ import org.apache.cassandra.cql.hooks.PreExecutionHook;
 import org.apache.cassandra.cql.hooks.PreparationContext;
 import org.apache.cassandra.db.CounterColumn;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
@@ -103,7 +106,7 @@ public class QueryProcessor
         // ...of a list of column names
         if (!select.isColumnRange())
         {
-            SortedSet<ByteBuffer> columnNames = getColumnNames(select, metadata, variables);
+            SortedSet<CellName> columnNames = getColumnNames(select, metadata, variables);
             validateColumnNames(columnNames);
 
             for (Term rawKey: select.getKeys())
@@ -117,9 +120,9 @@ public class QueryProcessor
         // ...a range (slice) of column names
         else
         {
-            AbstractType<?> comparator = metadata.comparator;
-            ByteBuffer start = select.getColumnStart().getByteBuffer(comparator,variables);
-            ByteBuffer finish = select.getColumnFinish().getByteBuffer(comparator,variables);
+            AbstractType<?> at = metadata.comparator.asAbstractType();
+            Composite start = metadata.comparator.fromByteBuffer(select.getColumnStart().getByteBuffer(at,variables));
+            Composite finish = metadata.comparator.fromByteBuffer(select.getColumnFinish().getByteBuffer(at,variables));
 
             for (Term rawKey : select.getKeys())
             {
@@ -138,17 +141,17 @@ public class QueryProcessor
         return StorageProxy.read(commands, select.getConsistencyLevel());
     }
 
-    private static SortedSet<ByteBuffer> getColumnNames(SelectStatement select, CFMetaData metadata, List<ByteBuffer> variables)
+    private static SortedSet<CellName> getColumnNames(SelectStatement select, CFMetaData metadata, List<ByteBuffer> variables)
     throws InvalidRequestException
     {
         String keyString = metadata.getCQL2KeyName();
         List<Term> selectColumnNames = select.getColumnNames();
-        SortedSet<ByteBuffer> columnNames = new TreeSet<ByteBuffer>(metadata.comparator);
+        SortedSet<CellName> columnNames = new TreeSet<>(metadata.comparator);
         for (Term column : selectColumnNames)
         {
             // skip the key for the slice op; we'll add it to the resultset in extractThriftColumns
             if (!column.getText().equalsIgnoreCase(keyString))
-                columnNames.add(column.getByteBuffer(metadata.comparator,variables));
+                columnNames.add(metadata.comparator.cellFromByteBuffer(column.getByteBuffer(metadata.comparator.asAbstractType(),variables)));
         }
         return columnNames;
     }
@@ -186,8 +189,8 @@ public class QueryProcessor
         for (Relation columnRelation : columnRelations)
         {
             // Left and right side of relational expression encoded according to comparator/validator.
-            ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
-            ByteBuffer value = columnRelation.getValue().getByteBuffer(metadata.getValueValidatorFromCellName(entity), variables);
+            ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator.asAbstractType(), variables);
+            ByteBuffer value = columnRelation.getValue().getByteBuffer(metadata.getValueValidatorForFullCellName(entity), variables);
 
             expressions.add(new IndexExpression(entity,
                                                 IndexExpression.Operator.valueOf(columnRelation.operator().toString()),
@@ -230,8 +233,9 @@ public class QueryProcessor
     {
         if (select.isColumnRange() || select.getColumnNames().size() == 0)
         {
-            return new SliceQueryFilter(select.getColumnStart().getByteBuffer(metadata.comparator, variables),
-                                        select.getColumnFinish().getByteBuffer(metadata.comparator, variables),
+            AbstractType<?> comparator = metadata.comparator.asAbstractType();
+            return new SliceQueryFilter(metadata.comparator.fromByteBuffer(select.getColumnStart().getByteBuffer(comparator, variables)),
+                                        metadata.comparator.fromByteBuffer(select.getColumnFinish().getByteBuffer(comparator, variables)),
                                         select.isColumnsReversed(),
                                         select.getColumnsLimit());
         }
@@ -265,12 +269,13 @@ public class QueryProcessor
         if (select.getColumnRelations().size() > 0)
         {
             ColumnFamilyStore cfstore = Keyspace.open(keyspace).getColumnFamilyStore(select.getColumnFamily());
-            AbstractType<?> comparator = cfstore.metadata.comparator;
+            CellNameType comparator = cfstore.metadata.comparator;
+            AbstractType<?> at = comparator.asAbstractType();
             SecondaryIndexManager idxManager = cfstore.indexManager;
             for (Relation relation : select.getColumnRelations())
             {
-                ByteBuffer name = relation.getEntity().getByteBuffer(comparator, variables);
-                if ((relation.operator() == RelationType.EQ) && idxManager.indexes(name))
+                ByteBuffer name = relation.getEntity().getByteBuffer(at, variables);
+                if ((relation.operator() == RelationType.EQ) && idxManager.indexes(comparator.cellFromByteBuffer(name)))
                     return;
             }
             throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator");
@@ -300,31 +305,31 @@ public class QueryProcessor
             throw new InvalidRequestException(String.format("Expected key '%s' to be present in WHERE clause for '%s'", realKeyAlias, cfm.cfName));
     }
 
-    private static void validateColumnNames(Iterable<ByteBuffer> columns)
+    private static void validateColumnNames(Iterable<CellName> columns)
     throws InvalidRequestException
     {
-        for (ByteBuffer name : columns)
+        for (CellName name : columns)
         {
-            if (name.remaining() > org.apache.cassandra.db.Column.MAX_NAME_LENGTH)
+            if (name.dataSize() > org.apache.cassandra.db.Column.MAX_NAME_LENGTH)
                 throw new InvalidRequestException(String.format("column name is too long (%s > %s)",
-                                                                name.remaining(),
+                                                                name.dataSize(),
                                                                 org.apache.cassandra.db.Column.MAX_NAME_LENGTH));
-            if (name.remaining() == 0)
+            if (name.isEmpty())
                 throw new InvalidRequestException("zero-length column name");
         }
     }
 
-    public static void validateColumnName(ByteBuffer column)
+    public static void validateColumnName(CellName column)
     throws InvalidRequestException
     {
         validateColumnNames(Arrays.asList(column));
     }
 
-    public static void validateColumn(CFMetaData metadata, ByteBuffer name, ByteBuffer value)
+    public static void validateColumn(CFMetaData metadata, CellName name, ByteBuffer value)
     throws InvalidRequestException
     {
         validateColumnName(name);
-        AbstractType<?> validator = metadata.getValueValidatorFromCellName(name);
+        AbstractType<?> validator = metadata.getValueValidator(name);
 
         try
         {
@@ -334,7 +339,7 @@ public class QueryProcessor
         catch (MarshalException me)
         {
             throw new InvalidRequestException(String.format("Invalid column value for column (name=%s); %s",
-                                                            ByteBufferUtil.bytesToHex(name),
+                                                            ByteBufferUtil.bytesToHex(name.toByteBuffer()),
                                                             me.getMessage()));
         }
     }
@@ -354,12 +359,12 @@ public class QueryProcessor
         validateSliceFilter(metadata, range.start(), range.finish(), range.reversed);
     }
 
-    private static void validateSliceFilter(CFMetaData metadata, ByteBuffer start, ByteBuffer finish, boolean reversed)
+    private static void validateSliceFilter(CFMetaData metadata, Composite start, Composite finish, boolean reversed)
     throws InvalidRequestException
     {
-        AbstractType<?> comparator = metadata.comparator;
-        Comparator<ByteBuffer> orderedComparator = reversed ? comparator.reverseComparator: comparator;
-        if (start.remaining() > 0 && finish.remaining() > 0 && orderedComparator.compare(start, finish) > 0)
+        CellNameType comparator = metadata.comparator;
+        Comparator<Composite> orderedComparator = reversed ? comparator.reverseComparator(): comparator;
+        if (!start.isEmpty() && !finish.isEmpty() && orderedComparator.compare(start, finish) > 0)
             throw new InvalidRequestException("range finish must come after start in traversal order");
     }
 
@@ -442,7 +447,7 @@ public class QueryProcessor
                 // otherwise create resultset from query results
                 result.schema = new CqlMetadata(new HashMap<ByteBuffer, String>(),
                                                 new HashMap<ByteBuffer, String>(),
-                                                TypeParser.getShortName(metadata.comparator),
+                                                TypeParser.getShortName(metadata.comparator.asAbstractType()),
                                                 TypeParser.getShortName(metadata.getDefaultValidator()));
                 List<CqlRow> cqlRows = new ArrayList<CqlRow>(rows.size());
                 for (org.apache.cassandra.db.Row row : rows)
@@ -467,9 +472,9 @@ public class QueryProcessor
                                 if (c.isMarkedForDelete(now))
                                     continue;
 
-                                ColumnDefinition cd = metadata.getColumnDefinitionFromCellName(c.name());
+                                ColumnDefinition cd = metadata.getColumnDefinition(c.name());
                                 if (cd != null)
-                                    result.schema.value_types.put(c.name(), TypeParser.getShortName(cd.type));
+                                    result.schema.value_types.put(c.name().toByteBuffer(), TypeParser.getShortName(cd.type));
 
                                 thriftColumns.add(thriftify(c));
                             }
@@ -495,22 +500,23 @@ public class QueryProcessor
                             if (row.cf == null)
                                 continue;
 
-                            ByteBuffer name;
+                            ByteBuffer nameBytes;
                             try
                             {
-                                name = term.getByteBuffer(metadata.comparator, variables);
+                                nameBytes = term.getByteBuffer(metadata.comparator.asAbstractType(), variables);
                             }
                             catch (InvalidRequestException e)
                             {
                                 throw new AssertionError(e);
                             }
 
-                            ColumnDefinition cd = metadata.getColumnDefinitionFromCellName(name);
+                            CellName name = metadata.comparator.cellFromByteBuffer(nameBytes);
+                            ColumnDefinition cd = metadata.getColumnDefinition(name);
                             if (cd != null)
-                                result.schema.value_types.put(name, TypeParser.getShortName(cd.type));
+                                result.schema.value_types.put(nameBytes, TypeParser.getShortName(cd.type));
                             org.apache.cassandra.db.Column c = row.cf.getColumn(name);
-                            if (c == null || c.isMarkedForDelete(now))
-                                thriftColumns.add(new Column().setName(name));
+                            if (c == null || c.isMarkedForDelete(System.currentTimeMillis()))
+                                thriftColumns.add(new Column().setName(nameBytes));
                             else
                                 thriftColumns.add(thriftify(c));
                         }
@@ -683,7 +689,7 @@ public class QueryProcessor
                         if (cd.getIndexType() != null)
                             throw new InvalidRequestException("Index already exists");
                         if (logger.isDebugEnabled())
-                            logger.debug("Updating column {} definition for index {}", cfm.comparator.getString(columnName), createIdx.getIndexName());
+                            logger.debug("Updating column {} definition for index {}", cfm.comparator.getString(cfm.comparator.fromByteBuffer(columnName)), createIdx.getIndexName());
                         cd.setIndexType(IndexType.KEYS, Collections.<String, String>emptyMap());
                         cd.setIndexName(createIdx.getIndexName());
                         columnExists = true;
@@ -691,7 +697,7 @@ public class QueryProcessor
                     }
                 }
                 if (!columnExists)
-                    throw new InvalidRequestException("No column definition found for column " + oldCfm.comparator.getString(columnName));
+                    throw new InvalidRequestException("No column definition found for column " + oldCfm.comparator.getString(cfm.comparator.fromByteBuffer(columnName)));
 
                 try
                 {
@@ -853,7 +859,7 @@ public class QueryProcessor
         ByteBuffer value = (c instanceof CounterColumn)
                            ? ByteBufferUtil.bytes(CounterContext.instance().total(c.value()))
                            : c.value();
-        return new Column(c.name()).setValue(value).setTimestamp(c.timestamp());
+        return new Column(c.name().toByteBuffer()).setValue(value).setTimestamp(c.timestamp());
     }
 
     private static CQLStatement getStatement(String queryStr) throws SyntaxException
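
The pattern repeated throughout this file: metadata.comparator is now a CellNameType rather than an AbstractType, so CQL2 terms are still decoded against comparator.asAbstractType(), and the resulting ByteBuffer is then lifted into a CellName (for exact column names) or a Composite (for slice bounds). A condensed sketch of that round trip; the helper class and method names are illustrative, only the comparator calls and the SliceQueryFilter constructor come from the patch.

    import java.nio.ByteBuffer;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.db.filter.SliceQueryFilter;

    class CompositeLiftingSketch
    {
        // Slice bounds become (possibly partial) Composites.
        static SliceQueryFilter makeSlice(CFMetaData metadata, ByteBuffer start, ByteBuffer finish, boolean reversed, int limit)
        {
            CellNameType comparator = metadata.comparator;   // previously an AbstractType<?>
            return new SliceQueryFilter(comparator.fromByteBuffer(start),
                                        comparator.fromByteBuffer(finish),
                                        reversed,
                                        limit);
        }

        // Exact names become full CellNames; length validation now goes through dataSize().
        static CellName makeName(CFMetaData metadata, ByteBuffer nameBytes)
        {
            return metadata.comparator.cellFromByteBuffer(nameBytes);
        }
    }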

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/SelectStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/SelectStatement.java b/src/java/org/apache/cassandra/cql/SelectStatement.java
index b77e9ec..b4ccd56 100644
--- a/src/java/org/apache/cassandra/cql/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql/SelectStatement.java
@@ -17,13 +17,10 @@
  */
 package org.apache.cassandra.cql;
 
-import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.Set;
 
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.ConsistencyLevel;
 
 /**

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql/UpdateStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql/UpdateStatement.java b/src/java/org/apache/cassandra/cql/UpdateStatement.java
index 59d797f..446c5a2 100644
--- a/src/java/org/apache/cassandra/cql/UpdateStatement.java
+++ b/src/java/org/apache/cassandra/cql/UpdateStatement.java
@@ -23,6 +23,8 @@ import java.util.*;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.CounterMutation;
 import org.apache.cassandra.db.ConsistencyLevel;
 import org.apache.cassandra.db.IMutation;
@@ -177,7 +179,8 @@ public class UpdateStatement extends AbstractModification
     throws InvalidRequestException
     {
         validateKey(key);
-        AbstractType<?> comparator = metadata.comparator;
+        CellNameType comparator = metadata.comparator;
+        AbstractType<?> at = comparator.asAbstractType();
 
         // if true we need to wrap RowMutation into CounterMutation
         boolean hasCounterColumn = false;
@@ -185,7 +188,7 @@ public class UpdateStatement extends AbstractModification
 
         for (Map.Entry<Term, Operation> column : getColumns().entrySet())
         {
-            ByteBuffer colName = column.getKey().getByteBuffer(comparator, variables);
+            CellName colName = comparator.cellFromByteBuffer(column.getKey().getByteBuffer(at, variables));
             Operation op = column.getValue();
 
             if (op.isUnary())
@@ -193,7 +196,7 @@ public class UpdateStatement extends AbstractModification
                 if (hasCounterColumn)
                     throw new InvalidRequestException("Mix of commutative and non-commutative operations is not allowed.");
 
-                ByteBuffer colValue = op.a.getByteBuffer(metadata.getValueValidatorFromCellName(colName),variables);
+                ByteBuffer colValue = op.a.getByteBuffer(metadata.getValueValidator(colName),variables);
 
                 validateColumn(metadata, colName, colValue);
                 rm.add(columnFamily,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/CQL3Row.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/CQL3Row.java b/src/java/org/apache/cassandra/cql3/CQL3Row.java
new file mode 100644
index 0000000..bc6bb2c
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/CQL3Row.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3;
+
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.cassandra.db.Column;
+
+public interface CQL3Row
+{
+    public ByteBuffer getClusteringColumn(int i);
+    public Column getColumn(ColumnIdentifier name);
+    public List<Column> getCollection(ColumnIdentifier name);
+
+    public interface Builder
+    {
+        Iterator<CQL3Row> group(Iterator<Column> cells);
+    }
+}
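
This new interface is the CQL3-level view of a partition: a Builder groups the flat stream of storage cells into logical rows, and each CQL3Row exposes its clustering values plus per-column cells. How a Builder instance is obtained is not shown in this file (it is assumed to come from the table's comparator elsewhere in the patch), so the sketch below only illustrates consumption; the wrapper class and method names are hypothetical.

    import java.nio.ByteBuffer;
    import java.util.Iterator;
    import java.util.List;

    import org.apache.cassandra.cql3.CQL3Row;
    import org.apache.cassandra.cql3.ColumnIdentifier;
    import org.apache.cassandra.db.Column;

    class CQL3RowSketch
    {
        static int countRows(CQL3Row.Builder builder, Iterator<Column> cells,
                             ColumnIdentifier regularColumn, ColumnIdentifier collectionColumn)
        {
            int count = 0;
            Iterator<CQL3Row> rows = builder.group(cells);               // group flat cells into CQL3 rows
            while (rows.hasNext())
            {
                CQL3Row row = rows.next();
                ByteBuffer firstClustering = row.getClusteringColumn(0); // value of the first clustering column
                Column cell = row.getColumn(regularColumn);              // cell of a regular column (assumed null when absent)
                List<Column> collectionCells = row.getCollection(collectionColumn); // all cells of a collection column
                count++;
            }
            return count;
        }
    }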

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java b/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
index a11094e..db203e6 100644
--- a/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
+++ b/src/java/org/apache/cassandra/cql3/ColumnIdentifier.java
@@ -20,14 +20,17 @@ package org.apache.cassandra.cql3;
 import java.util.Locale;
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.cql3.statements.Selectable;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.ObjectSizes;
 
 /**
  * Represents an identifer for a CQL column definition.
  */
-public class ColumnIdentifier implements Selectable, Comparable<ColumnIdentifier>
+public class ColumnIdentifier implements Selectable, Comparable<ColumnIdentifier>, IMeasurableMemory
 {
     public final ByteBuffer bytes;
     private final String text;
@@ -53,6 +56,11 @@ public class ColumnIdentifier implements Selectable, Comparable<ColumnIdentifier
     @Override
     public final boolean equals(Object o)
     {
+        // Note: it's worth checking for reference equality since we intern those
+        // in SparseCellNameType
+        if (this == o)
+            return true;
+
         if(!(o instanceof ColumnIdentifier))
             return false;
         ColumnIdentifier that = (ColumnIdentifier)o;
@@ -65,8 +73,18 @@ public class ColumnIdentifier implements Selectable, Comparable<ColumnIdentifier
         return text;
     }
 
+    public long memorySize()
+    {
+        return ObjectSizes.getFieldSize(2 * ObjectSizes.getReferenceSize())
+             + ObjectSizes.getSize(bytes)
+             + TypeSizes.NATIVE.sizeof(text);
+    }
+
     public int compareTo(ColumnIdentifier other)
     {
+        if (this == other)
+            return 0;
+
         return bytes.compareTo(other.bytes);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/ColumnNameBuilder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/ColumnNameBuilder.java b/src/java/org/apache/cassandra/cql3/ColumnNameBuilder.java
deleted file mode 100644
index 384916d..0000000
--- a/src/java/org/apache/cassandra/cql3/ColumnNameBuilder.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.cql3;
-
-import java.nio.ByteBuffer;
-
-/**
- * Build a potentially composite column name.
- */
-public interface ColumnNameBuilder
-{
-    /**
-     * Add a new ByteBuffer as the next component for this name.
-     * @param bb the ByteBuffer to add
-     * @throws IllegalStateException if the builder is full, i.e. if enough components have been added.
-     * @return this builder
-     */
-    public ColumnNameBuilder add(ByteBuffer bb);
-
-    public ColumnNameBuilder add(ColumnIdentifier name);
-
-    /**
-     * Add a new ByteBuffer as the next component for this name.
-     * @param t the ByteBuffer to add
-     * @param op the relationship this component should respect.
-     * @throws IllegalStateException if the builder is full, i.e. if enough components have been added.
-     * @return this builder
-     */
-    public ColumnNameBuilder add(ByteBuffer t, Relation.Type op);
-
-    /**
-     * Returns the number of component already added to this builder.
-     * @return the number of component in this Builder
-     */
-    public int componentCount();
-
-    /**
-     * @return the maximum number of component that can still be added to this Builder
-     */
-    public int remainingCount();
-
-    /**
-     * @return the ith component in this builder.
-     */
-    public ByteBuffer get(int idx);
-
-    /**
-     * Build the column name.
-     * @return the built column name
-     */
-    public ByteBuffer build();
-
-    /**
-     * Build the column name so that the result sorts at the end of the range
-     * represented by this (incomplete) column name.
-     * @throws IllegalStateException if the builder is empty or full.
-     */
-    public ByteBuffer buildAsEndOfRange();
-
-    /**
-     * Clone this builder.
-     * @return the cloned builder.
-     */
-    public ColumnNameBuilder copy();
-
-    /**
-     * Returns the ith component added to this builder.
-     *
-     * @param i the component to return
-     * @return the ith component added to this builder.
-     * @throws IllegalArgumentException if i >= componentCount().
-     */
-    public ByteBuffer getComponent(int i);
-
-}
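
With ColumnNameBuilder gone, call sites no longer assemble a cell name component by component and then build() it; they pass a Composite prefix (the clustering part) and ask the ColumnFamily's comparator for the finished CellName, as the Constants/Lists/Maps changes below show. A small before/after sketch, assuming the Operation.execute(...) arguments those classes receive; the wrapper class and method are illustrative.

    import java.nio.ByteBuffer;

    import org.apache.cassandra.config.ColumnDefinition;
    import org.apache.cassandra.cql3.UpdateParameters;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.exceptions.InvalidRequestException;

    class NameBuildingSketch
    {
        static void set(ColumnFamily cf, Composite prefix, ColumnDefinition column, ByteBuffer value, UpdateParameters params)
        throws InvalidRequestException
        {
            // Before this patch (ColumnNameBuilder prefix):
            //     ByteBuffer cname = prefix.add(columnName).build();
            // After: the comparator owns cell name construction.
            CellName cname = cf.getComparator().create(prefix, column.name);
            cf.addColumn(params.makeColumn(cname, value));
        }
    }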

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/Constants.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Constants.java b/src/java/org/apache/cassandra/cql3/Constants.java
index 34b3f8d..1784752 100644
--- a/src/java/org/apache/cassandra/cql3/Constants.java
+++ b/src/java/org/apache/cassandra/cql3/Constants.java
@@ -20,18 +20,20 @@ package org.apache.cassandra.cql3;
 import java.nio.ByteBuffer;
 import java.util.List;
 
-import org.apache.cassandra.serializers.MarshalException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.db.ColumnFamily;
+import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.CounterColumnType;
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.db.marshal.ReversedType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 /**
@@ -261,7 +263,7 @@ public abstract class Constants
         protected Marker(int bindIndex, ColumnSpecification receiver)
         {
             super(bindIndex, receiver);
-            assert !(receiver.type instanceof CollectionType);
+            assert !receiver.type.isCollection();
         }
 
         @Override
@@ -289,14 +291,14 @@ public abstract class Constants
 
     public static class Setter extends Operation
     {
-        public Setter(ColumnIdentifier column, Term t)
+        public Setter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            ByteBuffer cname = columnName == null ? prefix.build() : prefix.add(columnName).build();
+            CellName cname = cf.getComparator().create(prefix, column.name);
             ByteBuffer value = t.bindAndGet(params.variables);
             cf.addColumn(value == null ? params.makeTombstone(cname) : params.makeColumn(cname, value));
         }
@@ -304,30 +306,30 @@ public abstract class Constants
 
     public static class Adder extends Operation
     {
-        public Adder(ColumnIdentifier column, Term t)
+        public Adder(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             ByteBuffer bytes = t.bindAndGet(params.variables);
             if (bytes == null)
                 throw new InvalidRequestException("Invalid null value for counter increment");
             long increment = ByteBufferUtil.toLong(bytes);
-            ByteBuffer cname = columnName == null ? prefix.build() : prefix.add(columnName).build();
+            CellName cname = cf.getComparator().create(prefix, column.name);
             cf.addCounter(cname, increment);
         }
     }
 
     public static class Substracter extends Operation
     {
-        public Substracter(ColumnIdentifier column, Term t)
+        public Substracter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             ByteBuffer bytes = t.bindAndGet(params.variables);
             if (bytes == null)
@@ -337,7 +339,7 @@ public abstract class Constants
             if (increment == Long.MIN_VALUE)
                 throw new InvalidRequestException("The negation of " + increment + " overflows supported counter precision (signed 8 bytes integer)");
 
-            ByteBuffer cname = columnName == null ? prefix.build() : prefix.add(columnName).build();
+            CellName cname = cf.getComparator().create(prefix, column.name);
             cf.addCounter(cname, -increment);
         }
     }
@@ -346,22 +348,18 @@ public abstract class Constants
     // duplicating this further
     public static class Deleter extends Operation
     {
-        private final boolean isCollection;
-
-        public Deleter(ColumnIdentifier column, boolean isCollection)
+        public Deleter(ColumnDefinition column)
         {
             super(column, null);
-            this.isCollection = isCollection;
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            ColumnNameBuilder column = prefix.add(columnName);
-
-            if (isCollection)
-                cf.addAtom(params.makeRangeTombstone(column.build(), column.buildAsEndOfRange()));
+            CellName cname = cf.getComparator().create(prefix, column.name);
+            if (column.type.isCollection())
+                cf.addAtom(params.makeRangeTombstone(cname.slice()));
             else
-                cf.addColumn(params.makeTombstone(column.build()));
+                cf.addColumn(params.makeTombstone(cname));
         }
     };
 }
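
One design point worth noting from the Deleter change above: a collection column is cleared with a range tombstone covering the whole slice under its cell name, while a scalar column gets a single tombstone, and the ColumnDefinition itself now tells the two cases apart instead of an isCollection flag passed in. A minimal sketch of that branch, under the same execute(...) assumptions as before; wrapper names are illustrative.

    import org.apache.cassandra.config.ColumnDefinition;
    import org.apache.cassandra.cql3.UpdateParameters;
    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.exceptions.InvalidRequestException;

    class DeleteSketch
    {
        static void delete(ColumnFamily cf, Composite prefix, ColumnDefinition column, UpdateParameters params)
        throws InvalidRequestException
        {
            CellName cname = cf.getComparator().create(prefix, column.name);
            if (column.type.isCollection())
                cf.addAtom(params.makeRangeTombstone(cname.slice()));  // covers every cell of the collection
            else
                cf.addColumn(params.makeTombstone(cname));
        }
    }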

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/Lists.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Lists.java b/src/java/org/apache/cassandra/cql3/Lists.java
index 35da98e..6bcdfcd 100644
--- a/src/java/org/apache/cassandra/cql3/Lists.java
+++ b/src/java/org/apache/cassandra/cql3/Lists.java
@@ -23,8 +23,11 @@ import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.ListType;
@@ -32,7 +35,6 @@ import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.UUIDGen;
 
 /**
@@ -258,17 +260,17 @@ public abstract class Lists
 
     public static class Setter extends Operation
     {
-        public Setter(ColumnIdentifier column, Term t)
+        public Setter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             // delete + append
-            ColumnNameBuilder column = prefix.add(columnName);
-            cf.addAtom(params.makeTombstoneForOverwrite(column.build(), column.buildAsEndOfRange()));
-            Appender.doAppend(t, cf, column, params);
+            CellName name = cf.getComparator().create(prefix, column.name);
+            cf.addAtom(params.makeTombstoneForOverwrite(name.slice()));
+            Appender.doAppend(t, cf, prefix, column.name, params);
         }
     }
 
@@ -276,7 +278,7 @@ public abstract class Lists
     {
         private final Term idx;
 
-        public SetterByIndex(ColumnIdentifier column, Term idx, Term t)
+        public SetterByIndex(ColumnDefinition column, Term idx, Term t)
         {
             super(column, t);
             this.idx = idx;
@@ -295,7 +297,7 @@ public abstract class Lists
             idx.collectMarkerSpecification(boundNames);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             ByteBuffer index = idx.bindAndGet(params.variables);
             ByteBuffer value = t.bindAndGet(params.variables);
@@ -303,13 +305,12 @@ public abstract class Lists
             if (index == null)
                 throw new InvalidRequestException("Invalid null value for list index");
 
-            List<Pair<ByteBuffer, Column>> existingList = params.getPrefetchedList(rowKey, columnName);
+            List<Column> existingList = params.getPrefetchedList(rowKey, column.name);
             int idx = ByteBufferUtil.toInt(index);
             if (idx < 0 || idx >= existingList.size())
                 throw new InvalidRequestException(String.format("List index %d out of bound, list has size %d", idx, existingList.size()));
 
-            ByteBuffer elementName = existingList.get(idx).right.name();
-
+            CellName elementName = existingList.get(idx).name();
             if (value == null)
             {
                 cf.addColumn(params.makeTombstone(elementName));
@@ -329,17 +330,17 @@ public abstract class Lists
 
     public static class Appender extends Operation
     {
-        public Appender(ColumnIdentifier column, Term t)
+        public Appender(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            doAppend(t, cf, prefix.add(columnName), params);
+            doAppend(t, cf, prefix, column.name, params);
         }
 
-        static void doAppend(Term t, ColumnFamily cf, ColumnNameBuilder columnName, UpdateParameters params) throws InvalidRequestException
+        static void doAppend(Term t, ColumnFamily cf, Composite prefix, ColumnIdentifier columnName, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal value = t.bind(params.variables);
             // If we append null, do nothing. Note that for Setter, we've
@@ -351,22 +352,20 @@ public abstract class Lists
             List<ByteBuffer> toAdd = ((Lists.Value)value).elements;
             for (int i = 0; i < toAdd.size(); i++)
             {
-                ColumnNameBuilder b = i == toAdd.size() - 1 ? columnName : columnName.copy();
                 ByteBuffer uuid = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
-                ByteBuffer cellName = b.add(uuid).build();
-                cf.addColumn(params.makeColumn(cellName, toAdd.get(i)));
+                cf.addColumn(params.makeColumn(cf.getComparator().create(prefix, columnName, uuid), toAdd.get(i)));
             }
         }
     }
 
     public static class Prepender extends Operation
     {
-        public Prepender(ColumnIdentifier column, Term t)
+        public Prepender(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal value = t.bind(params.variables);
             if (value == null)
@@ -376,21 +375,18 @@ public abstract class Lists
             long time = PrecisionTime.REFERENCE_TIME - (System.currentTimeMillis() - PrecisionTime.REFERENCE_TIME);
 
             List<ByteBuffer> toAdd = ((Lists.Value)value).elements;
-            ColumnNameBuilder column = prefix.add(columnName);
             for (int i = 0; i < toAdd.size(); i++)
             {
-                ColumnNameBuilder b = i == toAdd.size() - 1 ? column : column.copy();
                 PrecisionTime pt = PrecisionTime.getNext(time);
                 ByteBuffer uuid = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes(pt.millis, pt.nanos));
-                ByteBuffer cellName = b.add(uuid).build();
-                cf.addColumn(params.makeColumn(cellName, toAdd.get(i)));
+                cf.addColumn(params.makeColumn(cf.getComparator().create(prefix, column.name, uuid), toAdd.get(i)));
             }
         }
     }
 
     public static class Discarder extends Operation
     {
-        public Discarder(ColumnIdentifier column, Term t)
+        public Discarder(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
@@ -401,9 +397,9 @@ public abstract class Lists
             return true;
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            List<Pair<ByteBuffer, Column>> existingList = params.getPrefetchedList(rowKey, columnName);
+            List<Column> existingList = params.getPrefetchedList(rowKey, column.name);
             if (existingList.isEmpty())
                 return;
 
@@ -418,18 +414,17 @@ public abstract class Lists
             // the read-before-write this operation requires limits its usefulness on big lists, so in practice
             // toDiscard will be small and keeping a list will be more efficient.
             List<ByteBuffer> toDiscard = ((Lists.Value)value).elements;
-            for (Pair<ByteBuffer, Column> p : existingList)
+            for (Column cell : existingList)
             {
-                Column element = p.right;
-                if (toDiscard.contains(element.value()))
-                    cf.addColumn(params.makeTombstone(element.name()));
+                if (toDiscard.contains(cell.value()))
+                    cf.addColumn(params.makeTombstone(cell.name()));
             }
         }
     }
 
     public static class DiscarderByIndex extends Operation
     {
-        public DiscarderByIndex(ColumnIdentifier column, Term idx)
+        public DiscarderByIndex(ColumnDefinition column, Term idx)
         {
             super(column, idx);
         }
@@ -440,7 +435,7 @@ public abstract class Lists
             return true;
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal index = t.bind(params.variables);
             if (index == null)
@@ -448,12 +443,12 @@ public abstract class Lists
 
             assert index instanceof Constants.Value;
 
-            List<Pair<ByteBuffer, Column>> existingList = params.getPrefetchedList(rowKey, columnName);
+            List<Column> existingList = params.getPrefetchedList(rowKey, column.name);
             int idx = ByteBufferUtil.toInt(((Constants.Value)index).bytes);
             if (idx < 0 || idx >= existingList.size())
                 throw new InvalidRequestException(String.format("List index %d out of bound, list has size %d", idx, existingList.size()));
 
-            ByteBuffer elementName = existingList.get(idx).right.name();
+            CellName elementName = existingList.get(idx).name();
             cf.addColumn(params.makeTombstone(elementName));
         }
     }
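
For reference, the pattern the list operations above now follow when materializing element
cells is roughly the following. This is a minimal sketch, not code from the patch: the helper
name appendElements is hypothetical, while CellName, Composite and the comparator's create(...)
calls are the ones introduced here.

    static void appendElements(ColumnFamily cf, Composite prefix, ColumnIdentifier column,
                               List<ByteBuffer> elements, UpdateParameters params)
            throws InvalidRequestException
    {
        for (ByteBuffer element : elements)
        {
            // one cell per element, named (clustering prefix, CQL3 column, time-based UUID);
            // no more per-iteration ColumnNameBuilder copies
            ByteBuffer uuid = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
            CellName name = cf.getComparator().create(prefix, column, uuid);
            cf.addColumn(params.makeColumn(name, element));
        }
    }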

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/Maps.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Maps.java b/src/java/org/apache/cassandra/cql3/Maps.java
index 4107b95..d156845 100644
--- a/src/java/org/apache/cassandra/cql3/Maps.java
+++ b/src/java/org/apache/cassandra/cql3/Maps.java
@@ -27,7 +27,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.ColumnFamily;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.MapType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -236,17 +239,17 @@ public abstract class Maps
 
     public static class Setter extends Operation
     {
-        public Setter(ColumnIdentifier column, Term t)
+        public Setter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             // delete + put
-            ColumnNameBuilder column = prefix.add(columnName);
-            cf.addAtom(params.makeTombstoneForOverwrite(column.build(), column.buildAsEndOfRange()));
-            Putter.doPut(t, cf, column, params);
+            CellName name = cf.getComparator().create(prefix, column.name);
+            cf.addAtom(params.makeTombstoneForOverwrite(name.slice()));
+            Putter.doPut(t, cf, prefix, column.name, params);
         }
     }
 
@@ -254,7 +257,7 @@ public abstract class Maps
     {
         private final Term k;
 
-        public SetterByKey(ColumnIdentifier column, Term k, Term t)
+        public SetterByKey(ColumnDefinition column, Term k, Term t)
         {
             super(column, t);
             this.k = k;
@@ -267,14 +270,14 @@ public abstract class Maps
             k.collectMarkerSpecification(boundNames);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             ByteBuffer key = k.bindAndGet(params.variables);
             ByteBuffer value = t.bindAndGet(params.variables);
             if (key == null)
                 throw new InvalidRequestException("Invalid null map key");
 
-            ByteBuffer cellName = prefix.add(columnName).add(key).build();
+            CellName cellName = cf.getComparator().create(prefix, column.name, key);
 
             if (value == null)
             {
@@ -295,17 +298,17 @@ public abstract class Maps
 
     public static class Putter extends Operation
     {
-        public Putter(ColumnIdentifier column, Term t)
+        public Putter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            doPut(t, cf, prefix.add(columnName), params);
+            doPut(t, cf, prefix, column.name, params);
         }
 
-        static void doPut(Term t, ColumnFamily cf, ColumnNameBuilder columnName, UpdateParameters params) throws InvalidRequestException
+        static void doPut(Term t, ColumnFamily cf, Composite prefix, ColumnIdentifier columnName, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal value = t.bind(params.variables);
             if (value == null)
@@ -315,7 +318,7 @@ public abstract class Maps
             Map<ByteBuffer, ByteBuffer> toAdd = ((Maps.Value)value).map;
             for (Map.Entry<ByteBuffer, ByteBuffer> entry : toAdd.entrySet())
             {
-                ByteBuffer cellName = columnName.copy().add(entry.getKey()).build();
+                CellName cellName = cf.getComparator().create(prefix, columnName, entry.getKey());
                 cf.addColumn(params.makeColumn(cellName, entry.getValue()));
             }
         }
@@ -323,19 +326,19 @@ public abstract class Maps
 
     public static class DiscarderByKey extends Operation
     {
-        public DiscarderByKey(ColumnIdentifier column, Term k)
+        public DiscarderByKey(ColumnDefinition column, Term k)
         {
             super(column, k);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal key = t.bind(params.variables);
             if (key == null)
                 throw new InvalidRequestException("Invalid null map key");
             assert key instanceof Constants.Value;
 
-            ByteBuffer cellName = prefix.add(columnName).add(((Constants.Value)key).bytes).build();
+            CellName cellName = cf.getComparator().create(prefix, column.name, ((Constants.Value)key).bytes);
             cf.addColumn(params.makeTombstone(cellName));
         }
     }
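
The map Setter shows the general overwrite shape under the new API: one range tombstone over
the collection's slice, then the new puts. A sketch of that shape (overwriteMap is a
hypothetical helper name; the calls mirror Setter and Putter.doPut above):

    static void overwriteMap(ColumnFamily cf, Composite prefix, ColumnIdentifier column,
                             Map<ByteBuffer, ByteBuffer> newValues, UpdateParameters params)
            throws InvalidRequestException
    {
        // a single range tombstone over the collection slice shadows every existing entry
        CellName collection = cf.getComparator().create(prefix, column);
        cf.addAtom(params.makeTombstoneForOverwrite(collection.slice()));

        // each new entry then becomes a cell named (prefix, column, map key)
        for (Map.Entry<ByteBuffer, ByteBuffer> entry : newValues.entrySet())
            cf.addColumn(params.makeColumn(cf.getComparator().create(prefix, column, entry.getKey()),
                                           entry.getValue()));
    }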

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/Operation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Operation.java b/src/java/org/apache/cassandra/cql3/Operation.java
index 65f08cb..689cee0 100644
--- a/src/java/org/apache/cassandra/cql3/Operation.java
+++ b/src/java/org/apache/cassandra/cql3/Operation.java
@@ -21,6 +21,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.ColumnFamily;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.CounterColumnType;
 import org.apache.cassandra.db.marshal.ListType;
@@ -42,16 +43,17 @@ import org.apache.cassandra.exceptions.InvalidRequestException;
  */
 public abstract class Operation
 {
-    // Name of the column the operation applies to
-    public final ColumnIdentifier columnName;
+    // the column the operation applies to
+    public final ColumnDefinition column;
 
     // Term involved in the operation. In theory this should not be here since some operations
     // may require none or more than one term, but most need one, so it simplifies things a bit.
     protected final Term t;
 
-    protected Operation(ColumnIdentifier columnName, Term t)
+    protected Operation(ColumnDefinition column, Term t)
     {
-        this.columnName = columnName;
+        assert column != null;
+        this.column = column;
         this.t = t;
     }
 
@@ -81,11 +83,10 @@ public abstract class Operation
      *
      * @param rowKey row key for the update.
      * @param cf the column family to which to add the updates generated by this operation.
-     * @param namePrefix the prefix that identify the CQL3 row this operation applies to (callers should not reuse
-     * the ColumnNameBuilder they pass here).
+     * @param prefix the prefix that identifies the CQL3 row this operation applies to.
      * @param params parameters of the update.
      */
-    public abstract void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder namePrefix, UpdateParameters params) throws InvalidRequestException;
+    public abstract void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException;
 
     /**
      * A parsed raw UPDATE operation.
@@ -144,7 +145,7 @@ public abstract class Operation
          * @param receiver the "column" this operation applies to.
          * @return the prepared delete operation.
          */
-        public Operation prepare(ColumnSpecification receiver) throws InvalidRequestException;
+        public Operation prepare(ColumnDefinition receiver) throws InvalidRequestException;
     }
 
     public static class SetValue implements RawUpdate
@@ -164,16 +165,16 @@ public abstract class Operation
                 throw new InvalidRequestException(String.format("Cannot set the value of counter column %s (counters can only be incremented/decremented, not set)", receiver));
 
             if (!(receiver.type instanceof CollectionType))
-                return new Constants.Setter(receiver.kind == ColumnDefinition.Kind.COMPACT_VALUE ? null : receiver.name, v);
+                return new Constants.Setter(receiver, v);
 
             switch (((CollectionType)receiver.type).kind)
             {
                 case LIST:
-                    return new Lists.Setter(receiver.name, v);
+                    return new Lists.Setter(receiver, v);
                 case SET:
-                    return new Sets.Setter(receiver.name, v);
+                    return new Sets.Setter(receiver, v);
                 case MAP:
-                    return new Maps.Setter(receiver.name, v);
+                    return new Maps.Setter(receiver, v);
             }
             throw new AssertionError();
         }
@@ -212,13 +213,13 @@ public abstract class Operation
                 case LIST:
                     Term idx = selector.prepare(Lists.indexSpecOf(receiver));
                     Term lval = value.prepare(Lists.valueSpecOf(receiver));
-                    return new Lists.SetterByIndex(receiver.name, idx, lval);
+                    return new Lists.SetterByIndex(receiver, idx, lval);
                 case SET:
                     throw new InvalidRequestException(String.format("Invalid operation (%s) for set column %s", toString(receiver), receiver));
                 case MAP:
                     Term key = selector.prepare(Maps.keySpecOf(receiver));
                     Term mval = value.prepare(Maps.valueSpecOf(receiver));
-                    return new Maps.SetterByKey(receiver.name, key, mval);
+                    return new Maps.SetterByKey(receiver, key, mval);
             }
             throw new AssertionError();
         }
@@ -253,17 +254,17 @@ public abstract class Operation
             {
                 if (!(receiver.type instanceof CounterColumnType))
                     throw new InvalidRequestException(String.format("Invalid operation (%s) for non counter column %s", toString(receiver), receiver));
-                return new Constants.Adder(receiver.kind == ColumnDefinition.Kind.COMPACT_VALUE ? null : receiver.name, v);
+                return new Constants.Adder(receiver, v);
             }
 
             switch (((CollectionType)receiver.type).kind)
             {
                 case LIST:
-                    return new Lists.Appender(receiver.name, v);
+                    return new Lists.Appender(receiver, v);
                 case SET:
-                    return new Sets.Adder(receiver.name, v);
+                    return new Sets.Adder(receiver, v);
                 case MAP:
-                    return new Maps.Putter(receiver.name, v);
+                    return new Maps.Putter(receiver, v);
             }
             throw new AssertionError();
         }
@@ -296,15 +297,15 @@ public abstract class Operation
             {
                 if (!(receiver.type instanceof CounterColumnType))
                     throw new InvalidRequestException(String.format("Invalid operation (%s) for non counter column %s", toString(receiver), receiver));
-                return new Constants.Substracter(receiver.kind == ColumnDefinition.Kind.COMPACT_VALUE ? null : receiver.name, v);
+                return new Constants.Substracter(receiver, v);
             }
 
             switch (((CollectionType)receiver.type).kind)
             {
                 case LIST:
-                    return new Lists.Discarder(receiver.name, v);
+                    return new Lists.Discarder(receiver, v);
                 case SET:
-                    return new Sets.Discarder(receiver.name, v);
+                    return new Sets.Discarder(receiver, v);
                 case MAP:
                     throw new InvalidRequestException(String.format("Invalid operation (%s) for map column %s", toString(receiver), receiver));
             }
@@ -338,7 +339,7 @@ public abstract class Operation
             if (!(receiver.type instanceof ListType))
                 throw new InvalidRequestException(String.format("Invalid operation (%s) for non list column %s", toString(receiver), receiver));
 
-            return new Lists.Prepender(receiver.name, v);
+            return new Lists.Prepender(receiver, v);
         }
 
         protected String toString(ColumnSpecification column)
@@ -366,10 +367,10 @@ public abstract class Operation
             return id;
         }
 
-        public Operation prepare(ColumnSpecification receiver) throws InvalidRequestException
+        public Operation prepare(ColumnDefinition receiver) throws InvalidRequestException
         {
             // No validation, deleting a column is always "well typed"
-            return new Constants.Deleter(id, receiver.type instanceof CollectionType);
+            return new Constants.Deleter(receiver);
         }
     }
 
@@ -389,7 +390,7 @@ public abstract class Operation
             return id;
         }
 
-        public Operation prepare(ColumnSpecification receiver) throws InvalidRequestException
+        public Operation prepare(ColumnDefinition receiver) throws InvalidRequestException
         {
             if (!(receiver.type instanceof CollectionType))
                 throw new InvalidRequestException(String.format("Invalid deletion operation for non collection column %s", receiver));
@@ -398,13 +399,13 @@ public abstract class Operation
             {
                 case LIST:
                     Term idx = element.prepare(Lists.indexSpecOf(receiver));
-                    return new Lists.DiscarderByIndex(id, idx);
+                    return new Lists.DiscarderByIndex(receiver, idx);
                 case SET:
                     Term elt = element.prepare(Sets.valueSpecOf(receiver));
-                    return new Sets.Discarder(id, elt);
+                    return new Sets.Discarder(receiver, elt);
                 case MAP:
                     Term key = element.prepare(Maps.keySpecOf(receiver));
-                    return new Maps.DiscarderByKey(id, key);
+                    return new Maps.DiscarderByKey(receiver, key);
             }
             throw new AssertionError();
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 351fc86..772c103 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -34,6 +34,8 @@ import org.apache.cassandra.cql3.hooks.*;
 import org.apache.cassandra.cql3.statements.*;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.QueryState;
@@ -157,21 +159,25 @@ public class QueryProcessor
         }
     }
 
-    public static void validateCellNames(Iterable<ByteBuffer> cellNames) throws InvalidRequestException
+    public static void validateCellNames(Iterable<CellName> cellNames) throws InvalidRequestException
     {
-        for (ByteBuffer name : cellNames)
+        for (CellName name : cellNames)
             validateCellName(name);
     }
 
-    public static void validateCellName(ByteBuffer name) throws InvalidRequestException
+    public static void validateCellName(CellName name) throws InvalidRequestException
     {
-        if (name.remaining() > Column.MAX_NAME_LENGTH)
+        validateComposite(name);
+        if (name.isEmpty())
+            throw new InvalidRequestException("Invalid empty value for clustering column of COMPACT TABLE");
+    }
+
+    public static void validateComposite(Composite name) throws InvalidRequestException
+    {
+        if (name.dataSize() > Column.MAX_NAME_LENGTH)
             throw new InvalidRequestException(String.format("The sum of all clustering columns is too long (%s > %s)",
-                                                            name.remaining(),
+                                                            name.dataSize(),
                                                             Column.MAX_NAME_LENGTH));
-
-        if (name.remaining() == 0)
-            throw new InvalidRequestException("Invalid empty value for clustering column of COMPACT TABLE");
     }
 
     private static ResultMessage processStatement(CQLStatement statement,
@@ -233,7 +239,7 @@ public class QueryProcessor
         {
             ResultMessage result = process(query, QueryState.forInternalCalls(), new QueryOptions(cl, Collections.<ByteBuffer>emptyList()));
             if (result instanceof ResultMessage.Rows)
-                return new UntypedResultSet(((ResultMessage.Rows)result).result);
+                return UntypedResultSet.create(((ResultMessage.Rows)result).result);
             else
                 return null;
         }
@@ -254,7 +260,7 @@ public class QueryProcessor
             statement.validate(state);
             ResultMessage result = statement.executeInternal(qState);
             if (result instanceof ResultMessage.Rows)
-                return new UntypedResultSet(((ResultMessage.Rows)result).result);
+                return UntypedResultSet.create(((ResultMessage.Rows)result).result);
             else
                 return null;
         }
@@ -279,7 +285,7 @@ public class QueryProcessor
         {
             SelectStatement ss = (SelectStatement) getStatement(query, null).statement;
             ResultSet cqlRows = ss.process(rows);
-            return new UntypedResultSet(cqlRows);
+            return UntypedResultSet.create(cqlRows);
         }
         catch (RequestValidationException e)
         {
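
The validateCellName/validateComposite split above exists because range tombstone bounds are
plain Composites and may legitimately be empty, while real cell names may not. Usage mirrors
UpdateParameters further down (cellName and slice stand in for a CellName and a ColumnSlice
already in scope):

    QueryProcessor.validateCellName(cellName);        // size check + non-empty check
    QueryProcessor.validateComposite(slice.start);    // size check only, an empty bound is legal
    QueryProcessor.validateComposite(slice.finish);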

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/Sets.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Sets.java b/src/java/org/apache/cassandra/cql3/Sets.java
index 01ac1e7..2531b2a 100644
--- a/src/java/org/apache/cassandra/cql3/Sets.java
+++ b/src/java/org/apache/cassandra/cql3/Sets.java
@@ -29,7 +29,10 @@ import java.util.TreeSet;
 
 import com.google.common.base.Joiner;
 
+import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.ColumnFamily;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.CollectionType;
 import org.apache.cassandra.db.marshal.MapType;
 import org.apache.cassandra.db.marshal.SetType;
@@ -222,33 +225,33 @@ public abstract class Sets
 
     public static class Setter extends Operation
     {
-        public Setter(ColumnIdentifier column, Term t)
+        public Setter(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             // delete + add
-            ColumnNameBuilder column = prefix.add(columnName);
-            cf.addAtom(params.makeTombstoneForOverwrite(column.build(), column.buildAsEndOfRange()));
-            Adder.doAdd(t, cf, column, params);
+            CellName name = cf.getComparator().create(prefix, column.name);
+            cf.addAtom(params.makeTombstoneForOverwrite(name.slice()));
+            Adder.doAdd(t, cf, prefix, column.name, params);
         }
     }
 
     public static class Adder extends Operation
     {
-        public Adder(ColumnIdentifier column, Term t)
+        public Adder(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
-            doAdd(t, cf, prefix.add(columnName), params);
+            doAdd(t, cf, prefix, column.name, params);
         }
 
-        static void doAdd(Term t, ColumnFamily cf, ColumnNameBuilder columnName, UpdateParameters params) throws InvalidRequestException
+        static void doAdd(Term t, ColumnFamily cf, Composite prefix, ColumnIdentifier columnName, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal value = t.bind(params.variables);
             if (value == null)
@@ -259,7 +262,7 @@ public abstract class Sets
             Set<ByteBuffer> toAdd = ((Sets.Value)value).elements;
             for (ByteBuffer bb : toAdd)
             {
-                ByteBuffer cellName = columnName.copy().add(bb).build();
+                CellName cellName = cf.getComparator().create(prefix, columnName, bb);
                 cf.addColumn(params.makeColumn(cellName, ByteBufferUtil.EMPTY_BYTE_BUFFER));
             }
         }
@@ -267,12 +270,12 @@ public abstract class Sets
 
     public static class Discarder extends Operation
     {
-        public Discarder(ColumnIdentifier column, Term t)
+        public Discarder(ColumnDefinition column, Term t)
         {
             super(column, t);
         }
 
-        public void execute(ByteBuffer rowKey, ColumnFamily cf, ColumnNameBuilder prefix, UpdateParameters params) throws InvalidRequestException
+        public void execute(ByteBuffer rowKey, ColumnFamily cf, Composite prefix, UpdateParameters params) throws InvalidRequestException
         {
             Term.Terminal value = t.bind(params.variables);
             if (value == null)
@@ -283,11 +286,9 @@ public abstract class Sets
                                       ? Collections.singleton(((Constants.Value)value).bytes)
                                       : ((Sets.Value)value).elements;
 
-            ColumnNameBuilder column = prefix.add(columnName);
             for (ByteBuffer bb : toDiscard)
             {
-                ByteBuffer cellName = column.copy().add(bb).build();
-                cf.addColumn(params.makeTombstone(cellName));
+                cf.addColumn(params.makeTombstone(cf.getComparator().create(prefix, column.name, bb)));
             }
         }
     }
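
Set cells follow the same naming scheme as list and map cells; the only difference is that the
element itself is the last name component and the cell value stays empty. The loop from
Adder.doAdd above, spelled out with the same variable names:

    for (ByteBuffer element : toAdd)
    {
        CellName name = cf.getComparator().create(prefix, columnName, element);
        cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    }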

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
index 8f1aaf5..5519f2e 100644
--- a/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
+++ b/src/java/org/apache/cassandra/cql3/UntypedResultSet.java
@@ -28,45 +28,98 @@ import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.cql3.ResultSet;
 
 /** a utility for doing internal cql-based queries */
-public class UntypedResultSet implements Iterable<UntypedResultSet.Row>
+public abstract class UntypedResultSet implements Iterable<UntypedResultSet.Row>
 {
-    private final ResultSet cqlRows;
-
-    public UntypedResultSet(ResultSet cqlRows)
+    public static UntypedResultSet create(ResultSet rs)
     {
-        this.cqlRows = cqlRows;
+        return new FromResultSet(rs);
     }
 
-    public boolean isEmpty()
+    public static UntypedResultSet create(List<Map<String, ByteBuffer>> results)
     {
-        return cqlRows.size() == 0;
+        return new FromResultList(results);
     }
 
-    public int size()
+    public boolean isEmpty()
     {
-        return cqlRows.size();
+        return size() == 0;
     }
 
-    public Row one()
+    public abstract int size();
+    public abstract Row one();
+
+    private static class FromResultSet extends UntypedResultSet
     {
-        if (cqlRows.rows.size() != 1)
-            throw new IllegalStateException("One row required, " + cqlRows.rows.size() + " found");
-        return new Row(cqlRows.metadata.names, cqlRows.rows.get(0));
+        private final ResultSet cqlRows;
+
+        private FromResultSet(ResultSet cqlRows)
+        {
+            this.cqlRows = cqlRows;
+        }
+
+        public int size()
+        {
+            return cqlRows.size();
+        }
+
+        public Row one()
+        {
+            if (cqlRows.rows.size() != 1)
+                throw new IllegalStateException("One row required, " + cqlRows.rows.size() + " found");
+            return new Row(cqlRows.metadata.names, cqlRows.rows.get(0));
+        }
+
+        public Iterator<Row> iterator()
+        {
+            return new AbstractIterator<Row>()
+            {
+                Iterator<List<ByteBuffer>> iter = cqlRows.rows.iterator();
+
+                protected Row computeNext()
+                {
+                    if (!iter.hasNext())
+                        return endOfData();
+                    return new Row(cqlRows.metadata.names, iter.next());
+                }
+            };
+        }
     }
 
-    public Iterator<Row> iterator()
+    private static class FromResultList extends UntypedResultSet
     {
-        return new AbstractIterator<Row>()
+        private final List<Map<String, ByteBuffer>> cqlRows;
+
+        private FromResultList(List<Map<String, ByteBuffer>> cqlRows)
+        {
+            this.cqlRows = cqlRows;
+        }
+
+        public int size()
         {
-            Iterator<List<ByteBuffer>> iter = cqlRows.rows.iterator();
+            return cqlRows.size();
+        }
+
+        public Row one()
+        {
+            if (cqlRows.size() != 1)
+                throw new IllegalStateException("One row required, " + cqlRows.size() + " found");
+            return new Row(cqlRows.get(0));
+        }
 
-            protected Row computeNext()
+        public Iterator<Row> iterator()
+        {
+            return new AbstractIterator<Row>()
             {
-                if (!iter.hasNext())
-                    return endOfData();
-                return new Row(cqlRows.metadata.names, iter.next());
-            }
-        };
+                Iterator<Map<String, ByteBuffer>> iter = cqlRows.iterator();
+
+                protected Row computeNext()
+                {
+                    if (!iter.hasNext())
+                        return endOfData();
+                    return new Row(iter.next());
+                }
+            };
+        }
     }
 
     public static class Row
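
UntypedResultSet becomes abstract with two factories: callers that used the old constructor
switch to create(ResultSet) (see the QueryProcessor hunk above), and the new list-of-maps
flavour lets internal code build a result set without a ResultSet at all. A small usage sketch
(the column name and value are made up):

    List<Map<String, ByteBuffer>> raw = new ArrayList<Map<String, ByteBuffer>>();
    raw.add(Collections.singletonMap("name", ByteBufferUtil.bytes("value")));

    UntypedResultSet rs = UntypedResultSet.create(raw);
    assert rs.size() == 1;
    UntypedResultSet.Row row = rs.one();   // Row is built straight from the map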

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/UpdateParameters.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/UpdateParameters.java b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
index 1288576..cc3fe4d 100644
--- a/src/java/org/apache/cassandra/cql3/UpdateParameters.java
+++ b/src/java/org/apache/cassandra/cql3/UpdateParameters.java
@@ -23,10 +23,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.cql3.statements.ColumnGroupMap;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.utils.Pair;
 
 /**
  * A simple container that simplify passing parameters for collections methods.
@@ -40,9 +40,9 @@ public class UpdateParameters
     public final int localDeletionTime;
 
     // For lists operation that require a read-before-write. Will be null otherwise.
-    private final Map<ByteBuffer, ColumnGroupMap> prefetchedLists;
+    private final Map<ByteBuffer, CQL3Row> prefetchedLists;
 
-    public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, ColumnGroupMap> prefetchedLists)
+    public UpdateParameters(CFMetaData metadata, List<ByteBuffer> variables, long timestamp, int ttl, Map<ByteBuffer, CQL3Row> prefetchedLists)
     {
         this.metadata = metadata;
         this.variables = variables;
@@ -52,38 +52,38 @@ public class UpdateParameters
         this.prefetchedLists = prefetchedLists;
     }
 
-    public Column makeColumn(ByteBuffer name, ByteBuffer value) throws InvalidRequestException
+    public Column makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException
     {
         QueryProcessor.validateCellName(name);
         return Column.create(name, value, timestamp, ttl, metadata);
     }
 
-    public Column makeTombstone(ByteBuffer name) throws InvalidRequestException
+    public Column makeTombstone(CellName name) throws InvalidRequestException
     {
         QueryProcessor.validateCellName(name);
         return new DeletedColumn(name, localDeletionTime, timestamp);
     }
 
-    public RangeTombstone makeRangeTombstone(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
+    public RangeTombstone makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException
     {
-        QueryProcessor.validateCellName(start);
-        QueryProcessor.validateCellName(end);
-        return new RangeTombstone(start, end, timestamp, localDeletionTime);
+        QueryProcessor.validateComposite(slice.start);
+        QueryProcessor.validateComposite(slice.finish);
+        return new RangeTombstone(slice.start, slice.finish, timestamp, localDeletionTime);
     }
 
-    public RangeTombstone makeTombstoneForOverwrite(ByteBuffer start, ByteBuffer end) throws InvalidRequestException
+    public RangeTombstone makeTombstoneForOverwrite(ColumnSlice slice) throws InvalidRequestException
     {
-        QueryProcessor.validateCellName(start);
-        QueryProcessor.validateCellName(end);
-        return new RangeTombstone(start, end, timestamp - 1, localDeletionTime);
+        QueryProcessor.validateComposite(slice.start);
+        QueryProcessor.validateComposite(slice.finish);
+        return new RangeTombstone(slice.start, slice.finish, timestamp - 1, localDeletionTime);
     }
 
-    public List<Pair<ByteBuffer, Column>> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
+    public List<Column> getPrefetchedList(ByteBuffer rowKey, ColumnIdentifier cql3ColumnName)
     {
         if (prefetchedLists == null)
             return Collections.emptyList();
 
-        ColumnGroupMap m = prefetchedLists.get(rowKey);
-        return m == null ? Collections.<Pair<ByteBuffer, Column>>emptyList() : m.getCollection(cql3ColumnName.bytes);
+        CQL3Row row = prefetchedLists.get(rowKey);
+        return row == null ? Collections.<Column>emptyList() : row.getCollection(cql3ColumnName);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/functions/TimeuuidFcts.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/functions/TimeuuidFcts.java b/src/java/org/apache/cassandra/cql3/functions/TimeuuidFcts.java
index 9c69c19..be20102 100644
--- a/src/java/org/apache/cassandra/cql3/functions/TimeuuidFcts.java
+++ b/src/java/org/apache/cassandra/cql3/functions/TimeuuidFcts.java
@@ -91,3 +91,4 @@ public abstract class TimeuuidFcts
         }
     };
 }
+

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/functions/TokenFct.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/functions/TokenFct.java b/src/java/org/apache/cassandra/cql3/functions/TokenFct.java
index cb6e6c4..5093a72 100644
--- a/src/java/org/apache/cassandra/cql3/functions/TokenFct.java
+++ b/src/java/org/apache/cassandra/cql3/functions/TokenFct.java
@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
+import org.apache.cassandra.db.composites.CBuilder;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -61,7 +61,7 @@ public class TokenFct extends AbstractFunction
 
     public ByteBuffer execute(List<ByteBuffer> parameters) throws InvalidRequestException
     {
-        ColumnNameBuilder builder = cfm.getKeyNameBuilder();
+        CBuilder builder = cfm.getKeyValidatorAsCType().builder();
         for (int i = 0; i < parameters.size(); i++)
         {
             ByteBuffer bb = parameters.get(i);
@@ -69,6 +69,6 @@ public class TokenFct extends AbstractFunction
                 return null;
             builder.add(bb);
         }
-        return partitioner.getTokenFactory().toByteArray(partitioner.getToken(builder.build()));
+        return partitioner.getTokenFactory().toByteArray(partitioner.getToken(builder.build().toByteBuffer()));
     }
 }
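
The builder flow in TokenFct is the general recipe for turning key components into the
ByteBuffer form the partitioner still expects (cfm, partitioner and keyComponents are assumed
to be in scope, as they are in the function above):

    CBuilder builder = cfm.getKeyValidatorAsCType().builder();
    for (ByteBuffer component : keyComponents)
        builder.add(component);
    ByteBuffer key = builder.build().toByteBuffer();
    ByteBuffer tokenBytes = partitioner.getTokenFactory().toByteArray(partitioner.getToken(key));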

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
index 049f90d..d1452d2 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
@@ -17,9 +17,7 @@
  */
 package org.apache.cassandra.cql3.statements;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -77,7 +75,7 @@ public class AlterTableStatement extends SchemaAlteringStatement
         switch (oType)
         {
             case ADD:
-                if (cfm.isDense())
+                if (cfm.comparator.isDense())
                     throw new InvalidRequestException("Cannot add new column to a compact CF");
                 if (def != null)
                 {
@@ -94,28 +92,15 @@ public class AlterTableStatement extends SchemaAlteringStatement
                 AbstractType<?> type = validator.getType();
                 if (type instanceof CollectionType)
                 {
-                    if (!cfm.hasCompositeComparator())
+                    if (!cfm.comparator.supportCollections())
                         throw new InvalidRequestException("Cannot use collection types with non-composite PRIMARY KEY");
                     if (cfm.isSuper())
                         throw new InvalidRequestException("Cannot use collection types with Super column family");
 
-                    Map<ByteBuffer, CollectionType> collections = cfm.hasCollections()
-                                                                ? new HashMap<ByteBuffer, CollectionType>(cfm.getCollectionType().defined)
-                                                                : new HashMap<ByteBuffer, CollectionType>();
-
-                    collections.put(columnName.bytes, (CollectionType)type);
-                    ColumnToCollectionType newColType = ColumnToCollectionType.getInstance(collections);
-                    List<AbstractType<?>> ctypes = new ArrayList<AbstractType<?>>(((CompositeType)cfm.comparator).types);
-                    if (cfm.hasCollections())
-                        ctypes.set(ctypes.size() - 1, newColType);
-                    else
-                        ctypes.add(newColType);
-                    cfm.comparator = CompositeType.getInstance(ctypes);
+                    cfm.comparator = cfm.comparator.addCollection(columnName, (CollectionType)type);
                 }
 
-                Integer componentIndex = meta.hasCompositeComparator()
-                                       ? ((CompositeType)meta.comparator).types.size() - (meta.hasCollections() ? 2 : 1)
-                                       : null;
+                Integer componentIndex = cfm.comparator.isCompound() ? cfm.comparator.clusteringPrefixSize() : null;
                 cfm.addColumnDefinition(ColumnDefinition.regularDef(cfm, columnName.bytes, type, componentIndex));
                 break;
 
@@ -153,19 +138,17 @@ public class AlterTableStatement extends SchemaAlteringStatement
                         }
                         break;
                     case CLUSTERING_COLUMN:
-                        assert cfm.hasCompositeComparator();
-                        List<AbstractType<?>> oldTypes = ((CompositeType) cfm.comparator).types;
+                        AbstractType<?> oldType = cfm.comparator.subtype(def.position());
                         // Note that CFMetaData.validateCompatibility already validate the change we're about to do. However, the error message it
                         // sends is a bit cryptic for a CQL3 user, so validating here for a sake of returning a better error message
                         // Do note that we need isCompatibleWith here, not just isValueCompatibleWith.
-                        if (!validator.getType().isCompatibleWith(oldTypes.get(def.position())))
+                        if (!validator.getType().isCompatibleWith(oldType))
                             throw new ConfigurationException(String.format("Cannot change %s from type %s to type %s: types are not order-compatible.",
                                                                            columnName,
-                                                                           oldTypes.get(def.position()).asCQL3Type(),
+                                                                           oldType.asCQL3Type(),
                                                                            validator));
-                        List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(oldTypes);
-                        newTypes.set(def.position(), validator.getType());
-                        cfm.comparator = CompositeType.getInstance(newTypes);
+
+                        cfm.comparator = cfm.comparator.setSubtype(def.position(), validator.getType());
                         break;
                     case COMPACT_VALUE:
                         // See below
@@ -195,9 +178,7 @@ public class AlterTableStatement extends SchemaAlteringStatement
                 break;
 
             case DROP:
-                if (cfm.isDense())
-                    throw new InvalidRequestException("Cannot drop columns from a compact CF");
-                if (!cfm.hasCompositeComparator())
+                if (!cfm.isCQL3Table())
                     throw new InvalidRequestException("Cannot drop columns from a non-CQL3 CF");
                 if (def == null)
                     throw new InvalidRequestException(String.format("Column %s was not found in table %s", columnName, columnFamily()));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
index fbecefe..ea9f0c7 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterTypeStatement.java
@@ -23,6 +23,7 @@ import java.util.*;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
@@ -153,7 +154,7 @@ public abstract class AlterTypeStatement extends SchemaAlteringStatement
                 cfm.keyValidator(updateWith(cfm.getKeyValidator(), toReplace, updated));
                 break;
             case CLUSTERING_COLUMN:
-                cfm.comparator = updateWith(cfm.comparator, toReplace, updated);
+                cfm.comparator = CellNames.fromAbstractType(updateWith(cfm.comparator.asAbstractType(), toReplace, updated), cfm.comparator.isDense());
                 break;
         }
         return true;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/cql3/statements/ColumnGroupMap.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/ColumnGroupMap.java b/src/java/org/apache/cassandra/cql3/statements/ColumnGroupMap.java
deleted file mode 100644
index 8974523..0000000
--- a/src/java/org/apache/cassandra/cql3/statements/ColumnGroupMap.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.cql3.statements;
-
-import java.nio.ByteBuffer;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.cassandra.db.Column;
-import org.apache.cassandra.db.marshal.CompositeType;
-import org.apache.cassandra.utils.Pair;
-
-public class ColumnGroupMap
-{
-    private final ByteBuffer[] fullPath;
-    private final Map<ByteBuffer, Value> map = new HashMap<ByteBuffer, Value>();
-
-    private ColumnGroupMap(ByteBuffer[] fullPath)
-    {
-        this.fullPath = fullPath;
-    }
-
-    private void add(ByteBuffer[] fullName, int idx, Column column)
-    {
-        ByteBuffer columnName = fullName[idx];
-        if (fullName.length == idx + 2)
-        {
-            // It's a collection
-            Value v = map.get(columnName);
-            if (v == null)
-            {
-                v = new Collection();
-                map.put(columnName, v);
-            }
-            assert v instanceof Collection;
-
-            ((Collection)v).add(Pair.create(fullName[idx + 1], column));
-        }
-        else
-        {
-            assert !map.containsKey(columnName);
-            map.put(columnName, new Simple(column));
-        }
-    }
-
-    public ByteBuffer getKeyComponent(int pos)
-    {
-        return fullPath[pos];
-    }
-
-    public Column getSimple(ByteBuffer key)
-    {
-        Value v = map.get(key);
-        if (v == null)
-            return null;
-
-        assert v instanceof Simple;
-        return ((Simple)v).column;
-    }
-
-    public List<Pair<ByteBuffer, Column>> getCollection(ByteBuffer key)
-    {
-        Value v = map.get(key);
-        if (v == null)
-            return null;
-
-        assert v instanceof Collection;
-        return (List<Pair<ByteBuffer, Column>>)v;
-    }
-
-    private interface Value {};
-
-    private static class Simple implements Value
-    {
-        public final Column column;
-
-        Simple(Column column)
-        {
-            this.column = column;
-        }
-    }
-
-    private static class Collection extends ArrayList<Pair<ByteBuffer, Column>> implements Value {}
-
-    public static class Builder
-    {
-        private final CompositeType composite;
-        private final int idx;
-        private final long now;
-        private ByteBuffer[] previous;
-
-        private final List<ColumnGroupMap> groups = new ArrayList<ColumnGroupMap>();
-        private ColumnGroupMap currentGroup;
-
-        public Builder(CompositeType composite, boolean hasCollections, long now)
-        {
-            this.composite = composite;
-            this.idx = composite.types.size() - (hasCollections ? 2 : 1);
-            this.now = now;
-        }
-
-        public void add(Column c)
-        {
-            if (c.isMarkedForDelete(now))
-                return;
-
-            ByteBuffer[] current = composite.split(c.name());
-
-            if (currentGroup == null)
-            {
-                currentGroup = new ColumnGroupMap(current);
-                currentGroup.add(current, idx, c);
-                previous = current;
-                return;
-            }
-
-            if (!isSameGroup(current))
-            {
-                groups.add(currentGroup);
-                currentGroup = new ColumnGroupMap(current);
-            }
-            currentGroup.add(current, idx, c);
-            previous = current;
-        }
-
-        /**
-         * For sparse composite, returns wheter the column belong to the same
-         * cqlRow than the previously added, based on the full list of component
-         * in the name.
-         * Two columns do belong together if they differ only by the last
-         * component.
-         */
-        private boolean isSameGroup(ByteBuffer[] c)
-        {
-            for (int i = 0; i < idx; i++)
-            {
-                if (!c[i].equals(previous[i]))
-                    return false;
-            }
-            return true;
-        }
-
-        public List<ColumnGroupMap> groups()
-        {
-            if (currentGroup != null)
-            {
-                groups.add(currentGroup);
-                currentGroup = null;
-            }
-            return groups;
-        }
-    }
-}
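
ColumnGroupMap can go away because grouping cells into CQL3 rows now lives behind the CQL3Row
interface; the read-before-write path keeps the same shape. This mirrors getPrefetchedList in
the UpdateParameters hunk above (rowKey, prefetchedLists and columnName assumed in scope):

    CQL3Row row = prefetchedLists.get(rowKey);
    List<Column> cells = row == null
                       ? Collections.<Column>emptyList()
                       : row.getCollection(columnName);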


[09/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/RangeTombstone.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RangeTombstone.java b/src/java/org/apache/cassandra/db/RangeTombstone.java
index c943ad7..9a60209 100644
--- a/src/java/org/apache/cassandra/db/RangeTombstone.java
+++ b/src/java/org/apache/cassandra/db/RangeTombstone.java
@@ -20,33 +20,31 @@ package org.apache.cassandra.db;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.util.*;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.ISSTableSerializer;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Interval;
 
-public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implements OnDiskAtom
+public class RangeTombstone extends Interval<Composite, DeletionTime> implements OnDiskAtom
 {
-    public static final Serializer serializer = new Serializer();
-
-    public RangeTombstone(ByteBuffer start, ByteBuffer stop, long markedForDeleteAt, int localDeletionTime)
+    public RangeTombstone(Composite start, Composite stop, long markedForDeleteAt, int localDeletionTime)
     {
         this(start, stop, new DeletionTime(markedForDeleteAt, localDeletionTime));
     }
 
-    public RangeTombstone(ByteBuffer start, ByteBuffer stop, DeletionTime delTime)
+    public RangeTombstone(Composite start, Composite stop, DeletionTime delTime)
     {
         super(start, stop, delTime);
     }
 
-    public ByteBuffer name()
+    public Composite name()
     {
         return min;
     }
@@ -66,20 +64,6 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
         return data.markedForDeleteAt;
     }
 
-    public int serializedSize(TypeSizes typeSizes)
-    {
-        throw new UnsupportedOperationException();
-    }
-
-    public long serializedSizeForSSTable()
-    {
-        TypeSizes typeSizes = TypeSizes.NATIVE;
-        return typeSizes.sizeof((short)min.remaining()) + min.remaining()
-             + 1 // serialization flag
-             + typeSizes.sizeof((short)max.remaining()) + max.remaining()
-             + DeletionTime.serializer.serializedSize(data, typeSizes);
-    }
-
     public void validateFields(CFMetaData metadata) throws MarshalException
     {
         metadata.comparator.validate(min);
@@ -88,8 +72,8 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
 
     public void updateDigest(MessageDigest digest)
     {
-        digest.update(min.duplicate());
-        digest.update(max.duplicate());
+        digest.update(min.toByteBuffer().duplicate());
+        digest.update(max.toByteBuffer().duplicate());
         DataOutputBuffer buffer = new DataOutputBuffer();
         try
         {
@@ -106,7 +90,7 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
      * This tombstone supersedes another one if it is more recent and covers a
      * bigger range than rt.
      */
-    public boolean supersedes(RangeTombstone rt, Comparator<ByteBuffer> comparator)
+    public boolean supersedes(RangeTombstone rt, Comparator<Composite> comparator)
     {
         if (rt.data.markedForDeleteAt > data.markedForDeleteAt)
             return false;
@@ -116,7 +100,7 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
 
     public static class Tracker
     {
-        private final Comparator<ByteBuffer> comparator;
+        private final Comparator<Composite> comparator;
         private final Deque<RangeTombstone> ranges = new ArrayDeque<RangeTombstone>();
         private final SortedSet<RangeTombstone> maxOrderingSet = new TreeSet<RangeTombstone>(new Comparator<RangeTombstone>()
         {
@@ -127,7 +111,7 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
         });
         private int atomCount;
 
-        public Tracker(Comparator<ByteBuffer> comparator)
+        public Tracker(Comparator<Composite> comparator)
         {
             this.comparator = comparator;
         }
@@ -174,7 +158,7 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
 
             for (RangeTombstone tombstone : toWrite)
             {
-                size += tombstone.serializedSizeForSSTable();
+                size += atomSerializer.serializedSizeForSSTable(tombstone);
                 atomCount++;
                 if (out != null)
                     atomSerializer.serializeForSSTable(tombstone, out);
@@ -254,33 +238,50 @@ public class RangeTombstone extends Interval<ByteBuffer, DeletionTime> implement
 
     public static class Serializer implements ISSTableSerializer<RangeTombstone>
     {
+        private final CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
+
         public void serializeForSSTable(RangeTombstone t, DataOutput out) throws IOException
         {
-            ByteBufferUtil.writeWithShortLength(t.min, out);
+            type.serializer().serialize(t.min, out);
             out.writeByte(ColumnSerializer.RANGE_TOMBSTONE_MASK);
-            ByteBufferUtil.writeWithShortLength(t.max, out);
+            type.serializer().serialize(t.max, out);
             DeletionTime.serializer.serialize(t.data, out);
         }
 
         public RangeTombstone deserializeFromSSTable(DataInput in, Descriptor.Version version) throws IOException
         {
-            ByteBuffer min = ByteBufferUtil.readWithShortLength(in);
-            if (min.remaining() <= 0)
-                throw ColumnSerializer.CorruptColumnException.create(in, min);
+            Composite min = type.serializer().deserialize(in);
 
             int b = in.readUnsignedByte();
             assert (b & ColumnSerializer.RANGE_TOMBSTONE_MASK) != 0;
             return deserializeBody(in, min, version);
         }
 
-        public RangeTombstone deserializeBody(DataInput in, ByteBuffer min, Descriptor.Version version) throws IOException
+        public RangeTombstone deserializeBody(DataInput in, Composite min, Descriptor.Version version) throws IOException
         {
-            ByteBuffer max = ByteBufferUtil.readWithShortLength(in);
-            if (max.remaining() <= 0)
-                throw ColumnSerializer.CorruptColumnException.create(in, max);
-
+            Composite max = type.serializer().deserialize(in);
             DeletionTime dt = DeletionTime.serializer.deserialize(in);
             return new RangeTombstone(min, max, dt);
         }
+
+        public void skipBody(DataInput in, Descriptor.Version version) throws IOException
+        {
+            type.serializer().skip(in);
+            DeletionTime.serializer.skip(in);
+        }
+
+        public long serializedSizeForSSTable(RangeTombstone t)
+        {
+            TypeSizes typeSizes = TypeSizes.NATIVE;
+            return type.serializer().serializedSize(t.min, typeSizes)
+                 + 1 // serialization flag
+                 + type.serializer().serializedSize(t.max, typeSizes)
+                 + DeletionTime.serializer.serializedSize(t.data, typeSizes);
+        }
     }
 }
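
For illustration, a rough sketch (not part of the patch) of the now comparator-bound serializer; it assumes the patched classes are on the classpath (imports elided), that a CellNameType such as SimpleDenseCellNameType can be used wherever a CType is expected, and that the UTF8Type comparator is an arbitrary choice:

    // Sketch only; 'out' is any DataOutput (e.g. a DataOutputStream over a file or buffer).
    static long writeRangeTombstone(DataOutput out) throws IOException
    {
        // The serializer is no longer a static field: it carries a CType so that the
        // min/max bounds are written with type.serializer() rather than as raw
        // short-length ByteBuffers.
        CellNameType type = new SimpleDenseCellNameType(UTF8Type.instance);
        RangeTombstone.Serializer serializer = new RangeTombstone.Serializer(type);

        RangeTombstone rt = new RangeTombstone(CellNames.simpleDense(ByteBufferUtil.bytes("a")).start(),
                                               CellNames.simpleDense(ByteBufferUtil.bytes("z")).end(),
                                               new DeletionTime(FBUtilities.timestampMicros(),
                                                                (int) (System.currentTimeMillis() / 1000)));

        serializer.serializeForSSTable(rt, out);
        return serializer.serializedSizeForSSTable(rt);  // replaces rt.serializedSizeForSSTable()
    }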

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/RangeTombstoneList.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RangeTombstoneList.java b/src/java/org/apache/cassandra/db/RangeTombstoneList.java
index dad9004..4199ea4 100644
--- a/src/java/org/apache/cassandra/db/RangeTombstoneList.java
+++ b/src/java/org/apache/cassandra/db/RangeTombstoneList.java
@@ -20,16 +20,16 @@ package org.apache.cassandra.db;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Iterator;
 
 import com.google.common.collect.AbstractIterator;
 
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -54,20 +54,18 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
 {
     private static final Logger logger = LoggerFactory.getLogger(RangeTombstoneList.class);
 
-    public static final Serializer serializer = new Serializer();
-
-    private final Comparator<ByteBuffer> comparator;
+    private final Comparator<Composite> comparator;
 
     // Note: we don't want to use a List for the markedAts and delTimes to avoid boxing. We could
     // use a List for starts and ends, but having arrays everywhere is almost simpler.
-    private ByteBuffer[] starts;
-    private ByteBuffer[] ends;
+    private Composite[] starts;
+    private Composite[] ends;
     private long[] markedAts;
     private int[] delTimes;
 
     private int size;
 
-    private RangeTombstoneList(Comparator<ByteBuffer> comparator, ByteBuffer[] starts, ByteBuffer[] ends, long[] markedAts, int[] delTimes, int size)
+    private RangeTombstoneList(Comparator<Composite> comparator, Composite[] starts, Composite[] ends, long[] markedAts, int[] delTimes, int size)
     {
         assert starts.length == ends.length && starts.length == markedAts.length && starts.length == delTimes.length;
         this.comparator = comparator;
@@ -78,9 +76,9 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
         this.size = size;
     }
 
-    public RangeTombstoneList(Comparator<ByteBuffer> comparator, int capacity)
+    public RangeTombstoneList(Comparator<Composite> comparator, int capacity)
     {
-        this(comparator, new ByteBuffer[capacity], new ByteBuffer[capacity], new long[capacity], new int[capacity], 0);
+        this(comparator, new Composite[capacity], new Composite[capacity], new long[capacity], new int[capacity], 0);
     }
 
     public boolean isEmpty()
@@ -93,7 +91,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
         return size;
     }
 
-    public Comparator<ByteBuffer> comparator()
+    public Comparator<Composite> comparator()
     {
         return comparator;
     }
@@ -119,7 +117,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
      * This method will be faster if the new tombstone sorts after all the currently existing ones (this is a common use case),
      * but it doesn't assume it.
      */
-    public void add(ByteBuffer start, ByteBuffer end, long markedAt, int delTime)
+    public void add(Composite start, Composite end, long markedAt, int delTime)
     {
         if (isEmpty())
         {
@@ -205,7 +203,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
      * Returns whether the given name/timestamp pair is deleted by one of the tombstones
      * of this RangeTombstoneList.
      */
-    public boolean isDeleted(ByteBuffer name, long timestamp)
+    public boolean isDeleted(Composite name, long timestamp)
     {
         int idx = searchInternal(name);
         return idx >= 0 && markedAts[idx] >= timestamp;
@@ -223,12 +221,12 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
      * Returns the DeletionTime for the tombstone overlapping {@code name} (there can't be more than one),
      * or null if {@code name} is not covered by any tombstone.
      */
-    public DeletionTime search(ByteBuffer name) {
+    public DeletionTime search(Composite name) {
         int idx = searchInternal(name);
         return idx < 0 ? null : new DeletionTime(markedAts[idx], delTimes[idx]);
     }
 
-    private int searchInternal(ByteBuffer name)
+    private int searchInternal(Composite name)
     {
         if (isEmpty())
             return -1;
@@ -259,7 +257,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
         int dataSize = TypeSizes.NATIVE.sizeof(size);
         for (int i = 0; i < size; i++)
         {
-            dataSize += starts[i].remaining() + ends[i].remaining();
+            dataSize += starts[i].dataSize() + ends[i].dataSize();
             dataSize += TypeSizes.NATIVE.sizeof(markedAts[i]);
             dataSize += TypeSizes.NATIVE.sizeof(delTimes[i]);
         }
@@ -384,7 +382,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
      * in terms of intervals for start:
      *    ends[i-1] <= start < ends[i]
      */
-    private void insertFrom(int i, ByteBuffer start, ByteBuffer end, long markedAt, int delTime)
+    private void insertFrom(int i, Composite start, Composite end, long markedAt, int delTime)
     {
         while (i < size)
         {
@@ -490,7 +488,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
     /*
      * Adds the new tombstone at index i, growing and/or moving elements to make room for it.
      */
-    private void addInternal(int i, ByteBuffer start, ByteBuffer end, long markedAt, int delTime)
+    private void addInternal(int i, Composite start, Composite end, long markedAt, int delTime)
     {
         assert i >= 0;
 
@@ -529,12 +527,12 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
         delTimes = grow(delTimes, size, newLength, i);
     }
 
-    private static ByteBuffer[] grow(ByteBuffer[] a, int size, int newLength, int i)
+    private static Composite[] grow(Composite[] a, int size, int newLength, int i)
     {
         if (i < 0 || i >= size)
             return Arrays.copyOf(a, newLength);
 
-        ByteBuffer[] newA = new ByteBuffer[newLength];
+        Composite[] newA = new Composite[newLength];
         System.arraycopy(a, 0, newA, 0, i);
         System.arraycopy(a, i, newA, i+1, size - i);
         return newA;
@@ -576,7 +574,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
         System.arraycopy(delTimes, i, delTimes, i+1, size - i);
     }
 
-    private void setInternal(int i, ByteBuffer start, ByteBuffer end, long markedAt, int delTime)
+    private void setInternal(int i, Composite start, Composite end, long markedAt, int delTime)
     {
         starts[i] = start;
         ends[i] = end;
@@ -586,7 +584,12 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
 
     public static class Serializer implements IVersionedSerializer<RangeTombstoneList>
     {
-        private Serializer() {}
+        private final CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
 
         public void serialize(RangeTombstoneList tombstones, DataOutput out, int version) throws IOException
         {
@@ -599,34 +602,25 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
             out.writeInt(tombstones.size);
             for (int i = 0; i < tombstones.size; i++)
             {
-                ByteBufferUtil.writeWithShortLength(tombstones.starts[i], out);
-                ByteBufferUtil.writeWithShortLength(tombstones.ends[i], out);
+                type.serializer().serialize(tombstones.starts[i], out);
+                type.serializer().serialize(tombstones.ends[i], out);
                 out.writeInt(tombstones.delTimes[i]);
                 out.writeLong(tombstones.markedAts[i]);
             }
         }
 
-        /*
-         * RangeTombstoneList depends on the column family comparator, but it is not serialized.
-         * Thus deserialize(DataInput, int, Comparator<ByteBuffer>) should be used instead of this method.
-         */
         public RangeTombstoneList deserialize(DataInput in, int version) throws IOException
         {
-            throw new UnsupportedOperationException();
-        }
-
-        public RangeTombstoneList deserialize(DataInput in, int version, Comparator<ByteBuffer> comparator) throws IOException
-        {
             int size = in.readInt();
             if (size == 0)
                 return null;
 
-            RangeTombstoneList tombstones = new RangeTombstoneList(comparator, size);
+            RangeTombstoneList tombstones = new RangeTombstoneList(type, size);
 
             for (int i = 0; i < size; i++)
             {
-                ByteBuffer start = ByteBufferUtil.readWithShortLength(in);
-                ByteBuffer end = ByteBufferUtil.readWithShortLength(in);
+                Composite start = type.serializer().deserialize(in);
+                Composite end = type.serializer().deserialize(in);
                 int delTime =  in.readInt();
                 long markedAt = in.readLong();
 
@@ -658,10 +652,8 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
             long size = typeSizes.sizeof(tombstones.size);
             for (int i = 0; i < tombstones.size; i++)
             {
-                int startSize = tombstones.starts[i].remaining();
-                size += typeSizes.sizeof((short)startSize) + startSize;
-                int endSize = tombstones.ends[i].remaining();
-                size += typeSizes.sizeof((short)endSize) + endSize;
+                size += type.serializer().serializedSize(tombstones.starts[i], typeSizes);
+                size += type.serializer().serializedSize(tombstones.ends[i], typeSizes);
                 size += typeSizes.sizeof(tombstones.delTimes[i]);
                 size += typeSizes.sizeof(tombstones.markedAts[i]);
             }
@@ -687,7 +679,7 @@ public class RangeTombstoneList implements Iterable<RangeTombstone>
     {
         private int idx;
 
-        public boolean isDeleted(ByteBuffer name, long timestamp)
+        public boolean isDeleted(Composite name, long timestamp)
         {
             while (idx < size)
             {
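
Along the same lines, a hedged sketch (not from the patch) of the Composite-based RangeTombstoneList API; any Comparator<Composite> works, and a CellNameType is assumed to qualify:

    // Sketch only: bounds and lookups are Composites rather than ByteBuffers.
    Comparator<Composite> comparator = new SimpleDenseCellNameType(UTF8Type.instance);
    RangeTombstoneList list = new RangeTombstoneList(comparator, 4);

    list.add(CellNames.simpleDense(ByteBufferUtil.bytes("a")).start(),
             CellNames.simpleDense(ByteBufferUtil.bytes("z")).end(),
             42L,                                        // markedAt
             (int) (System.currentTimeMillis() / 1000)); // delTime

    CellName name = CellNames.simpleDense(ByteBufferUtil.bytes("m"));
    boolean gone = list.isDeleted(name, 10L);  // true: "m" falls inside the range and markedAt (42) >= 10
    DeletionTime dt = list.search(name);       // non-null for any covered name

As with RangeTombstone, the list's Serializer is now constructed with a CType (new RangeTombstoneList.Serializer(type)) instead of being exposed as a static field.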

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/RowIndexEntry.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RowIndexEntry.java b/src/java/org/apache/cassandra/db/RowIndexEntry.java
index cfc7cf1..3ddaae5 100644
--- a/src/java/org/apache/cassandra/db/RowIndexEntry.java
+++ b/src/java/org/apache/cassandra/db/RowIndexEntry.java
@@ -27,6 +27,8 @@ import java.util.List;
 import com.google.common.primitives.Ints;
 
 import org.apache.cassandra.cache.IMeasurableMemory;
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.IndexHelper;
 import org.apache.cassandra.io.util.FileUtils;
@@ -34,8 +36,6 @@ import org.apache.cassandra.utils.ObjectSizes;
 
 public class RowIndexEntry implements IMeasurableMemory
 {
-    public static final Serializer serializer = new Serializer();
-
     public final long position;
 
     public RowIndexEntry(long position)
@@ -43,12 +43,7 @@ public class RowIndexEntry implements IMeasurableMemory
         this.position = position;
     }
 
-    public int serializedSize()
-    {
-        return TypeSizes.NATIVE.sizeof(position) + promotedSize();
-    }
-
-    protected int promotedSize()
+    protected int promotedSize(CType type)
     {
         return 0;
     }
@@ -93,17 +88,25 @@ public class RowIndexEntry implements IMeasurableMemory
 
     public static class Serializer
     {
+        private final CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
+
         public void serialize(RowIndexEntry rie, DataOutput out) throws IOException
         {
             out.writeLong(rie.position);
-            out.writeInt(rie.promotedSize());
+            out.writeInt(rie.promotedSize(type));
 
             if (rie.isIndexed())
             {
                 DeletionTime.serializer.serialize(rie.deletionTime(), out);
                 out.writeInt(rie.columnsIndex().size());
+                ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
                 for (IndexHelper.IndexInfo info : rie.columnsIndex())
-                    info.serialize(out);
+                    idxSerializer.serialize(info, out);
             }
         }
 
@@ -117,9 +120,10 @@ public class RowIndexEntry implements IMeasurableMemory
                 DeletionTime deletionTime = DeletionTime.serializer.deserialize(in);
 
                 int entries = in.readInt();
+                ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
                 List<IndexHelper.IndexInfo> columnsIndex = new ArrayList<IndexHelper.IndexInfo>(entries);
                 for (int i = 0; i < entries; i++)
-                    columnsIndex.add(IndexHelper.IndexInfo.deserialize(in));
+                    columnsIndex.add(idxSerializer.deserialize(in));
 
                 return new IndexedEntry(position, deletionTime, columnsIndex);
             }
@@ -129,13 +133,13 @@ public class RowIndexEntry implements IMeasurableMemory
             }
         }
 
-        public void skip(DataInput in) throws IOException
+        public static void skip(DataInput in) throws IOException
         {
             in.readLong();
             skipPromotedIndex(in);
         }
 
-        public void skipPromotedIndex(DataInput in) throws IOException
+        public static void skipPromotedIndex(DataInput in) throws IOException
         {
             int size = in.readInt();
             if (size <= 0)
@@ -143,6 +147,11 @@ public class RowIndexEntry implements IMeasurableMemory
 
             FileUtils.skipBytesFully(in, size);
         }
+
+        public int serializedSize(RowIndexEntry rie)
+        {
+            return TypeSizes.NATIVE.sizeof(rie.position) + rie.promotedSize(type);
+        }
     }
 
     /**
@@ -175,13 +184,14 @@ public class RowIndexEntry implements IMeasurableMemory
         }
 
         @Override
-        public int promotedSize()
+        public int promotedSize(CType type)
         {
             TypeSizes typeSizes = TypeSizes.NATIVE;
             long size = DeletionTime.serializer.serializedSize(deletionTime, typeSizes);
             size += typeSizes.sizeof(columnsIndex.size()); // number of entries
+            ISerializer<IndexHelper.IndexInfo> idxSerializer = type.indexSerializer();
             for (IndexHelper.IndexInfo info : columnsIndex)
-                size += info.serializedSize(typeSizes);
+                size += idxSerializer.serializedSize(info, typeSizes);
 
             return Ints.checkedCast(size);
         }
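
A short sketch (not part of the patch): RowIndexEntry no longer computes its own serialized size; both writing and sizing go through a Serializer constructed with the table's CType:

    // Sketch only: 'type' is the indexed table's CType, 'out' any DataOutput.
    static long writeIndexEntry(CType type, RowIndexEntry entry, DataOutput out) throws IOException
    {
        RowIndexEntry.Serializer serializer = new RowIndexEntry.Serializer(type);
        serializer.serialize(entry, out);          // indexed entries write IndexInfo via type.indexSerializer()
        return serializer.serializedSize(entry);   // replaces entry.serializedSize()
    }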

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/RowMutation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RowMutation.java b/src/java/org/apache/cassandra/db/RowMutation.java
index e9d177b..c2c1780 100644
--- a/src/java/org/apache/cassandra/db/RowMutation.java
+++ b/src/java/org/apache/cassandra/db/RowMutation.java
@@ -27,6 +27,8 @@ import org.apache.commons.lang3.StringUtils;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.net.MessageOut;
 import org.apache.cassandra.net.MessagingService;
@@ -139,17 +141,17 @@ public class RowMutation implements IMutation
         return modifications.isEmpty();
     }
 
-    public void add(String cfName, ByteBuffer name, ByteBuffer value, long timestamp, int timeToLive)
+    public void add(String cfName, CellName name, ByteBuffer value, long timestamp, int timeToLive)
     {
         addOrGet(cfName).addColumn(name, value, timestamp, timeToLive);
     }
 
-    public void addCounter(String cfName, ByteBuffer name, long value)
+    public void addCounter(String cfName, CellName name, long value)
     {
         addOrGet(cfName).addCounter(name, value);
     }
 
-    public void add(String cfName, ByteBuffer name, ByteBuffer value, long timestamp)
+    public void add(String cfName, CellName name, ByteBuffer value, long timestamp)
     {
         add(cfName, name, value, timestamp, 0);
     }
@@ -160,13 +162,13 @@ public class RowMutation implements IMutation
         addOrGet(cfName).delete(new DeletionInfo(timestamp, localDeleteTime));
     }
 
-    public void delete(String cfName, ByteBuffer name, long timestamp)
+    public void delete(String cfName, CellName name, long timestamp)
     {
         int localDeleteTime = (int) (System.currentTimeMillis() / 1000);
         addOrGet(cfName).addTombstone(name, localDeleteTime, timestamp);
     }
 
-    public void deleteRange(String cfName, ByteBuffer start, ByteBuffer end, long timestamp)
+    public void deleteRange(String cfName, Composite start, Composite end, long timestamp)
     {
         int localDeleteTime = (int) (System.currentTimeMillis() / 1000);
         addOrGet(cfName).addAtom(new RangeTombstone(start, end, timestamp, localDeleteTime));
@@ -213,7 +215,7 @@ public class RowMutation implements IMutation
 
     public MessageOut<RowMutation> createMessage(MessagingService.Verb verb)
     {
-        return new MessageOut<RowMutation>(verb, this, serializer);
+        return new MessageOut<>(verb, this, serializer);
     }
 
     public String toString()
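
For illustration (not from the patch), a sketch of the CellName-based mutation API; the keyspace/table names are placeholders assumed to exist in the loaded schema, and 'metadata' stands for the target table's CFMetaData:

    // Sketch only: column names are CellNames built from the table's comparator.
    CellName col = metadata.comparator.makeCellName(ByteBufferUtil.bytes("col"));

    RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key"));
    rm.add("Standard1", col, ByteBufferUtil.bytes("value"), FBUtilities.timestampMicros());
    rm.delete("Standard1", col, FBUtilities.timestampMicros());

    // Range deletes take Composite bounds; here the bounds are chosen (hypothetically)
    // to cover every cell whose name starts with the given prefix.
    Composite prefix = metadata.comparator.make(ByteBufferUtil.bytes("p"));
    rm.deleteRange("Standard1", prefix.start(), prefix.end(), FBUtilities.timestampMicros());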

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java b/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java
index 60487c8..78531f7 100644
--- a/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java
@@ -82,7 +82,9 @@ class SliceByNamesReadCommandSerializer implements IVersionedSerializer<ReadComm
         ByteBufferUtil.writeWithShortLength(command.key, out);
         out.writeUTF(command.cfName);
         out.writeLong(cmd.timestamp);
-        NamesQueryFilter.serializer.serialize(command.filter, out, version);
+
+        CFMetaData metadata = Schema.instance.getCFMetaData(cmd.ksName, cmd.cfName);
+        metadata.comparator.namesQueryFilterSerializer().serialize(command.filter, out, version);
     }
 
     public ReadCommand deserialize(DataInput in, int version) throws IOException
@@ -93,7 +95,7 @@ class SliceByNamesReadCommandSerializer implements IVersionedSerializer<ReadComm
         String cfName = in.readUTF();
         long timestamp = in.readLong();
         CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName);
-        NamesQueryFilter filter = NamesQueryFilter.serializer.deserialize(in, version, metadata.comparator);
+        NamesQueryFilter filter = metadata.comparator.namesQueryFilterSerializer().deserialize(in, version);
         ReadCommand command = new SliceByNamesReadCommand(keyspaceName, key, cfName, timestamp, filter);
         command.setDigestQuery(isDigest);
         return command;
@@ -106,11 +108,13 @@ class SliceByNamesReadCommandSerializer implements IVersionedSerializer<ReadComm
         int size = sizes.sizeof(command.isDigestQuery());
         int keySize = command.key.remaining();
 
+        CFMetaData metadata = Schema.instance.getCFMetaData(cmd.ksName, cmd.cfName);
+
         size += sizes.sizeof(command.ksName);
         size += sizes.sizeof((short)keySize) + keySize;
         size += sizes.sizeof(command.cfName);
         size += sizes.sizeof(cmd.timestamp);
-        size += NamesQueryFilter.serializer.serializedSize(command.filter, version);
+        size += metadata.comparator.namesQueryFilterSerializer().serializedSize(command.filter, version);
 
         return size;
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SliceFromReadCommand.java b/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
index 72de2ca..f6ff89a 100644
--- a/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
@@ -24,6 +24,8 @@ import java.nio.ByteBuffer;
 
 import com.google.common.base.Objects;
 
+import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
@@ -136,7 +138,8 @@ class SliceFromReadCommandSerializer implements IVersionedSerializer<ReadCommand
         ByteBufferUtil.writeWithShortLength(realRM.key, out);
         out.writeUTF(realRM.cfName);
         out.writeLong(realRM.timestamp);
-        SliceQueryFilter.serializer.serialize(realRM.filter, out, version);
+        CFMetaData metadata = Schema.instance.getCFMetaData(realRM.ksName, realRM.cfName);
+        metadata.comparator.sliceQueryFilterSerializer().serialize(realRM.filter, out, version);
     }
 
     public ReadCommand deserialize(DataInput in, int version) throws IOException
@@ -146,7 +149,8 @@ class SliceFromReadCommandSerializer implements IVersionedSerializer<ReadCommand
         ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
         String cfName = in.readUTF();
         long timestamp = in.readLong();
-        SliceQueryFilter filter = SliceQueryFilter.serializer.deserialize(in, version);
+        CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName);
+        SliceQueryFilter filter = metadata.comparator.sliceQueryFilterSerializer().deserialize(in, version);
         ReadCommand command = new SliceFromReadCommand(keyspaceName, key, cfName, timestamp, filter);
         command.setDigestQuery(isDigest);
         return command;
@@ -158,12 +162,14 @@ class SliceFromReadCommandSerializer implements IVersionedSerializer<ReadCommand
         SliceFromReadCommand command = (SliceFromReadCommand) cmd;
         int keySize = command.key.remaining();
 
+        CFMetaData metadata = Schema.instance.getCFMetaData(cmd.ksName, cmd.cfName);
+
         int size = sizes.sizeof(cmd.isDigestQuery()); // boolean
         size += sizes.sizeof(command.ksName);
         size += sizes.sizeof((short) keySize) + keySize;
         size += sizes.sizeof(command.cfName);
         size += sizes.sizeof(cmd.timestamp);
-        size += SliceQueryFilter.serializer.serializedSize(command.filter, version);
+        size += metadata.comparator.sliceQueryFilterSerializer().serializedSize(command.filter, version);
 
         return size;
     }
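
Both read-command serializers now follow the same pattern: look up the table's CFMetaData and take the filter serializer from its comparator instead of a static field. A hedged sketch (placeholder keyspace/table, imports elided):

    // Sketch only: 'out'/'in' are any DataOutput/DataInput, 'version' a messaging version.
    static SliceQueryFilter roundTrip(SliceQueryFilter filter, DataOutput out, DataInput in, int version) throws IOException
    {
        // NamesQueryFilter goes through metadata.comparator.namesQueryFilterSerializer() the same way.
        CFMetaData metadata = Schema.instance.getCFMetaData("Keyspace1", "Standard1");
        metadata.comparator.sliceQueryFilterSerializer().serialize(filter, out, version);
        return metadata.comparator.sliceQueryFilterSerializer().deserialize(in, version);
    }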

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/SuperColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SuperColumns.java b/src/java/org/apache/cassandra/db/SuperColumns.java
index 513db38..402dc7e 100644
--- a/src/java/org/apache/cassandra/db/SuperColumns.java
+++ b/src/java/org/apache/cassandra/db/SuperColumns.java
@@ -31,16 +31,16 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 public class SuperColumns
 {
-    public static Iterator<OnDiskAtom> onDiskIterator(DataInput in, int superColumnCount, ColumnSerializer.Flag flag, int expireBefore)
+    public static Iterator<OnDiskAtom> onDiskIterator(DataInput in, int superColumnCount, ColumnSerializer.Flag flag, int expireBefore, CellNameType type)
     {
-        return new SCIterator(in, superColumnCount, flag, expireBefore);
+        return new SCIterator(in, superColumnCount, flag, expireBefore, type);
     }
 
     public static void serializeSuperColumnFamily(ColumnFamily scf, DataOutput out, int version) throws IOException
@@ -55,44 +55,48 @@ public class SuperColumns
          *   subcolumns range deletions).
          */
         DeletionInfo delInfo = scf.deletionInfo();
-        Map<ByteBuffer, List<Column>> scMap = groupSuperColumns(scf);
+        Map<CellName, List<Column>> scMap = groupSuperColumns(scf);
 
         // Actually Serialize
-        DeletionInfo.serializer().serialize(new DeletionInfo(delInfo.getTopLevelDeletion()), out, version);
+        scf.getComparator().deletionInfoSerializer().serialize(new DeletionInfo(delInfo.getTopLevelDeletion()), out, version);
         out.writeInt(scMap.size());
 
-        for (Map.Entry<ByteBuffer, List<Column>> entry : scMap.entrySet())
+        CellNameType subComparator = subType(scf.getComparator());
+        for (Map.Entry<CellName, List<Column>> entry : scMap.entrySet())
         {
-            ByteBufferUtil.writeWithShortLength(entry.getKey(), out);
+            scf.getComparator().cellSerializer().serialize(entry.getKey(), out);
 
             DeletionTime delTime = delInfo.rangeCovering(entry.getKey());
             DeletionInfo scDelInfo = delTime == null ? DeletionInfo.live() : new DeletionInfo(delTime);
             DeletionTime.serializer.serialize(scDelInfo.getTopLevelDeletion(), out);
 
             out.writeInt(entry.getValue().size());
+            ColumnSerializer serializer = subComparator.columnSerializer();
             for (Column subColumn : entry.getValue())
-                Column.serializer.serialize(subColumn, out);
+                serializer.serialize(subColumn, out);
         }
     }
 
-    private static Map<ByteBuffer, List<Column>> groupSuperColumns(ColumnFamily scf)
+    private static Map<CellName, List<Column>> groupSuperColumns(ColumnFamily scf)
     {
-        CompositeType type = (CompositeType)scf.getComparator();
+        CellNameType type = scf.getComparator();
         // The order of insertion matters!
-        Map<ByteBuffer, List<Column>> scMap = new LinkedHashMap<ByteBuffer, List<Column>>();
+        Map<CellName, List<Column>> scMap = new LinkedHashMap<>();
 
-        ByteBuffer scName = null;
+        CellName scName = null;
         List<Column> subColumns = null;
+        CellNameType scType = scType(type);
+        CellNameType subType = subType(type);
         for (Column column : scf)
         {
-            ByteBuffer newScName = scName(column.name());
-            ByteBuffer newSubName = subName(column.name());
+            CellName newScName = scType.makeCellName(scName(column.name()));
+            CellName newSubName = subType.makeCellName(subName(column.name()));
 
-            if (scName == null || type.types.get(0).compare(scName, newScName) != 0)
+            if (scName == null || scType.compare(scName, newScName) != 0)
             {
                 // new super column
                 scName = newScName;
-                subColumns = new ArrayList<Column>();
+                subColumns = new ArrayList<>();
                 scMap.put(scName, subColumns);
             }
 
@@ -104,25 +108,28 @@ public class SuperColumns
     public static void deserializerSuperColumnFamily(DataInput in, ColumnFamily cf, ColumnSerializer.Flag flag, int version) throws IOException
     {
         // Note that there was no way to insert a range tombstone in a SCF in 1.2
-        cf.delete(DeletionInfo.serializer().deserialize(in, version, cf.getComparator()));
+        cf.delete(cf.getComparator().deletionInfoSerializer().deserialize(in, version));
         assert !cf.deletionInfo().rangeIterator().hasNext();
 
-        Iterator<OnDiskAtom> iter = onDiskIterator(in, in.readInt(), flag, Integer.MIN_VALUE);
+        Iterator<OnDiskAtom> iter = onDiskIterator(in, in.readInt(), flag, Integer.MIN_VALUE, cf.getComparator());
         while (iter.hasNext())
             cf.addAtom(iter.next());
     }
 
     public static long serializedSize(ColumnFamily scf, TypeSizes typeSizes, int version)
     {
-        Map<ByteBuffer, List<Column>> scMap = groupSuperColumns(scf);
+        Map<CellName, List<Column>> scMap = groupSuperColumns(scf);
         DeletionInfo delInfo = scf.deletionInfo();
 
         // Actually Serialize
-        long size = DeletionInfo.serializer().serializedSize(new DeletionInfo(delInfo.getTopLevelDeletion()), version);
-        for (Map.Entry<ByteBuffer, List<Column>> entry : scMap.entrySet())
+        long size = scType(scf.getComparator()).deletionInfoSerializer().serializedSize(new DeletionInfo(delInfo.getTopLevelDeletion()), version);
+
+        CellNameType scType = scType(scf.getComparator());
+        CellNameType subType = subType(scf.getComparator());
+        ColumnSerializer colSer = subType.columnSerializer();
+        for (Map.Entry<CellName, List<Column>> entry : scMap.entrySet())
         {
-            int nameSize = entry.getKey().remaining();
-            size += typeSizes.sizeof((short) nameSize) + nameSize;
+            size += scType.cellSerializer().serializedSize(entry.getKey(), typeSizes);
 
             DeletionTime delTime = delInfo.rangeCovering(entry.getKey());
             DeletionInfo scDelInfo = delTime == null ? DeletionInfo.live() : new DeletionInfo(delTime);
@@ -130,7 +137,7 @@ public class SuperColumns
 
             size += typeSizes.sizeof(entry.getValue().size());
             for (Column subColumn : entry.getValue())
-                size += Column.serializer.serializedSize(subColumn, typeSizes);
+                size += colSer.serializedSize(subColumn, typeSizes);
         }
         return size;
     }
@@ -143,16 +150,19 @@ public class SuperColumns
         private final ColumnSerializer.Flag flag;
         private final int expireBefore;
 
+        private final CellNameType type;
+
         private int read;
         private ByteBuffer scName;
         private Iterator<Column> subColumnsIterator;
 
-        private SCIterator(DataInput in, int superColumnCount, ColumnSerializer.Flag flag, int expireBefore)
+        private SCIterator(DataInput in, int superColumnCount, ColumnSerializer.Flag flag, int expireBefore, CellNameType type)
         {
             this.in = in;
             this.scCount = superColumnCount;
             this.flag = flag;
             this.expireBefore = expireBefore;
+            this.type = type;
         }
 
         public boolean hasNext()
@@ -167,7 +177,7 @@ public class SuperColumns
                 if (subColumnsIterator != null && subColumnsIterator.hasNext())
                 {
                     Column c = subColumnsIterator.next();
-                    return c.withUpdatedName(CompositeType.build(scName, c.name()));
+                    return c.withUpdatedName(type.makeCellName(scName, c.name().toByteBuffer()));
                 }
 
                 // Read one more super column
@@ -175,14 +185,14 @@ public class SuperColumns
 
                 scName = ByteBufferUtil.readWithShortLength(in);
                 DeletionInfo delInfo = new DeletionInfo(DeletionTime.serializer.deserialize(in));
-                assert !delInfo.rangeIterator().hasNext(); // We assume no range tombstone (there was no way to insert some in a SCF in 1.2)
 
                 /* read the number of columns */
                 int size = in.readInt();
-                List<Column> subColumns = new ArrayList<Column>(size);
+                List<Column> subColumns = new ArrayList<>(size);
 
+                ColumnSerializer colSer = subType(type).columnSerializer();
                 for (int i = 0; i < size; ++i)
-                    subColumns.add(Column.serializer.deserialize(in, flag, expireBefore));
+                    subColumns.add(colSer.deserialize(in, flag, expireBefore));
 
                 subColumnsIterator = subColumns.iterator();
 
@@ -205,6 +215,16 @@ public class SuperColumns
         }
     }
 
+    private static CellNameType scType(CellNameType type)
+    {
+        return new SimpleDenseCellNameType(type.subtype(0));
+    }
+
+    private static CellNameType subType(CellNameType type)
+    {
+        return new SimpleDenseCellNameType(type.subtype(1));
+    }
+
     public static AbstractType<?> getComparatorFor(CFMetaData metadata, ByteBuffer superColumn)
     {
         return getComparatorFor(metadata, superColumn != null);
@@ -213,44 +233,33 @@ public class SuperColumns
     public static AbstractType<?> getComparatorFor(CFMetaData metadata, boolean subColumn)
     {
         return metadata.isSuper()
-             ? ((CompositeType)metadata.comparator).types.get(subColumn ? 1 : 0)
-             : metadata.comparator;
+             ? metadata.comparator.subtype(subColumn ? 1 : 0)
+             : metadata.comparator.asAbstractType();
     }
 
     // Extract the first component of a columnName, i.e. the super column name
-    public static ByteBuffer scName(ByteBuffer columnName)
+    public static ByteBuffer scName(Composite columnName)
     {
-        return CompositeType.extractComponent(columnName, 0);
+        return columnName.get(0);
     }
 
     // Extract the 2nd component of a columnName, i.e. the sub-column name
-    public static ByteBuffer subName(ByteBuffer columnName)
+    public static ByteBuffer subName(Composite columnName)
     {
-        return CompositeType.extractComponent(columnName, 1);
+        return columnName.get(1);
     }
 
-    // We don't use CompositeType.Builder mostly because we want to avoid having to provide the comparator.
-    public static ByteBuffer startOf(ByteBuffer scName)
+    public static Composite startOf(ByteBuffer scName)
     {
-        int length = scName.remaining();
-        ByteBuffer bb = ByteBuffer.allocate(2 + length + 1);
-
-        bb.put((byte) ((length >> 8) & 0xFF));
-        bb.put((byte) (length & 0xFF));
-        bb.put(scName.duplicate());
-        bb.put((byte) 0);
-        bb.flip();
-        return bb;
+        return CellNames.compositeDense(scName).start();
     }
 
-    public static ByteBuffer endOf(ByteBuffer scName)
+    public static Composite endOf(ByteBuffer scName)
     {
-        ByteBuffer bb = startOf(scName);
-        bb.put(bb.remaining() - 1, (byte)1);
-        return bb;
+        return CellNames.compositeDense(scName).end();
     }
 
-    public static SCFilter filterToSC(CompositeType type, IDiskAtomFilter filter)
+    public static SCFilter filterToSC(CellNameType type, IDiskAtomFilter filter)
     {
         if (filter instanceof NamesQueryFilter)
             return namesFilterToSC(type, (NamesQueryFilter)filter);
@@ -258,11 +267,12 @@ public class SuperColumns
             return sliceFilterToSC(type, (SliceQueryFilter)filter);
     }
 
-    public static SCFilter namesFilterToSC(CompositeType type, NamesQueryFilter filter)
+    public static SCFilter namesFilterToSC(CellNameType type, NamesQueryFilter filter)
     {
         ByteBuffer scName = null;
-        SortedSet<ByteBuffer> newColumns = new TreeSet<ByteBuffer>(filter.columns.comparator());
-        for (ByteBuffer name : filter.columns)
+        CellNameType subComparator = subType(type);
+        SortedSet<CellName> newColumns = new TreeSet<CellName>(subComparator);
+        for (CellName name : filter.columns)
         {
             ByteBuffer newScName = scName(name);
 
@@ -270,18 +280,23 @@ public class SuperColumns
             {
                 scName = newScName;
             }
-            else if (type.types.get(0).compare(scName, newScName) != 0)
+            else if (type.subtype(0).compare(scName, newScName) != 0)
             {
                 // If we're selecting columns across multiple SC, it's not something we can translate for an old node
                 throw new RuntimeException("Cannot convert filter to old super column format. Update all nodes to Cassandra 2.0 first.");
             }
 
-            newColumns.add(subName(name));
+            newColumns.add(subComparator.makeCellName(name));
         }
         return new SCFilter(scName, new NamesQueryFilter(newColumns));
     }
 
-    public static SCFilter sliceFilterToSC(CompositeType type, SliceQueryFilter filter)
+    private static boolean isEndOfRange(Composite c)
+    {
+        return c.eoc() == Composite.EOC.END;
+    }
+
+    public static SCFilter sliceFilterToSC(CellNameType type, SliceQueryFilter filter)
     {
         /*
          * There are 3 main cases that we can translate back into super column
@@ -300,64 +315,67 @@ public class SuperColumns
         boolean reversed = filter.reversed;
         if (filter.slices.length == 1)
         {
-            ByteBuffer start = filter.slices[0].start;
-            ByteBuffer finish = filter.slices[0].start;
+            Composite start = filter.slices[0].start;
+            Composite finish = filter.slices[0].start;
 
             if (filter.compositesToGroup == 1)
             {
                 // Note: all the resulting filters must have compositesToGroup == 0 because this
                 // makes no sense for super columns on the destination node otherwise
-                if (start.remaining() == 0)
+                if (start.isEmpty())
                 {
-                    if (finish.remaining() == 0)
+                    if (finish.isEmpty())
                         // An 'IdentityFilter', keep as is (except for the compositeToGroup)
                         return new SCFilter(null, new SliceQueryFilter(filter.start(), filter.finish(), reversed, filter.count));
 
                     if (subName(finish) == null
-                            && ((!reversed && !firstEndOfComponent(finish)) || (reversed && firstEndOfComponent(finish))))
-                        return new SCFilter(null, new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, scName(finish), reversed, filter.count));
+                            && ((!reversed && !isEndOfRange(finish)) || (reversed && isEndOfRange(finish))))
+                        return new SCFilter(null, new SliceQueryFilter(Composites.EMPTY, CellNames.simpleDense(scName(finish)), reversed, filter.count));
                 }
-                else if (finish.remaining() == 0)
+                else if (finish.isEmpty())
                 {
                     if (subName(start) == null
-                            && ((!reversed && firstEndOfComponent(start)) || (reversed && !firstEndOfComponent(start))))
-                        return new SCFilter(null, new SliceQueryFilter(scName(start), ByteBufferUtil.EMPTY_BYTE_BUFFER, reversed, filter.count));
+                            && ((!reversed && isEndOfRange(start)) || (reversed && !isEndOfRange(start))))
+                        return new SCFilter(null, new SliceQueryFilter(CellNames.simpleDense(scName(start)), Composites.EMPTY, reversed, filter.count));
                 }
                 else if (subName(start) == null && subName(finish) == null
-                        && ((   reversed && !firstEndOfComponent(start) &&  firstEndOfComponent(finish))
-                            || (!reversed &&  firstEndOfComponent(start) && !firstEndOfComponent(finish))))
+                        && ((   reversed && !isEndOfRange(start) && isEndOfRange(finish))
+                            || (!reversed &&  isEndOfRange(start) && !isEndOfRange(finish))))
                 {
                     // A slice of supercolumns
-                    return new SCFilter(null, new SliceQueryFilter(scName(start), scName(finish), reversed, filter.count));
+                    return new SCFilter(null, new SliceQueryFilter(CellNames.simpleDense(scName(start)),
+                                                                   CellNames.simpleDense(scName(finish)),
+                                                                   reversed,
+                                                                   filter.count));
                 }
             }
-            else if (filter.compositesToGroup == 0 && type.types.get(0).compare(scName(start), scName(finish)) == 0)
+            else if (filter.compositesToGroup == 0 && type.subtype(0).compare(scName(start), scName(finish)) == 0)
             {
                 // A slice of subcolumns
-                return new SCFilter(scName(start), filter.withUpdatedSlice(subName(start), subName(finish)));
+                return new SCFilter(scName(start), filter.withUpdatedSlice(CellNames.simpleDense(subName(start)), CellNames.simpleDense(subName(finish))));
             }
         }
         else if (!reversed)
         {
-            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(type.types.get(0));
+            SortedSet<CellName> columns = new TreeSet<CellName>(scType(type));
             for (int i = 0; i < filter.slices.length; ++i)
             {
-                ByteBuffer start = filter.slices[i].start;
-                ByteBuffer finish = filter.slices[i].finish;
+                Composite start = filter.slices[i].start;
+                Composite finish = filter.slices[i].finish;
 
                 if (subName(start) != null || subName(finish) != null
-                  || type.types.get(0).compare(scName(start), scName(finish)) != 0
-                  || firstEndOfComponent(start) || !firstEndOfComponent(finish))
+                  || type.subtype(0).compare(scName(start), scName(finish)) != 0
+                  || isEndOfRange(start) || !isEndOfRange(finish))
                     throw new RuntimeException("Cannot convert filter to old super column format. Update all nodes to Cassandra 2.0 first.");
 
-                columns.add(scName(start));
+                columns.add(CellNames.simpleDense(scName(start)));
             }
             return new SCFilter(null, new NamesQueryFilter(columns));
         }
         throw new RuntimeException("Cannot convert filter to old super column format. Update all nodes to Cassandra 2.0 first.");
     }
 
-    public static IDiskAtomFilter fromSCFilter(CompositeType type, ByteBuffer scName, IDiskAtomFilter filter)
+    public static IDiskAtomFilter fromSCFilter(CellNameType type, ByteBuffer scName, IDiskAtomFilter filter)
     {
         if (filter instanceof NamesQueryFilter)
             return fromSCNamesFilter(type, scName, (NamesQueryFilter)filter);
@@ -365,65 +383,55 @@ public class SuperColumns
             return fromSCSliceFilter(type, scName, (SliceQueryFilter)filter);
     }
 
-    public static IDiskAtomFilter fromSCNamesFilter(CompositeType type, ByteBuffer scName, NamesQueryFilter filter)
+    public static IDiskAtomFilter fromSCNamesFilter(CellNameType type, ByteBuffer scName, NamesQueryFilter filter)
     {
         if (scName == null)
         {
             ColumnSlice[] slices = new ColumnSlice[filter.columns.size()];
             int i = 0;
-            for (ByteBuffer bb : filter.columns)
+            for (CellName name : filter.columns)
             {
-                CompositeType.Builder builder = type.builder().add(bb);
-                slices[i++] = new ColumnSlice(builder.build(), builder.buildAsEndOfRange());
+                slices[i++] = name.slice();
             }
             return new SliceQueryFilter(slices, false, slices.length, 1);
         }
         else
         {
-            SortedSet<ByteBuffer> newColumns = new TreeSet<ByteBuffer>(type);
-            for (ByteBuffer c : filter.columns)
-                newColumns.add(CompositeType.build(scName, c));
+            SortedSet<CellName> newColumns = new TreeSet<CellName>(type);
+            for (CellName c : filter.columns)
+                newColumns.add(type.makeCellName(scName, c.toByteBuffer()));
             return filter.withUpdatedColumns(newColumns);
         }
     }
 
-    public static SliceQueryFilter fromSCSliceFilter(CompositeType type, ByteBuffer scName, SliceQueryFilter filter)
+    public static SliceQueryFilter fromSCSliceFilter(CellNameType type, ByteBuffer scName, SliceQueryFilter filter)
     {
         assert filter.slices.length == 1;
         if (scName == null)
         {
-            ByteBuffer start = filter.start().remaining() == 0
-                             ? filter.start()
-                             : (filter.reversed ? type.builder().add(filter.start()).buildAsEndOfRange()
-                                                : type.builder().add(filter.start()).build());
-            ByteBuffer finish = filter.finish().remaining() == 0
-                              ? filter.finish()
-                              : (filter.reversed ? type.builder().add(filter.finish()).build()
-                                                 : type.builder().add(filter.finish()).buildAsEndOfRange());
+            // The filter is on the super column name
+            CBuilder builder = type.builder();
+            Composite start = filter.start().isEmpty()
+                            ? Composites.EMPTY
+                            : builder.buildWith(filter.start().toByteBuffer()).withEOC(filter.reversed ? Composite.EOC.END : Composite.EOC.START);
+            Composite finish = filter.finish().isEmpty()
+                             ? Composites.EMPTY
+                             : builder.buildWith(filter.finish().toByteBuffer()).withEOC(filter.reversed ? Composite.EOC.START : Composite.EOC.END);
             return new SliceQueryFilter(start, finish, filter.reversed, filter.count, 1);
         }
         else
         {
-            CompositeType.Builder builder = type.builder().add(scName);
-            ByteBuffer start = filter.start().remaining() == 0
-                             ? filter.reversed ? builder.buildAsEndOfRange() : builder.build()
-                             : builder.copy().add(filter.start()).build();
-            ByteBuffer end = filter.finish().remaining() == 0
-                             ? filter.reversed ? builder.build() : builder.buildAsEndOfRange()
-                             : builder.add(filter.finish()).build();
+            CBuilder builder = type.builder().add(scName);
+            Composite start = filter.start().isEmpty()
+                            ? builder.build().withEOC(filter.reversed ? Composite.EOC.END : Composite.EOC.START)
+                            : builder.buildWith(filter.start().toByteBuffer());
+            Composite end = filter.finish().isEmpty()
+                          ? builder.build().withEOC(filter.reversed ? Composite.EOC.START : Composite.EOC.END)
+                          : builder.buildWith(filter.finish().toByteBuffer());
             return new SliceQueryFilter(start, end, filter.reversed, filter.count);
         }
     }
 
-    private static boolean firstEndOfComponent(ByteBuffer bb)
-    {
-        bb = bb.duplicate();
-        int length = (bb.get() & 0xFF) << 8;
-        length |= (bb.get() & 0xFF);
-
-        return bb.get(length + 2) == 1;
-    }
-
     public static class SCFilter
     {
         public final ByteBuffer scName;
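
A small sketch (not part of the patch) of the Composite-based SuperColumns helpers; 'type' stands for the CellNameType of a super column family, whose subtype 0 is the super column name and subtype 1 the sub-column name:

    // Sketch only.
    ByteBuffer sc = ByteBufferUtil.bytes("sc");
    Composite start = SuperColumns.startOf(sc);  // CellNames.compositeDense(sc).start()
    Composite end   = SuperColumns.endOf(sc);    // CellNames.compositeDense(sc).end()

    CellName full = type.makeCellName(sc, ByteBufferUtil.bytes("c"));
    ByteBuffer scPart  = SuperColumns.scName(full);   // component 0: the super column name
    ByteBuffer subPart = SuperColumns.subName(full);  // component 1: the sub-column name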

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index 1ecb0e4..e650d57 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -31,6 +31,8 @@ import com.google.common.collect.SetMultimap;
 import com.google.common.collect.Sets;
 
 import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.metrics.RestorableMeter;
 import org.apache.cassandra.transport.Server;
 import org.apache.commons.lang3.StringUtils;
@@ -586,7 +588,7 @@ public class SystemKeyspace
         ColumnFamilyStore cfs = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(INDEX_CF);
         QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes(keyspaceName)),
                                                         INDEX_CF,
-                                                        ByteBufferUtil.bytes(indexName),
+                                                        FBUtilities.singleton(cfs.getComparator().makeCellName(indexName), cfs.getComparator()),
                                                         System.currentTimeMillis());
         return ColumnFamilyStore.removeDeleted(cfs.getColumnFamily(filter), Integer.MAX_VALUE) != null;
     }
@@ -594,7 +596,7 @@ public class SystemKeyspace
     public static void setIndexBuilt(String keyspaceName, String indexName)
     {
         ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Keyspace.SYSTEM_KS, INDEX_CF);
-        cf.addColumn(new Column(ByteBufferUtil.bytes(indexName), ByteBufferUtil.EMPTY_BYTE_BUFFER, FBUtilities.timestampMicros()));
+        cf.addColumn(new Column(cf.getComparator().makeCellName(indexName), ByteBufferUtil.EMPTY_BYTE_BUFFER, FBUtilities.timestampMicros()));
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, ByteBufferUtil.bytes(keyspaceName), cf);
         rm.apply();
     }
@@ -602,7 +604,7 @@ public class SystemKeyspace
     public static void setIndexRemoved(String keyspaceName, String indexName)
     {
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, ByteBufferUtil.bytes(keyspaceName));
-        rm.delete(INDEX_CF, ByteBufferUtil.bytes(indexName), FBUtilities.timestampMicros());
+        rm.delete(INDEX_CF, CFMetaData.IndexCf.comparator.makeCellName(indexName), FBUtilities.timestampMicros());
         rm.apply();
     }
 
@@ -650,14 +652,14 @@ public class SystemKeyspace
         // Get the last CounterId (since CounterIds are timeuuids and thus ordered from the oldest to the newest one)
         QueryFilter filter = QueryFilter.getSliceFilter(decorate(ALL_LOCAL_NODE_ID_KEY),
                                                         COUNTER_ID_CF,
-                                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                        Composites.EMPTY,
+                                                        Composites.EMPTY,
                                                         true,
                                                         1,
                                                         System.currentTimeMillis());
         ColumnFamily cf = keyspace.getColumnFamilyStore(COUNTER_ID_CF).getColumnFamily(filter);
         if (cf != null && cf.getColumnCount() != 0)
-            return CounterId.wrap(cf.iterator().next().name());
+            return CounterId.wrap(cf.iterator().next().name().toByteBuffer());
         else
             return null;
     }
@@ -673,7 +675,7 @@ public class SystemKeyspace
         ByteBuffer ip = ByteBuffer.wrap(FBUtilities.getBroadcastAddress().getAddress());
 
         ColumnFamily cf = ArrayBackedSortedColumns.factory.create(Keyspace.SYSTEM_KS, COUNTER_ID_CF);
-        cf.addColumn(new Column(newCounterId.bytes(), ip, now));
+        cf.addColumn(new Column(cf.getComparator().makeCellName(newCounterId.bytes()), ip, now));
         RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, ALL_LOCAL_NODE_ID_KEY, cf);
         rm.apply();
         forceBlockingFlush(COUNTER_ID_CF);
@@ -695,7 +697,7 @@ public class SystemKeyspace
 
             // this will ignore the last column on purpose since it is the
             // current local node id
-            previous = CounterId.wrap(c.name());
+            previous = CounterId.wrap(c.name().toByteBuffer());
         }
         return l;
     }
@@ -799,9 +801,10 @@ public class SystemKeyspace
     {
         DecoratedKey key = StorageService.getPartitioner().decorateKey(getSchemaKSKey(ksName));
         ColumnFamilyStore schemaCFS = SystemKeyspace.schemaCFS(schemaCfName);
+        Composite prefix = schemaCFS.getComparator().make(cfName);
         ColumnFamily cf = schemaCFS.getColumnFamily(key,
-                                                    DefsTables.searchComposite(cfName, true),
-                                                    DefsTables.searchComposite(cfName, false),
+                                                    prefix,
+                                                    prefix.end(),
                                                     false,
                                                     Integer.MAX_VALUE,
                                                     System.currentTimeMillis());
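
The prefix-slice idiom above (which replaces DefsTables.searchComposite()) relies on composite ordering: a Composite built with make() sorts before every cell name that extends it, while its end() marker sorts after all of them, so the pair bounds a slice over the whole prefix. A hedged sketch, with 'cfs' a ColumnFamilyStore and 'key' a DecoratedKey assumed given:

    // Sketch only: read every cell whose name starts with the given first component.
    Composite prefix = cfs.getComparator().make(ByteBufferUtil.bytes("prefix"));
    ColumnFamily cf = cfs.getColumnFamily(key,
                                          prefix,        // lower bound: sorts before any extension of the prefix
                                          prefix.end(),  // upper bound: sorts after every extension of the prefix
                                          false,         // not reversed
                                          Integer.MAX_VALUE,
                                          System.currentTimeMillis());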

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/TreeMapBackedSortedColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/TreeMapBackedSortedColumns.java b/src/java/org/apache/cassandra/db/TreeMapBackedSortedColumns.java
index 466833b..597cae6 100644
--- a/src/java/org/apache/cassandra/db/TreeMapBackedSortedColumns.java
+++ b/src/java/org/apache/cassandra/db/TreeMapBackedSortedColumns.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.SortedMap;
@@ -27,13 +26,14 @@ import java.util.TreeMap;
 import com.google.common.base.Function;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.utils.Allocator;
 
 public class TreeMapBackedSortedColumns extends AbstractThreadUnsafeSortedColumns
 {
-    private final TreeMap<ByteBuffer, Column> map;
+    private final TreeMap<CellName, Column> map;
 
     public static final ColumnFamily.Factory<TreeMapBackedSortedColumns> factory = new Factory<TreeMapBackedSortedColumns>()
     {
@@ -44,21 +44,21 @@ public class TreeMapBackedSortedColumns extends AbstractThreadUnsafeSortedColumn
         }
     };
 
-    public AbstractType<?> getComparator()
+    public CellNameType getComparator()
     {
-        return (AbstractType<?>)map.comparator();
+        return (CellNameType)map.comparator();
     }
 
     private TreeMapBackedSortedColumns(CFMetaData metadata)
     {
         super(metadata);
-        this.map = new TreeMap<ByteBuffer, Column>(metadata.comparator);
+        this.map = new TreeMap<>(metadata.comparator);
     }
 
-    private TreeMapBackedSortedColumns(CFMetaData metadata, SortedMap<ByteBuffer, Column> columns)
+    private TreeMapBackedSortedColumns(CFMetaData metadata, SortedMap<CellName, Column> columns)
     {
         super(metadata);
-        this.map = new TreeMap<ByteBuffer, Column>(columns);
+        this.map = new TreeMap<>(columns);
     }
 
     public ColumnFamily.Factory getFactory()
@@ -82,7 +82,7 @@ public class TreeMapBackedSortedColumns extends AbstractThreadUnsafeSortedColumn
     */
     public void addColumn(Column column, Allocator allocator)
     {
-        ByteBuffer name = column.name();
+        CellName name = column.name();
         // this is a slightly unusual way to structure this; a more natural way is shown in ThreadSafeSortedColumns,
         // but TreeMap lacks putAbsent.  Rather than split it into a "get, then put" check, we do it as follows,
         // which saves the extra "get" in the no-conflict case [for both normal and super columns],
@@ -128,7 +128,7 @@ public class TreeMapBackedSortedColumns extends AbstractThreadUnsafeSortedColumn
         return true;
     }
 
-    public Column getColumn(ByteBuffer name)
+    public Column getColumn(CellName name)
     {
         return map.get(name);
     }
@@ -154,7 +154,7 @@ public class TreeMapBackedSortedColumns extends AbstractThreadUnsafeSortedColumn
         return map.descendingMap().values();
     }
 
-    public SortedSet<ByteBuffer> getColumnNames()
+    public SortedSet<CellName> getColumnNames()
     {
         return map.navigableKeySet();
     }
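
The addColumn() comment kept above describes a put-then-reconcile idiom: the column is put unconditionally, and only when a previous value comes back are the two reconciled and the winner restored, saving the extra lookup in the common no-conflict case (on Java 8+ the same effect could be had with Map.merge; this code predates it). A minimal, self-contained sketch of that pattern with plain stand-in types, not the Cassandra classes:

    import java.util.*;

    // Put first, reconcile only on conflict.
    public class PutThenReconcileSketch
    {
        static final class Cell
        {
            final String name; final String value; final long timestamp;
            Cell(String name, String value, long timestamp) { this.name = name; this.value = value; this.timestamp = timestamp; }
            public String toString() { return name + "=" + value + "@" + timestamp; }
        }

        // Highest timestamp wins; a real reconcile also considers tombstones, counters, etc.
        static Cell reconcile(Cell a, Cell b)
        {
            return a.timestamp >= b.timestamp ? a : b;
        }

        static void addCell(TreeMap<String, Cell> map, Cell cell)
        {
            Cell previous = map.put(cell.name, cell);
            if (previous == null)
                return; // common case: a single map operation, no extra get
            Cell winner = reconcile(previous, cell);
            if (winner != cell)
                map.put(cell.name, winner); // conflict lost: restore the previous winner
        }

        public static void main(String[] args)
        {
            TreeMap<String, Cell> map = new TreeMap<>();
            addCell(map, new Cell("c1", "old", 1));
            addCell(map, new Cell("c1", "new", 2));
            System.out.println(map.get("c1")); // c1=new@2
        }
    }
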

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/UnsortedColumns.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/UnsortedColumns.java b/src/java/org/apache/cassandra/db/UnsortedColumns.java
index 2b33cd0..d6520b0 100644
--- a/src/java/org/apache/cassandra/db/UnsortedColumns.java
+++ b/src/java/org/apache/cassandra/db/UnsortedColumns.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.db;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
@@ -26,6 +25,7 @@ import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
 
 import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.utils.Allocator;
 
@@ -101,16 +101,16 @@ public class UnsortedColumns extends AbstractThreadUnsafeSortedColumns
         throw new UnsupportedOperationException();
     }
 
-    public Column getColumn(ByteBuffer name)
+    public Column getColumn(CellName name)
     {
         throw new UnsupportedOperationException();
     }
 
-    public Iterable<ByteBuffer> getColumnNames()
+    public Iterable<CellName> getColumnNames()
     {
-        return Iterables.transform(columns, new Function<Column, ByteBuffer>()
+        return Iterables.transform(columns, new Function<Column, CellName>()
         {
-            public ByteBuffer apply(Column column)
+            public CellName apply(Column column)
             {
                 return column.name;
             }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/columniterator/IdentityQueryFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/IdentityQueryFilter.java b/src/java/org/apache/cassandra/db/columniterator/IdentityQueryFilter.java
index 828f7e5..7185eef 100644
--- a/src/java/org/apache/cassandra/db/columniterator/IdentityQueryFilter.java
+++ b/src/java/org/apache/cassandra/db/columniterator/IdentityQueryFilter.java
@@ -17,8 +17,8 @@
  */
 package org.apache.cassandra.db.columniterator;
 
+import org.apache.cassandra.db.composites.Composites;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
 public class IdentityQueryFilter extends SliceQueryFilter
 {
@@ -27,7 +27,7 @@ public class IdentityQueryFilter extends SliceQueryFilter
      */
     public IdentityQueryFilter()
     {
-        super(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);
+        super(Composites.EMPTY, Composites.EMPTY, false, Integer.MAX_VALUE);
     }
 
     @Override
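
IdentityQueryFilter now passes Composites.EMPTY for both slice bounds, and throughout the patch the old start.remaining() != 0 checks become !start.isEmpty(): an empty composite on either end means that side of the slice is unbounded. A tiny self-contained model of those bound checks, with plain strings in place of composites:

    // Empty bound == unbounded on that side; both empty selects everything.
    public class SliceBoundsSketch
    {
        static boolean isBeforeSliceStart(String name, String start)
        {
            return !start.isEmpty() && name.compareTo(start) < 0;
        }

        static boolean isBeforeSliceFinish(String name, String finish)
        {
            return finish.isEmpty() || name.compareTo(finish) <= 0;
        }

        static boolean selected(String name, String start, String finish)
        {
            return !isBeforeSliceStart(name, start) && isBeforeSliceFinish(name, finish);
        }

        public static void main(String[] args)
        {
            System.out.println(selected("b", "", ""));   // true: identity filter, no bounds
            System.out.println(selected("b", "a", "c")); // true: inside the slice
            System.out.println(selected("d", "a", "c")); // false: past the finish bound
        }
    }
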

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
index 036d0cf..8715a90 100644
--- a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
+++ b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
@@ -18,17 +18,16 @@
 package org.apache.cassandra.db.columniterator;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.ArrayDeque;
 import java.util.Deque;
-import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.collect.AbstractIterator;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.ColumnSlice;
-import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.IndexHelper;
 import org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
@@ -54,7 +53,7 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
     private final ColumnSlice[] slices;
     private final BlockFetcher fetcher;
     private final Deque<OnDiskAtom> blockColumns = new ArrayDeque<OnDiskAtom>();
-    private final AbstractType<?> comparator;
+    private final CellNameType comparator;
 
     // Holds range tombstone in reverse queries. See addColumn()
     private final Deque<OnDiskAtom> rangeTombstonesReversed;
@@ -191,7 +190,7 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
         /*
          * Return the smallest key selected by the current ColumnSlice.
          */
-        protected ByteBuffer currentStart()
+        protected Composite currentStart()
         {
             return reversed ? slices[currentSliceIdx].finish : slices[currentSliceIdx].start;
         }
@@ -199,7 +198,7 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
         /*
          * Return the biggest key selected by the current ColumnSlice.
          */
-        protected ByteBuffer currentFinish()
+        protected Composite currentFinish()
         {
             return reversed ? slices[currentSliceIdx].start : slices[currentSliceIdx].finish;
         }
@@ -213,22 +212,22 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
             return isBeforeSliceStart(column.name());
         }
 
-        protected boolean isBeforeSliceStart(ByteBuffer name)
+        protected boolean isBeforeSliceStart(Composite name)
         {
-            ByteBuffer start = currentStart();
-            return start.remaining() != 0 && comparator.compare(name, start) < 0;
+            Composite start = currentStart();
+            return !start.isEmpty() && comparator.compare(name, start) < 0;
         }
 
         protected boolean isColumnBeforeSliceFinish(OnDiskAtom column)
         {
-            ByteBuffer finish = currentFinish();
-            return finish.remaining() == 0 || comparator.compare(column.name(), finish) <= 0;
+            Composite finish = currentFinish();
+            return finish.isEmpty() || comparator.compare(column.name(), finish) <= 0;
         }
 
-        protected boolean isAfterSliceFinish(ByteBuffer name)
+        protected boolean isAfterSliceFinish(Composite name)
         {
-            ByteBuffer finish = currentFinish();
-            return finish.remaining() != 0 && comparator.compare(name, finish) > 0;
+            Composite finish = currentFinish();
+            return !finish.isEmpty() && comparator.compare(name, finish) > 0;
         }
     }
 
@@ -361,9 +360,8 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
             if (file == null)
                 file = originalInput == null ? sstable.getFileDataInput(positionToSeek) : originalInput;
 
-            // Give a bogus atom count since we'll deserialize as long as we're
-            // within the index block but we don't know how much atom is there
-            Iterator<OnDiskAtom> atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, Integer.MAX_VALUE, sstable.descriptor.version);
+            AtomDeserializer deserializer = emptyColumnFamily.metadata().getOnDiskDeserializer(file, sstable.descriptor.version);
+
             file.seek(positionToSeek);
             FileMark mark = file.mark();
 
@@ -371,63 +369,65 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
             boolean inSlice = false;
 
             // scan from index start
-            OnDiskAtom column = null;
-            while (file.bytesPastMark(mark) < currentIndex.width || column != null)
+            while (file.bytesPastMark(mark) < currentIndex.width || deserializer.hasUnprocessed())
             {
-                // Only fetch a new column if we haven't dealt with the previous one.
-                if (column == null)
-                    column = atomIterator.next();
-
                 // col is before slice
                 // (If in slice, don't bother checking that until we change slice)
-                if (!inSlice && isColumnBeforeSliceStart(column))
+                Composite start = currentStart();
+                if (!inSlice && !start.isEmpty() && deserializer.compareNextTo(start) < 0)
                 {
                     if (reversed)
                     {
                         // the next slice select columns that are before the current one, so it may
                         // match this column, so keep it around.
-                        prefetched.addFirst(column);
+                        prefetched.addFirst(deserializer.readNext());
+                    }
+                    else
+                    {
+                        deserializer.skipNext();
                     }
-                    column = null;
                 }
                 // col is within slice
-                else if (isColumnBeforeSliceFinish(column))
-                {
-                    inSlice = true;
-                    addColumn(column);
-                    column = null;
-                }
-                // col is after slice.
                 else
                 {
-                    // When reading forward, if we hit a column that sorts after the current slice, it means we're done with this slice.
-                    // For reversed, this may either mean that we're done with the current slice, or that we need to read the previous
-                    // index block. However, we can be sure that we are in the first case though (the current slice is done) if the first
-                    // columns of the block were not part of the current slice, i.e. if we have columns in prefetched.
-                    if (reversed && prefetched.isEmpty())
-                        break;
+                    Composite finish = currentFinish();
+                    if (finish.isEmpty() || deserializer.compareNextTo(finish) <= 0)
+                    {
+                        inSlice = true;
+                        addColumn(deserializer.readNext());
+                    }
+                    // col is after slice.
+                    else
+                    {
+                        // When reading forward, if we hit a column that sorts after the current slice, it means we're done with this slice.
+                        // For reversed, this may either mean that we're done with the current slice, or that we need to read the previous
+                        // index block. However, we can be sure that we are in the first case though (the current slice is done) if the first
+                        // columns of the block were not part of the current slice, i.e. if we have columns in prefetched.
+                        if (reversed && prefetched.isEmpty())
+                            break;
 
-                    if (!setNextSlice())
-                        break;
+                        if (!setNextSlice())
+                            break;
 
-                    inSlice = false;
+                        inSlice = false;
 
-                    // The next index block now corresponds to the first block that may have columns for the newly set slice.
-                    // So if it's different from the current block, we're done with this block. And in that case, we know
-                    // that our prefetched columns won't match.
-                    if (nextIndexIdx != lastDeserializedBlock)
-                    {
+                        // The next index block now corresponds to the first block that may have columns for the newly set slice.
+                        // So if it's different from the current block, we're done with this block. And in that case, we know
+                        // that our prefetched columns won't match.
+                        if (nextIndexIdx != lastDeserializedBlock)
+                        {
+                            if (reversed)
+                                prefetched.clear();
+                            break;
+                        }
+
+                        // Even if the next slice may have column in this blocks, if we're reversed, those columns have been
+                        // prefetched and we're done with that block
                         if (reversed)
-                            prefetched.clear();
-                        break;
-                    }
-
-                    // Even if the next slice may have column in this blocks, if we're reversed, those columns have been
-                    // prefetched and we're done with that block
-                    if (reversed)
-                        break;
+                            break;
 
-                    // otherwise, we will deal with that column at the next iteration
+                        // otherwise, we will deal with that column at the next iteration
+                    }
                 }
             }
             return true;
@@ -446,28 +446,27 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
             boolean inSlice = false;
 
             int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? file.readInt() : Integer.MAX_VALUE;
-            Iterator<OnDiskAtom> atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version);
-            OnDiskAtom column = null;
-            while (atomIterator.hasNext() || column != null)
+            AtomDeserializer deserializer = emptyColumnFamily.metadata().getOnDiskDeserializer(file, sstable.descriptor.version);
+            int deserialized = 0;
+            while (deserializer.hasNext() && deserialized < columnCount)
             {
-                // Only fetch a new column if we haven't dealt with the previous one.
-                if (column == null)
-                    column = atomIterator.next();
-
                 // col is before slice
                 // (If in slice, don't bother checking that until we change slice)
-                if (!inSlice && isColumnBeforeSliceStart(column))
+                Composite start = currentStart();
+                if (!inSlice && !start.isEmpty() && deserializer.compareNextTo(start) < 0)
                 {
-                    column = null;
+                    deserializer.skipNext();
+                    ++deserialized;
                     continue;
                 }
 
                 // col is within slice
-                if (isColumnBeforeSliceFinish(column))
+                Composite finish = currentFinish();
+                if (finish.isEmpty() || deserializer.compareNextTo(finish) <= 0)
                 {
                     inSlice = true;
-                    addColumn(column);
-                    column = null;
+                    addColumn(deserializer.readNext());
+                    ++deserialized;
                 }
                 // col is after slice. more slices?
                 else
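
This is the heart of the IndexedSliceReader change: the old loop materialized every atom via getOnDiskIterator and then tested it against the slice, while the new one asks an AtomDeserializer to compare the next cell's name against the slice bounds and only readNext() when the cell is wanted, skipNext() otherwise. The sketch below models that shape with an in-memory stand-in (the real deserializer compares against the serialized name without building the cell); class and method names are illustrative only:

    import java.util.*;

    public class LazyDeserializeSketch
    {
        // Stand-in for AtomDeserializer: peek at the next name, then read or skip it.
        static class Deserializer
        {
            private final Iterator<String> source;
            private String next;

            Deserializer(List<String> sortedNames) { this.source = sortedNames.iterator(); }

            boolean hasNext()               { return next != null || source.hasNext(); }
            int compareNextTo(String bound) { return peek().compareTo(bound); }
            String readNext()               { String n = peek(); next = null; return n; }
            void skipNext()                 { peek(); next = null; }

            private String peek()
            {
                if (next == null)
                    next = source.next();
                return next;
            }
        }

        public static void main(String[] args)
        {
            Deserializer deserializer = new Deserializer(List.of("a", "b", "c", "d", "e"));
            String start = "b", finish = "d"; // empty bound would mean "unbounded"
            List<String> result = new ArrayList<>();
            while (deserializer.hasNext())
            {
                if (!start.isEmpty() && deserializer.compareNextTo(start) < 0)
                    deserializer.skipNext();             // before the slice: skip cheaply
                else if (finish.isEmpty() || deserializer.compareNextTo(finish) <= 0)
                    result.add(deserializer.readNext()); // inside the slice: materialize
                else
                    break;                               // past the slice: done
            }
            System.out.println(result); // [b, c, d]
        }
    }
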

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
index 3467244..41b95f9 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java
@@ -18,17 +18,14 @@
 package org.apache.cassandra.db.columniterator;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.SortedSet;
+import java.util.*;
 
 import com.google.common.collect.AbstractIterator;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.IndexHelper;
 import org.apache.cassandra.io.sstable.SSTableReader;
@@ -43,10 +40,10 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
     private final SSTableReader sstable;
     private FileDataInput fileToClose;
     private Iterator<OnDiskAtom> iter;
-    public final SortedSet<ByteBuffer> columns;
+    public final SortedSet<CellName> columns;
     public final DecoratedKey key;
 
-    public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<ByteBuffer> columns)
+    public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<CellName> columns)
     {
         assert columns != null;
         this.sstable = sstable;
@@ -73,7 +70,7 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
         }
     }
 
-    public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<ByteBuffer> columns, RowIndexEntry indexEntry)
+    public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<CellName> columns, RowIndexEntry indexEntry)
     {
         assert columns != null;
         this.sstable = sstable;
@@ -152,7 +149,7 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
         iter = result.iterator();
     }
 
-    private void readSimpleColumns(FileDataInput file, SortedSet<ByteBuffer> columnNames, List<OnDiskAtom> result, int columnCount)
+    private void readSimpleColumns(FileDataInput file, SortedSet<CellName> columnNames, List<OnDiskAtom> result, int columnCount)
     {
         Iterator<OnDiskAtom> atomIterator = cf.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version);
         int n = 0;
@@ -177,17 +174,17 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
 
     private void readIndexedColumns(CFMetaData metadata,
                                     FileDataInput file,
-                                    SortedSet<ByteBuffer> columnNames,
+                                    SortedSet<CellName> columnNames,
                                     List<IndexHelper.IndexInfo> indexList,
                                     long basePosition,
                                     List<OnDiskAtom> result)
     throws IOException
     {
         /* get the various column ranges we have to read */
-        AbstractType<?> comparator = metadata.comparator;
+        CellNameType comparator = metadata.comparator;
         List<IndexHelper.IndexInfo> ranges = new ArrayList<IndexHelper.IndexInfo>();
         int lastIndexIdx = -1;
-        for (ByteBuffer name : columns)
+        for (CellName name : columnNames)
         {
             int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx);
             if (index < 0 || index == indexList.size())
@@ -203,6 +200,8 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
         if (ranges.isEmpty())
             return;
 
+        Iterator<CellName> toFetch = columnNames.iterator();
+        CellName nextToFetch = toFetch.next();
         for (IndexHelper.IndexInfo indexInfo : ranges)
         {
             long positionToSeek = basePosition + indexInfo.offset;
@@ -211,17 +210,22 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement
             if (file == null)
                 file = createFileDataInput(positionToSeek);
 
-            // We'll read as much atom as there is in the index block, so provide a bogus atom count
-            Iterator<OnDiskAtom> atomIterator = cf.metadata().getOnDiskIterator(file, Integer.MAX_VALUE, sstable.descriptor.version);
+            AtomDeserializer deserializer = cf.metadata().getOnDiskDeserializer(file, sstable.descriptor.version);
             file.seek(positionToSeek);
             FileMark mark = file.mark();
-            // TODO only completely deserialize columns we are interested in
-            while (file.bytesPastMark(mark) < indexInfo.width)
+            while (file.bytesPastMark(mark) < indexInfo.width && nextToFetch != null)
             {
-                OnDiskAtom column = atomIterator.next();
-                // we check vs the original Set, not the filtered List, for efficiency
-                if (!(column instanceof Column) || columnNames.contains(column.name()))
-                    result.add(column);
+                int cmp = deserializer.compareNextTo(nextToFetch);
+                if (cmp == 0)
+                {
+                    nextToFetch = toFetch.hasNext() ? toFetch.next() : null;
+                    result.add(deserializer.readNext());
+                    continue;
+                }
+
+                deserializer.skipNext();
+                if (cmp > 0)
+                    nextToFetch = toFetch.hasNext() ? toFetch.next() : null;
             }
         }
     }
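
The readIndexedColumns rewrite drops the old "deserialize everything, then check membership in the name set" loop (and its TODO) in favor of a single forward pass: both the requested names and the on-disk cells are sorted, so the two can be merged, skipping unrequested cells without materializing them. Below is a self-contained sketch of that sorted-merge shape with plain strings; it shows the general idea rather than copying the committed loop line for line:

    import java.util.*;

    public class NamesMergeSketch
    {
        public static void main(String[] args)
        {
            List<String> onDisk = List.of("a", "b", "c", "e", "f");     // cells in the index block, sorted
            SortedSet<String> wanted = new TreeSet<>(List.of("b", "d", "f"));

            List<String> result = new ArrayList<>();
            Iterator<String> toFetch = wanted.iterator();
            String nextToFetch = toFetch.hasNext() ? toFetch.next() : null;

            int i = 0;
            while (i < onDisk.size() && nextToFetch != null)
            {
                int cmp = onDisk.get(i).compareTo(nextToFetch);
                if (cmp == 0)
                {
                    result.add(onDisk.get(i));  // requested cell found
                    i++;
                    nextToFetch = toFetch.hasNext() ? toFetch.next() : null;
                }
                else if (cmp < 0)
                {
                    i++;                        // unrequested cell: skip without materializing it
                }
                else
                {
                    // requested name absent from this block: advance to the next requested
                    // name, keeping the current on-disk cell for the next comparison
                    nextToFetch = toFetch.hasNext() ? toFetch.next() : null;
                }
            }
            System.out.println(result); // [b, f]
        }
    }
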

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/columniterator/SSTableSliceIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableSliceIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableSliceIterator.java
index 4faa651..d338580 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableSliceIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableSliceIterator.java
@@ -62,7 +62,7 @@ public class SSTableSliceIterator implements OnDiskAtomIterator
 
     private static OnDiskAtomIterator createReader(SSTableReader sstable, RowIndexEntry indexEntry, FileDataInput file, ColumnSlice[] slices, boolean reversed)
     {
-        return slices.length == 1 && slices[0].start.remaining() == 0 && !reversed
+        return slices.length == 1 && slices[0].start.isEmpty() && !reversed
              ? new SimpleSliceReader(sstable, indexEntry, file, slices[0].finish)
              : new IndexedSliceReader(sstable, indexEntry, file, slices, reversed);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java b/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java
index 48d20db..862fad2 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.db.columniterator;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.Iterator;
 
 import com.google.common.collect.AbstractIterator;
@@ -26,7 +25,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.SSTableReader;
@@ -40,12 +40,12 @@ class SimpleSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskAt
 
     private final FileDataInput file;
     private final boolean needsClosing;
-    private final ByteBuffer finishColumn;
-    private final AbstractType<?> comparator;
+    private final Composite finishColumn;
+    private final CellNameType comparator;
     private final ColumnFamily emptyColumnFamily;
     private final Iterator<OnDiskAtom> atomIterator;
 
-    public SimpleSliceReader(SSTableReader sstable, RowIndexEntry indexEntry, FileDataInput input, ByteBuffer finishColumn)
+    public SimpleSliceReader(SSTableReader sstable, RowIndexEntry indexEntry, FileDataInput input, Composite finishColumn)
     {
         Tracing.trace("Seeking to partition beginning in data file");
         this.finishColumn = finishColumn;
@@ -89,7 +89,7 @@ class SimpleSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskAt
             return endOfData();
 
         OnDiskAtom column = atomIterator.next();
-        if (finishColumn.remaining() > 0 && comparator.compare(column.name(), finishColumn) > 0)
+        if (!finishColumn.isEmpty() && comparator.compare(column.name(), finishColumn) > 0)
             return endOfData();
 
         return column;


[03/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index ace32e7..e807e2e 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -41,6 +41,7 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.IndexType;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.index.SecondaryIndex;
@@ -53,6 +54,7 @@ import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.thrift.*;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
 
 import static org.junit.Assert.*;
@@ -84,17 +86,17 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         RowMutation rm;
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
         cfs.forceBlockingFlush();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 1);
+        rm.add("Standard1", cellname("Column1"), ByteBufferUtil.bytes("asdf"), 1);
         rm.apply();
         cfs.forceBlockingFlush();
 
         cfs.getRecentSSTablesPerReadHistogram(); // resets counts
-        cfs.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("key1"), "Standard1", ByteBufferUtil.bytes("Column1"), System.currentTimeMillis()));
+        cfs.getColumnFamily(Util.namesQueryFilter(cfs, Util.dk("key1"), "Column1"));
         assertEquals(1, cfs.getRecentSSTablesPerReadHistogram()[0]);
     }
 
@@ -108,8 +110,8 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         List<IMutation> rms = new LinkedList<IMutation>();
         RowMutation rm;
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
-        rm.add("Standard1", ByteBufferUtil.bytes("Column2"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", cellname("Column2"), ByteBufferUtil.bytes("asdf"), 0);
         rms.add(rm);
         Util.writeColumnFamily(rms);
 
@@ -135,21 +137,12 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         {
             public void runMayThrow() throws IOException
             {
-                QueryFilter sliceFilter = QueryFilter.getSliceFilter(Util.dk("key1"),
-                                                                     "Standard2",
-                                                                     ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                                     ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                                     false,
-                                                                     1,
-                                                                     System.currentTimeMillis());
+                QueryFilter sliceFilter = QueryFilter.getSliceFilter(Util.dk("key1"), "Standard2", Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
                 ColumnFamily cf = store.getColumnFamily(sliceFilter);
                 assert cf.isMarkedForDelete();
                 assert cf.getColumnCount() == 0;
 
-                QueryFilter namesFilter = QueryFilter.getNamesFilter(Util.dk("key1"),
-                                                                     "Standard2",
-                                                                     ByteBufferUtil.bytes("a"),
-                                                                     System.currentTimeMillis());
+                QueryFilter namesFilter = Util.namesQueryFilter(store, Util.dk("key1"), "a");
                 cf = store.getColumnFamily(namesFilter);
                 assert cf.isMarkedForDelete();
                 assert cf.getColumnCount() == 0;
@@ -167,7 +160,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         IPartitioner p = StorageService.getPartitioner();
         List<Row> result = cfs.getRangeSlice(Util.range(p, "key1", "key2"),
                                              null,
-                                             new NamesQueryFilter(ByteBufferUtil.bytes("asdf")),
+                                             Util.namesFilter(cfs, "asdf"),
                                              10);
         assertEquals(1, result.size());
         assert result.get(0).key.key.equals(ByteBufferUtil.bytes("key2"));
@@ -176,26 +169,29 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     @Test
     public void testIndexScan()
     {
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1");
         RowMutation rm;
+        CellName nobirthdate = cellname("notbirthdate");
+        CellName birthdate = cellname("birthdate");
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(1L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k2"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(2L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k3"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k4aaaa"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(3L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(3L), 0);
         rm.apply();
 
         // basic single-expression query
@@ -203,7 +199,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         List<IndexExpression> clause = Arrays.asList(expr);
         IDiskAtomFilter filter = new IdentityQueryFilter();
         Range<RowPosition> range = Util.range("", "");
-        List<Row> rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
+        List<Row> rows = cfs.search(range, clause, filter, 100);
 
         assert rows != null;
         assert rows.size() == 2 : StringUtils.join(rows, ",");
@@ -214,20 +210,20 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         key = new String(rows.get(1).key.key.array(),rows.get(1).key.key.position(),rows.get(1).key.key.remaining());
         assert "k3".equals(key) : key;
 
-        assert ByteBufferUtil.bytes(1L).equals( rows.get(0).cf.getColumn(ByteBufferUtil.bytes("birthdate")).value());
-        assert ByteBufferUtil.bytes(1L).equals( rows.get(1).cf.getColumn(ByteBufferUtil.bytes("birthdate")).value());
+        assert ByteBufferUtil.bytes(1L).equals( rows.get(0).cf.getColumn(birthdate).value());
+        assert ByteBufferUtil.bytes(1L).equals( rows.get(1).cf.getColumn(birthdate).value());
 
         // add a second expression
         IndexExpression expr2 = new IndexExpression(ByteBufferUtil.bytes("notbirthdate"), IndexExpression.Operator.GTE, ByteBufferUtil.bytes(2L));
         clause = Arrays.asList(expr, expr2);
-        rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
+        rows = cfs.search(range, clause, filter, 100);
 
         assert rows.size() == 1 : StringUtils.join(rows, ",");
         key = new String(rows.get(0).key.key.array(),rows.get(0).key.key.position(),rows.get(0).key.key.remaining());
         assert "k3".equals( key );
 
         // same query again, but with resultset not including the subordinate expression
-        rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, new NamesQueryFilter(ByteBufferUtil.bytes("birthdate")), 100);
+        rows = cfs.search(range, clause, Util.namesFilter(cfs, "birthdate"), 100);
 
         assert rows.size() == 1 : StringUtils.join(rows, ",");
         key = new String(rows.get(0).key.key.array(),rows.get(0).key.key.position(),rows.get(0).key.key.remaining());
@@ -236,8 +232,8 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         assert rows.get(0).cf.getColumnCount() == 1 : rows.get(0).cf;
 
         // once more, this time with a slice rowset that needs to be expanded
-        SliceQueryFilter emptyFilter = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 0);
-        rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, emptyFilter, 100);
+        SliceQueryFilter emptyFilter = new SliceQueryFilter(Composites.EMPTY, Composites.EMPTY, false, 0);
+        rows = cfs.search(range, clause, emptyFilter, 100);
 
         assert rows.size() == 1 : StringUtils.join(rows, ",");
         key = new String(rows.get(0).key.key.array(),rows.get(0).key.key.position(),rows.get(0).key.key.remaining());
@@ -249,7 +245,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // doesn't tell the scan loop that it's done
         IndexExpression expr3 = new IndexExpression(ByteBufferUtil.bytes("notbirthdate"), IndexExpression.Operator.EQ, ByteBufferUtil.bytes(-1L));
         clause = Arrays.asList(expr, expr3);
-        rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
+        rows = cfs.search(range, clause, filter, 100);
 
         assert rows.isEmpty();
     }
@@ -258,11 +254,12 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     public void testLargeScan()
     {
         RowMutation rm;
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1");
         for (int i = 0; i < 100; i++)
         {
             rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key" + i));
-            rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(34L), 0);
-            rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes((long) (i % 2)), 0);
+            rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(34L), 0);
+            rm.add("Indexed1", cellname("notbirthdate"), ByteBufferUtil.bytes((long) (i % 2)), 0);
             rm.applyUnsafe();
         }
 
@@ -271,7 +268,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         List<IndexExpression> clause = Arrays.asList(expr, expr2);
         IDiskAtomFilter filter = new IdentityQueryFilter();
         Range<RowPosition> range = Util.range("", "");
-        List<Row> rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
+        List<Row> rows = cfs.search(range, clause, filter, 100);
 
         assert rows != null;
         assert rows.size() == 50 : rows.size();
@@ -289,7 +286,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         RowMutation rm;
 
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, ByteBufferUtil.bytes(1L));
@@ -303,7 +300,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // delete the column directly
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.delete("Indexed1", ByteBufferUtil.bytes("birthdate"), 1);
+        rm.delete("Indexed1", cellname("birthdate"), 1);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
         assert rows.isEmpty();
@@ -318,7 +315,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // resurrect w/ a newer timestamp
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 2);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 2);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
         assert rows.size() == 1 : StringUtils.join(rows, ",");
@@ -336,7 +333,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // similarly, column delete w/ older timestamp should do nothing
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.delete("Indexed1", ByteBufferUtil.bytes("birthdate"), 1);
+        rm.delete("Indexed1", cellname("birthdate"), 1);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
         assert rows.size() == 1 : StringUtils.join(rows, ",");
@@ -352,14 +349,14 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // make sure obsolete mutations don't generate an index entry
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 3);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 3);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
         assert rows.isEmpty() : StringUtils.join(rows, ",");
 
         // try insert followed by row delete in the same mutation
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 1);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 1);
         rm.delete("Indexed1", 2);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
@@ -368,7 +365,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // try row delete followed by insert in the same mutation
         rm = new RowMutation("Keyspace3", ByteBufferUtil.bytes("k1"));
         rm.delete("Indexed1", 3);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 4);
+        rm.add("Indexed1", cellname("birthdate"), ByteBufferUtil.bytes(1L), 4);
         rm.apply();
         rows = cfs.search(range, clause, filter, 100);
         assert rows.size() == 1 : StringUtils.join(rows, ",");
@@ -380,21 +377,23 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     public void testIndexUpdate() throws IOException
     {
         Keyspace keyspace = Keyspace.open("Keyspace2");
+        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Indexed1");
+        CellName birthdate = cellname("birthdate");
 
         // create a row and update the birthdate value, test that the index query fetches the new version
         RowMutation rm;
         rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 1);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 1);
         rm.apply();
         rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(2L), 2);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(2L), 2);
         rm.apply();
 
         IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, ByteBufferUtil.bytes(1L));
         List<IndexExpression> clause = Arrays.asList(expr);
         IDiskAtomFilter filter = new IdentityQueryFilter();
         Range<RowPosition> range = Util.range("", "");
-        List<Row> rows = keyspace.getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
+        List<Row> rows = cfs.search(range, clause, filter, 100);
         assert rows.size() == 0;
 
         expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, ByteBufferUtil.bytes(2L));
@@ -405,7 +404,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // update the birthdate value with an OLDER timestamp, and test that the index ignores this
         rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(3L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(3L), 0);
         rm.apply();
 
         rows = keyspace.getColumnFamilyStore("Indexed1").search(range, clause, filter, 100);
@@ -425,7 +424,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         cfs.truncateBlocking();
 
         ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
-        ByteBuffer colName = ByteBufferUtil.bytes("birthdate");
+        CellName colName = cellname("birthdate"); 
         ByteBuffer val1 = ByteBufferUtil.bytes(1L);
         ByteBuffer val2 = ByteBufferUtil.bytes(2L);
 
@@ -434,7 +433,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         rm = new RowMutation(keySpace, rowKey);
         rm.add(cfName, colName, val1, 0);
         rm.apply();
-        IndexExpression expr = new IndexExpression(colName, IndexExpression.Operator.EQ, val1);
+        IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, val1);
         List<IndexExpression> clause = Arrays.asList(expr);
         IDiskAtomFilter filter = new IdentityQueryFilter();
         Range<RowPosition> range = Util.range("", "");
@@ -456,7 +455,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         rows = keyspace.getColumnFamilyStore(cfName).search(range, clause, filter, 100);
         assertEquals(0, rows.size());
         // now check for the updated value
-        expr = new IndexExpression(colName, IndexExpression.Operator.EQ, val2);
+        expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, val2);
         clause = Arrays.asList(expr);
         filter = new IdentityQueryFilter();
         range = Util.range("", "");
@@ -469,7 +468,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         rm.add(cfName, colName, ByteBufferUtil.bytes(1L), 3);
         keyspace.apply(rm, true, false);
 
-        expr = new IndexExpression(colName, IndexExpression.Operator.EQ, ByteBufferUtil.bytes(1L));
+        expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexExpression.Operator.EQ, ByteBufferUtil.bytes(1L));
         clause = Arrays.asList(expr);
         filter = new IdentityQueryFilter();
         range = Util.range("", "");
@@ -489,12 +488,10 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
         ByteBuffer clusterKey = ByteBufferUtil.bytes("ck1");
-        ByteBuffer colName = ByteBufferUtil.bytes("col1");
-        CompositeType baseComparator = (CompositeType)cfs.getComparator();
-        CompositeType.Builder builder = baseComparator.builder();
-        builder.add(clusterKey);
-        builder.add(colName);
-        ByteBuffer compositeName = builder.build();
+        ByteBuffer colName = ByteBufferUtil.bytes("col1"); 
+
+        CellNameType baseComparator = cfs.getComparator();
+        CellName compositeName = baseComparator.makeCellName(clusterKey, colName);
 
         ByteBuffer val1 = ByteBufferUtil.bytes("v1");
         ByteBuffer val2 = ByteBufferUtil.bytes("v2");
@@ -565,11 +562,9 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
         ByteBuffer clusterKey = ByteBufferUtil.bytes("ck1");
         ByteBuffer colName = ByteBufferUtil.bytes("col1");
-        CompositeType baseComparator = (CompositeType)cfs.getComparator();
-        CompositeType.Builder builder = baseComparator.builder();
-        builder.add(clusterKey);
-        builder.add(colName);
-        ByteBuffer compositeName = builder.build();
+
+        CellNameType baseComparator = cfs.getComparator();
+        CellName compositeName = baseComparator.makeCellName(clusterKey, colName);
 
         ByteBuffer val1 = ByteBufferUtil.bytes("v2");
 
@@ -603,26 +598,30 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     @Test
     public void testIndexScanWithLimitOne()
     {
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1");
         RowMutation rm;
 
+        CellName nobirthdate = cellname("notbirthdate");
+        CellName birthdate = cellname("birthdate");
+
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("kk1"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(1L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("kk2"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("kk3"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("kk4"));
-        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(2L), 0);
-        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", nobirthdate, ByteBufferUtil.bytes(2L), 0);
+        rm.add("Indexed1", birthdate, ByteBufferUtil.bytes(1L), 0);
         rm.apply();
 
         // basic single-expression query
@@ -631,7 +630,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         List<IndexExpression> clause = Arrays.asList(expr1, expr2);
         IDiskAtomFilter filter = new IdentityQueryFilter();
         Range<RowPosition> range = Util.range("", "");
-        List<Row> rows = Keyspace.open("Keyspace1").getColumnFamilyStore("Indexed1").search(range, clause, filter, 1);
+        List<Row> rows = cfs.search(range, clause, filter, 1);
 
         assert rows != null;
         assert rows.size() == 1 : StringUtils.join(rows, ",");
@@ -641,14 +640,14 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     public void testIndexCreate() throws IOException, InterruptedException, ExecutionException
     {
         Keyspace keyspace = Keyspace.open("Keyspace1");
+        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Indexed2");
 
         // create a row and update the birthdate value, test that the index query fetches the new version
         RowMutation rm;
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1"));
-        rm.add("Indexed2", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 1);
+        rm.add("Indexed2", cellname("birthdate"), ByteBufferUtil.bytes(1L), 1);
         rm.apply();
 
-        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Indexed2");
         ColumnDefinition old = cfs.metadata.getColumnDefinition(ByteBufferUtil.bytes("birthdate"));
         ColumnDefinition cd = ColumnDefinition.regularDef(cfs.metadata, old.name.bytes, old.type, null).setIndex("birthdate_index", IndexType.KEYS, null);
         Future<?> future = cfs.indexManager.addIndexedColumn(cd);
@@ -686,7 +685,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         List<Row> result = cfs.getRangeSlice(Util.bounds("key1", "key2"),
                                              null,
-                                             new NamesQueryFilter(ByteBufferUtil.bytes("asdf")),
+                                             Util.namesFilter(cfs, "asdf"),
                                              10);
         assertEquals(2, result.size());
         assert result.get(0).key.key.equals(ByteBufferUtil.bytes("key1"));
@@ -704,16 +703,16 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // create an isolated sstable.
         putColsSuper(cfs, key, scfName,
-                new Column(getBytes(1L), ByteBufferUtil.bytes("val1"), 1),
-                new Column(getBytes(2L), ByteBufferUtil.bytes("val2"), 1),
-                new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 1));
+                new Column(cellname(1L), ByteBufferUtil.bytes("val1"), 1),
+                new Column(cellname(2L), ByteBufferUtil.bytes("val2"), 1),
+                new Column(cellname(3L), ByteBufferUtil.bytes("val3"), 1));
         cfs.forceBlockingFlush();
 
         // insert, don't flush.
         putColsSuper(cfs, key, scfName,
-                new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1),
-                new Column(getBytes(5L), ByteBufferUtil.bytes("val5"), 1),
-                new Column(getBytes(6L), ByteBufferUtil.bytes("val6"), 1));
+                new Column(cellname(4L), ByteBufferUtil.bytes("val4"), 1),
+                new Column(cellname(5L), ByteBufferUtil.bytes("val5"), 1),
+                new Column(cellname(6L), ByteBufferUtil.bytes("val6"), 1));
 
         // verify insert.
         final SlicePredicate sp = new SlicePredicate();
@@ -740,17 +739,17 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
         // late insert.
         putColsSuper(cfs, key, scfName,
-                new Column(getBytes(4L), ByteBufferUtil.bytes("val4"), 1L),
-                new Column(getBytes(7L), ByteBufferUtil.bytes("val7"), 1L));
+                new Column(cellname(4L), ByteBufferUtil.bytes("val4"), 1L),
+                new Column(cellname(7L), ByteBufferUtil.bytes("val7"), 1L));
 
         // re-verify delete.
         assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
 
         // make sure new writes are recognized.
         putColsSuper(cfs, key, scfName,
-                new Column(getBytes(3L), ByteBufferUtil.bytes("val3"), 3),
-                new Column(getBytes(8L), ByteBufferUtil.bytes("val8"), 3),
-                new Column(getBytes(9L), ByteBufferUtil.bytes("val9"), 3));
+                new Column(cellname(3L), ByteBufferUtil.bytes("val3"), 3),
+                new Column(cellname(8L), ByteBufferUtil.bytes("val8"), 3),
+                new Column(cellname(9L), ByteBufferUtil.bytes("val9"), 3));
         assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
     }
 
@@ -770,7 +769,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     {
         StringBuilder sb = new StringBuilder();
         for (Column col : cf.getSortedColumns())
-            sb.append(String.format("(%s,%s,%d),", ByteBufferUtil.string(col.name()), ByteBufferUtil.string(col.value()), col.timestamp()));
+            sb.append(String.format("(%s,%s,%d),", ByteBufferUtil.string(col.name().toByteBuffer()), ByteBufferUtil.string(col.value()), col.timestamp()));
         return sb.toString();
     }
 
@@ -778,7 +777,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
     {
         ColumnFamily cf = TreeMapBackedSortedColumns.factory.create(cfs.keyspace.getName(), cfs.name);
         for (Column col : cols)
-            cf.addColumn(col.withUpdatedName(CompositeType.build(scfName, col.name())));
+            cf.addColumn(col.withUpdatedName(CellNames.compositeDense(scfName, col.name().toByteBuffer())));
         RowMutation rm = new RowMutation(cfs.keyspace.getName(), key.key, cf);
         rm.apply();
     }
@@ -851,15 +850,16 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
     private ColumnFamilyStore insertKey1Key2() throws IOException, ExecutionException, InterruptedException
     {
+        ColumnFamilyStore cfs = Keyspace.open("Keyspace2").getColumnFamilyStore("Standard1");
         List<IMutation> rms = new LinkedList<IMutation>();
         RowMutation rm;
         rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("key1"));
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rms.add(rm);
         Util.writeColumnFamily(rms);
 
         rm = new RowMutation("Keyspace2", ByteBufferUtil.bytes("key2"));
-        rm.add("Standard1", ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add("Standard1", cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
         rms.add(rm);
         return Util.writeColumnFamily(rms);
     }
@@ -890,24 +890,24 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         DecoratedKey key = Util.dk("slice-get-uuid-type");
 
         // Insert a row with one supercolumn and multiple subcolumns
-        putColsSuper(cfs, key, superColName, new Column(ByteBufferUtil.bytes("a"), ByteBufferUtil.bytes("A"), 1),
-                                             new Column(ByteBufferUtil.bytes("b"), ByteBufferUtil.bytes("B"), 1));
+        putColsSuper(cfs, key, superColName, new Column(cellname("a"), ByteBufferUtil.bytes("A"), 1),
+                                             new Column(cellname("b"), ByteBufferUtil.bytes("B"), 1));
 
         // Get the entire supercolumn like normal
         ColumnFamily cfGet = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis()));
-        assertEquals(ByteBufferUtil.bytes("A"), cfGet.getColumn(CompositeType.build(superColName, ByteBufferUtil.bytes("a"))).value());
-        assertEquals(ByteBufferUtil.bytes("B"), cfGet.getColumn(CompositeType.build(superColName, ByteBufferUtil.bytes("b"))).value());
+        assertEquals(ByteBufferUtil.bytes("A"), cfGet.getColumn(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("a"))).value());
+        assertEquals(ByteBufferUtil.bytes("B"), cfGet.getColumn(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("b"))).value());
 
         // Now do the SliceByNamesCommand on the supercolumn, passing both subcolumns in as columns to get
-        SortedSet<ByteBuffer> sliceColNames = new TreeSet<ByteBuffer>(cfs.metadata.comparator);
-        sliceColNames.add(CompositeType.build(superColName, ByteBufferUtil.bytes("a")));
-        sliceColNames.add(CompositeType.build(superColName, ByteBufferUtil.bytes("b")));
+        SortedSet<CellName> sliceColNames = new TreeSet<CellName>(cfs.metadata.comparator);
+        sliceColNames.add(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("a")));
+        sliceColNames.add(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("b")));
         SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(keyspaceName, key.key, cfName, System.currentTimeMillis(), new NamesQueryFilter(sliceColNames));
         ColumnFamily cfSliced = cmd.getRow(keyspace).cf;
 
         // Make sure the slice returns the same as the straight get
-        assertEquals(ByteBufferUtil.bytes("A"), cfSliced.getColumn(CompositeType.build(superColName, ByteBufferUtil.bytes("a"))).value());
-        assertEquals(ByteBufferUtil.bytes("B"), cfSliced.getColumn(CompositeType.build(superColName, ByteBufferUtil.bytes("b"))).value());
+        assertEquals(ByteBufferUtil.bytes("A"), cfSliced.getColumn(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("a"))).value());
+        assertEquals(ByteBufferUtil.bytes("B"), cfSliced.getColumn(CellNames.compositeDense(superColName, ByteBufferUtil.bytes("b"))).value());
     }
 
     @Test
@@ -916,7 +916,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         String keyspaceName = "Keyspace1";
         String cfName= "Standard1";
         DecoratedKey key = Util.dk("slice-name-old-metadata");
-        ByteBuffer cname = ByteBufferUtil.bytes("c1");
+        CellName cname = cellname("c1");
         Keyspace keyspace = Keyspace.open(keyspaceName);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
         cfs.clearUnsafe();
@@ -938,7 +938,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         putColsStandard(cfs, key, new Column(cname, ByteBufferUtil.bytes("b"), 1));
 
         // Test fetching the column by name returns the first column
-        SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(keyspaceName, key.key, cfName, System.currentTimeMillis(), new NamesQueryFilter(cname));
+        SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(keyspaceName, key.key, cfName, System.currentTimeMillis(), new NamesQueryFilter(FBUtilities.singleton(cname, cfs.getComparator())));
         ColumnFamily cf = cmd.getRow(keyspace).cf;
         Column column = cf.getColumn(cname);
         assert column.value().equals(ByteBufferUtil.bytes("a")) : "expecting a, got " + ByteBufferUtil.string(column.value());
@@ -1138,11 +1138,11 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         assertColumnNames(row2, "c0", "c1");
 
         // Paging within bounds
-        SliceQueryFilter sf = new SliceQueryFilter(ByteBufferUtil.bytes("c1"),
-                                                   ByteBufferUtil.bytes("c2"),
+        SliceQueryFilter sf = new SliceQueryFilter(cellname("c1"),
+                                                   cellname("c2"),
                                                    false,
                                                    0);
-        rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds<RowPosition>(ka, kc), sf, ByteBufferUtil.bytes("c2"), ByteBufferUtil.bytes("c1"), null, 2, System.currentTimeMillis()));
+        rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds<RowPosition>(ka, kc), sf, cellname("c2"), cellname("c1"), null, 2, System.currentTimeMillis()));
         assert rows.size() == 2 : "Expected 2 rows, got " + toString(rows);
         iter = rows.iterator();
         row1 = iter.next();
@@ -1150,7 +1150,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         assertColumnNames(row1, "c2");
         assertColumnNames(row2, "c1");
 
-        rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds<RowPosition>(kb, kc), sf, ByteBufferUtil.bytes("c1"), ByteBufferUtil.bytes("c1"), null, 10, System.currentTimeMillis()));
+        rows = cfs.getRangeSlice(cfs.makeExtendedFilter(new Bounds<RowPosition>(kb, kc), sf, cellname("c1"), cellname("c1"), null, 10, System.currentTimeMillis()));
         assert rows.size() == 2 : "Expected 2 rows, got " + toString(rows);
         iter = rows.iterator();
         row1 = iter.next();
@@ -1172,7 +1172,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
                 if (row.cf != null && !row.cf.isEmpty())
                 {
                     for (Column c : row.cf)
-                        sb.append(" ").append(ByteBufferUtil.string(c.name()));
+                        sb.append(" ").append(row.cf.getComparator().getString(c.name()));
                 }
                 sb.append("} ");
             }
@@ -1195,9 +1195,9 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         while (columns.hasNext())
         {
             Column c = columns.next();
-            assert names.hasNext() : "Got more columns that expected (first unexpected column: " + ByteBufferUtil.string(c.name()) + ")";
+            assert names.hasNext() : "Got more columns that expected (first unexpected column: " + ByteBufferUtil.string(c.name().toByteBuffer()) + ")";
             String n = names.next();
-            assert c.name().equals(ByteBufferUtil.bytes(n)) : "Expected " + n + ", got " + ByteBufferUtil.string(c.name());
+            assert c.name().toByteBuffer().equals(ByteBufferUtil.bytes(n)) : "Expected " + n + ", got " + ByteBufferUtil.string(c.name().toByteBuffer());
         }
         assert !names.hasNext() : "Missing expected column " + names.next();
     }
@@ -1273,7 +1273,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes(String.valueOf("k" + i));
             RowMutation rm = new RowMutation("Keyspace1", key);
-            rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), LongType.instance.decompose(1L), System.currentTimeMillis());
+            rm.add("Indexed1", cellname("birthdate"), LongType.instance.decompose(1L), System.currentTimeMillis());
             rm.apply();
         }
 
@@ -1293,18 +1293,18 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] ranges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
-                new ColumnSlice(bytes("colC"), bytes("colE")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colA")),
+                new ColumnSlice(cellname("colC"), cellname("colE")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colI"), Composites.EMPTY) };
 
         ColumnSlice[] rangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colE"), bytes("colC")),
-                new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colI")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colE"), cellname("colC")),
+                new ColumnSlice(cellname("colA"), Composites.EMPTY) };
 
         String tableName = "Keyspace1";
         String cfName = "Standard1";
@@ -1316,7 +1316,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[letters.length];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+            cols[i] = new Column(cellname("col" + letters[i].toUpperCase()),
                     ByteBuffer.wrap(new byte[1]), 1);
         }
 
@@ -1342,18 +1342,18 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] ranges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
-                new ColumnSlice(bytes("colC"), bytes("colE")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colA")),
+                new ColumnSlice(cellname("colC"), cellname("colE")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colI"), Composites.EMPTY) };
 
         ColumnSlice[] rangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colE"), bytes("colC")),
-                new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY,  cellname("colI")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colE"), cellname("colC")),
+                new ColumnSlice(cellname("colA"), Composites.EMPTY) };
 
         String tableName = "Keyspace1";
         String cfName = "Standard1";
@@ -1365,7 +1365,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[letters.length];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+            cols[i] = new Column(cellname("col" + letters[i].toUpperCase()),
                     ByteBuffer.wrap(new byte[1366]), 1);
         }
 
@@ -1391,18 +1391,18 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] ranges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
-                new ColumnSlice(bytes("colC"), bytes("colE")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colA")),
+                new ColumnSlice(cellname("colC"), cellname("colE")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colI"), Composites.EMPTY) };
 
         ColumnSlice[] rangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colE"), bytes("colC")),
-                new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colI")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colE"), cellname("colC")),
+                new ColumnSlice(cellname("colA"), Composites.EMPTY) };
 
         String tableName = "Keyspace1";
         String cfName = "Standard1";
@@ -1414,7 +1414,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[letters.length];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+            cols[i] = new Column(cellname("col" + letters[i].toUpperCase()),
                     ByteBuffer.wrap(new byte[1]), 1);
         }
 
@@ -1441,18 +1441,18 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] ranges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
-                new ColumnSlice(bytes("colC"), bytes("colE")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colA")),
+                new ColumnSlice(cellname("colC"), cellname("colE")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colI"), Composites.EMPTY) };
 
         ColumnSlice[] rangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colF"), bytes("colF")),
-                new ColumnSlice(bytes("colE"), bytes("colC")),
-                new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colI")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colF"), cellname("colF")),
+                new ColumnSlice(cellname("colE"), cellname("colC")),
+                new ColumnSlice(cellname("colA"), Composites.EMPTY) };
 
         String tableName = "Keyspace1";
         String cfName = "Standard1";
@@ -1464,7 +1464,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[letters.length];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+            cols[i] = new Column(cellname("col" + letters[i].toUpperCase()),
                     ByteBuffer.wrap(new byte[1366]), 1);
         }
 
@@ -1491,16 +1491,16 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] ranges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
-                new ColumnSlice(bytes("colC"), bytes("colE")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colA")),
+                new ColumnSlice(cellname("colC"), cellname("colE")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colI"), Composites.EMPTY) };
 
         ColumnSlice[] rangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
-                new ColumnSlice(bytes("colG"), bytes("colG")),
-                new ColumnSlice(bytes("colE"), bytes("colC")),
-                new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colI")),
+                new ColumnSlice(cellname("colG"), cellname("colG")),
+                new ColumnSlice(cellname("colE"), cellname("colC")),
+                new ColumnSlice(cellname("colA"), Composites.EMPTY) };
 
         String keyspaceName = "Keyspace1";
         String cfName = "Standard1";
@@ -1512,7 +1512,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[letters.length];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+            cols[i] = new Column(cellname("col" + letters[i].toUpperCase()),
                     // use 1366 so that three cols make an index segment
                     ByteBuffer.wrap(new byte[1366]), 1);
         }
@@ -1631,7 +1631,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         Column[] cols = new Column[12];
         for (int i = 0; i < cols.length; i++)
         {
-            cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i]), ByteBuffer.wrap(new byte[valueSize]), 1);
+            cols[i] = new Column(cellname("col" + letters[i]), ByteBuffer.wrap(new byte[valueSize]), 1);
         }
 
         for (int i = 0; i < 12; i++)
@@ -1654,35 +1654,37 @@ public class ColumnFamilyStoreTest extends SchemaLoader
 
     private void testMultiRangeSlicesBehavior(ColumnFamilyStore cfs)
     {
+        CellNameType type = cfs.getComparator();
+
         // in order not to change thrift interfaces at this stage we build SliceQueryFilter
         // directly instead of using QueryFilter to build it for us
         ColumnSlice[] startMiddleAndEndRanges = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colc")),
-                new ColumnSlice(bytes("colf"), bytes("colg")),
-                new ColumnSlice(bytes("colj"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colc")),
+                new ColumnSlice(cellname("colf"), cellname("colg")),
+                new ColumnSlice(cellname("colj"), Composites.EMPTY) };
 
         ColumnSlice[] startMiddleAndEndRangesReversed = new ColumnSlice[] {
-                new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colj")),
-                new ColumnSlice(bytes("colg"), bytes("colf")),
-                new ColumnSlice(bytes("colc"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice(Composites.EMPTY, cellname("colj")),
+                new ColumnSlice(cellname("colg"), cellname("colf")),
+                new ColumnSlice(cellname("colc"), Composites.EMPTY) };
 
         ColumnSlice[] startOnlyRange =
-                new ColumnSlice[] { new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colc")) };
+                new ColumnSlice[] { new ColumnSlice(Composites.EMPTY, cellname("colc")) };
 
         ColumnSlice[] startOnlyRangeReversed =
-                new ColumnSlice[] { new ColumnSlice(bytes("colc"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice[] { new ColumnSlice(cellname("colc"), Composites.EMPTY) };
 
         ColumnSlice[] middleOnlyRanges =
-                new ColumnSlice[] { new ColumnSlice(bytes("colf"), bytes("colg")) };
+                new ColumnSlice[] { new ColumnSlice(cellname("colf"), cellname("colg")) };
 
         ColumnSlice[] middleOnlyRangesReversed =
-                new ColumnSlice[] { new ColumnSlice(bytes("colg"), bytes("colf")) };
+                new ColumnSlice[] { new ColumnSlice(cellname("colg"), cellname("colf")) };
 
         ColumnSlice[] endOnlyRanges =
-                new ColumnSlice[] { new ColumnSlice(bytes("colj"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+                new ColumnSlice[] { new ColumnSlice(cellname("colj"), Composites.EMPTY) };
 
         ColumnSlice[] endOnlyRangesReversed =
-                new ColumnSlice[] { new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colj")) };
+                new ColumnSlice[] { new ColumnSlice(Composites.EMPTY, cellname("colj")) };
 
         SliceQueryFilter startOnlyFilter = new SliceQueryFilter(startOnlyRange, false,
                 Integer.MAX_VALUE);
@@ -1867,7 +1869,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         {
             public String apply(Column arg0)
             {
-                return new String(arg0.name().array());
+                return Util.string(arg0.name().toByteBuffer());
             }
         }), String.class);
 
@@ -1877,7 +1879,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         int i = 0;
         for (Column col : cols)
         {
-            assertEquals(colNames[i++], new String(col.name().array()));
+            assertEquals(colNames[i++], Util.string(col.name().toByteBuffer()));
         }
     }
 
@@ -1891,7 +1893,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
                 {
                     public String apply(Column arg0)
                     {
-                        return new String(arg0.name().array());
+                        return Util.string(arg0.name().toByteBuffer());
                     }
                 }));
         System.err.println("Filtered:");
@@ -1899,7 +1901,7 @@ public class ColumnFamilyStoreTest extends SchemaLoader
         {
             public String apply(Column arg0)
             {
-                return new String(arg0.name().array());
+                return Util.string(arg0.name().toByteBuffer());
             }
         });
         System.err.println("Row key: " + rowKey + " Cols: " + transformed);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
index a01c25c..42f6066 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
@@ -33,6 +33,7 @@ import org.apache.cassandra.io.sstable.ColumnStats;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.net.MessagingService;
 import static org.apache.cassandra.Util.column;
+import static org.apache.cassandra.Util.cellname;
 import static org.junit.Assert.assertEquals;
 
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -87,7 +88,7 @@ public class ColumnFamilyTest extends SchemaLoader
         cf = ColumnFamily.serializer.deserialize(new DataInputStream(bufIn), version);
         for (String cName : map.navigableKeySet())
         {
-            ByteBuffer val = cf.getColumn(ByteBufferUtil.bytes(cName)).value();
+            ByteBuffer val = cf.getColumn(cellname(cName)).value();
             assert new String(val.array(),val.position(),val.remaining()).equals(map.get(cName));
         }
         assert Iterables.size(cf.getColumnNames()) == map.size();
@@ -115,7 +116,7 @@ public class ColumnFamilyTest extends SchemaLoader
         cf.addColumn(column("col1", "val2", 2)); // same timestamp, new value
         cf.addColumn(column("col1", "val3", 1)); // older timestamp -- should be ignored
 
-        assert ByteBufferUtil.bytes("val2").equals(cf.getColumn(ByteBufferUtil.bytes("col1")).value());
+        assert ByteBufferUtil.bytes("val2").equals(cf.getColumn(cellname("col1")).value());
     }
 
     @Test
@@ -127,30 +128,30 @@ public class ColumnFamilyTest extends SchemaLoader
         ByteBuffer val = ByteBufferUtil.bytes("sample value");
         ByteBuffer val2 = ByteBufferUtil.bytes("x value ");
 
-        cf_new.addColumn(ByteBufferUtil.bytes("col1"), val, 3);
-        cf_new.addColumn(ByteBufferUtil.bytes("col2"), val, 4);
+        cf_new.addColumn(cellname("col1"), val, 3);
+        cf_new.addColumn(cellname("col2"), val, 4);
 
-        cf_old.addColumn(ByteBufferUtil.bytes("col2"), val2, 1);
-        cf_old.addColumn(ByteBufferUtil.bytes("col3"), val2, 2);
+        cf_old.addColumn(cellname("col2"), val2, 1);
+        cf_old.addColumn(cellname("col3"), val2, 2);
 
         cf_result.addAll(cf_new, HeapAllocator.instance);
         cf_result.addAll(cf_old, HeapAllocator.instance);
 
         assert 3 == cf_result.getColumnCount() : "Count is " + cf_new.getColumnCount();
         //addcolumns will only add if timestamp >= old timestamp
-        assert val.equals(cf_result.getColumn(ByteBufferUtil.bytes("col2")).value());
+        assert val.equals(cf_result.getColumn(cellname("col2")).value());
 
         // check that tombstone wins timestamp ties
-        cf_result.addTombstone(ByteBufferUtil.bytes("col1"), 0, 3);
-        assert cf_result.getColumn(ByteBufferUtil.bytes("col1")).isMarkedForDelete(System.currentTimeMillis());
-        cf_result.addColumn(ByteBufferUtil.bytes("col1"), val2, 3);
-        assert cf_result.getColumn(ByteBufferUtil.bytes("col1")).isMarkedForDelete(System.currentTimeMillis());
+        cf_result.addTombstone(cellname("col1"), 0, 3);
+        assert cf_result.getColumn(cellname("col1")).isMarkedForDelete(System.currentTimeMillis());
+        cf_result.addColumn(cellname("col1"), val2, 3);
+        assert cf_result.getColumn(cellname("col1")).isMarkedForDelete(System.currentTimeMillis());
 
         // check that column value wins timestamp ties in absence of tombstone
-        cf_result.addColumn(ByteBufferUtil.bytes("col3"), val, 2);
-        assert cf_result.getColumn(ByteBufferUtil.bytes("col3")).value().equals(val2);
-        cf_result.addColumn(ByteBufferUtil.bytes("col3"), ByteBufferUtil.bytes("z"), 2);
-        assert cf_result.getColumn(ByteBufferUtil.bytes("col3")).value().equals(ByteBufferUtil.bytes("z"));
+        cf_result.addColumn(cellname("col3"), val, 2);
+        assert cf_result.getColumn(cellname("col3")).value().equals(val2);
+        cf_result.addColumn(cellname("col3"), ByteBufferUtil.bytes("z"), 2);
+        assert cf_result.getColumn(cellname("col3")).value().equals(ByteBufferUtil.bytes("z"));
     }
 
     @Test
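Only the cell addressing changes in this hunk; the timestamp-tie rules the test checks stay the same. A minimal sketch of the CellName-keyed calls (illustrative class name; assumes the Keyspace1/Standard1 schema loaded by SchemaLoader):

    import org.apache.cassandra.db.ColumnFamily;
    import org.apache.cassandra.db.TreeMapBackedSortedColumns;
    import org.apache.cassandra.utils.ByteBufferUtil;

    import static org.apache.cassandra.Util.cellname;

    public class CellNameTieBreakSketch
    {
        static void tombstoneWinsTies()
        {
            ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Standard1");
            cf.addColumn(cellname("col1"), ByteBufferUtil.bytes("v1"), 3);

            // A tombstone at the same timestamp wins the tie, and a later write with
            // that same timestamp does not resurrect the cell.
            cf.addTombstone(cellname("col1"), 0, 3);
            cf.addColumn(cellname("col1"), ByteBufferUtil.bytes("v2"), 3);
            assert cf.getColumn(cellname("col1")).isMarkedForDelete(System.currentTimeMillis());
        }
    }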

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/CommitLogTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/CommitLogTest.java b/test/unit/org/apache/cassandra/db/CommitLogTest.java
index 6c1b56b..587e731 100644
--- a/test/unit/org/apache/cassandra/db/CommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/CommitLogTest.java
@@ -29,6 +29,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogDescriptor;
@@ -99,7 +100,7 @@ public class CommitLogTest extends SchemaLoader
         CommitLog.instance.resetUnsafe();
         // Roughly 32 MB mutation
         RowMutation rm = new RowMutation("Keyspace1", bytes("k"));
-        rm.add("Standard1", bytes("c1"), ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize()/4), 0);
+        rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize()/4), 0);
 
         // Adding it 5 times
         CommitLog.instance.add(rm);
@@ -110,7 +111,7 @@ public class CommitLogTest extends SchemaLoader
 
         // Adding new mutation on another CF
         RowMutation rm2 = new RowMutation("Keyspace1", bytes("k"));
-        rm2.add("Standard2", bytes("c1"), ByteBuffer.allocate(4), 0);
+        rm2.add("Standard2", Util.cellname("c1"), ByteBuffer.allocate(4), 0);
         CommitLog.instance.add(rm2);
 
         assert CommitLog.instance.activeSegments() == 2 : "Expecting 2 segments, got " + CommitLog.instance.activeSegments();
@@ -129,7 +130,7 @@ public class CommitLogTest extends SchemaLoader
         CommitLog.instance.resetUnsafe();
         // Roughly 32 MB mutation
         RowMutation rm = new RowMutation("Keyspace1", bytes("k"));
-        rm.add("Standard1", bytes("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/4) - 1), 0);
+        rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/4) - 1), 0);
 
         // Adding it twice (won't change segment)
         CommitLog.instance.add(rm);
@@ -146,7 +147,7 @@ public class CommitLogTest extends SchemaLoader
 
         // Adding new mutation on another CF, large enough (including CL entry overhead) that a new segment is created
         RowMutation rm2 = new RowMutation("Keyspace1", bytes("k"));
-        rm2.add("Standard2", bytes("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/2) - 100), 0);
+        rm2.add("Standard2", Util.cellname("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/2) - 100), 0);
         CommitLog.instance.add(rm2);
         // also forces a new segment, since each entry-with-overhead is just under half the CL size
         CommitLog.instance.add(rm2);
@@ -172,7 +173,7 @@ public class CommitLogTest extends SchemaLoader
         CommitLog.instance.resetUnsafe();
 
         RowMutation rm = new RowMutation("Keyspace1", bytes("k"));
-        rm.add("Standard1", bytes("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()) - 83), 0);
+        rm.add("Standard1", Util.cellname("c1"), ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()) - 83), 0);
         CommitLog.instance.add(rm);
     }
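The mutation-building pattern is the same across these test changes: RowMutation.add now takes a CellName for the column name. A minimal sketch (illustrative class name; Keyspace1/Standard1 as defined by the test schema):

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameWriteSketch
    {
        static void writeOne()
        {
            RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k"));
            // The column-name argument is a CellName now, not a raw ByteBuffer.
            rm.add("Standard1", Util.cellname("c1"), ByteBufferUtil.bytes("v"), 0);
            rm.apply();
        }
    }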
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/CounterColumnTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/CounterColumnTest.java b/test/unit/org/apache/cassandra/db/CounterColumnTest.java
index a2add18..0ca2636 100644
--- a/test/unit/org/apache/cassandra/db/CounterColumnTest.java
+++ b/test/unit/org/apache/cassandra/db/CounterColumnTest.java
@@ -31,7 +31,9 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.context.CounterContext;
+import org.apache.cassandra.db.marshal.UTF8Type;
 import static org.apache.cassandra.db.context.CounterContext.ContextState;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.utils.*;
@@ -59,7 +61,7 @@ public class CounterColumnTest extends SchemaLoader
     public void testCreate()
     {
         long delta = 3L;
-        CounterUpdateColumn cuc = new CounterUpdateColumn(ByteBufferUtil.bytes("x"), delta, 1L);
+        CounterUpdateColumn cuc = new CounterUpdateColumn(Util.cellname("x"), delta, 1L);
         CounterColumn column = cuc.localCopy(Keyspace.open("Keyspace5").getColumnFamilyStore("Counter1"));
 
         Assert.assertEquals(delta, column.total());
@@ -80,33 +82,33 @@ public class CounterColumnTest extends SchemaLoader
         ByteBuffer context;
 
         // tombstone + tombstone
-        left  = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
-        right = new DeletedColumn(ByteBufferUtil.bytes("x"), 2, 2L);
+        left  = new DeletedColumn(Util.cellname("x"), 1, 1L);
+        right = new DeletedColumn(Util.cellname("x"), 2, 2L);
 
         Assert.assertEquals(left.reconcile(right).getMarkedForDeleteAt(), right.getMarkedForDeleteAt());
         Assert.assertEquals(right.reconcile(left).getMarkedForDeleteAt(), right.getMarkedForDeleteAt());
 
         // tombstone > live
-        left  = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 1L);
+        left  = new DeletedColumn(Util.cellname("x"), 1, 2L);
+        right = new CounterColumn(Util.cellname("x"), 0L, 1L);
 
         Assert.assertEquals(left.reconcile(right), left);
 
         // tombstone < live last delete
-        left  = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
+        left  = new DeletedColumn(Util.cellname("x"), 1, 1L);
+        right = new CounterColumn(Util.cellname("x"), 0L, 4L, 2L);
 
         Assert.assertEquals(left.reconcile(right), right);
 
         // tombstone == live last delete
-        left  = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
+        left  = new DeletedColumn(Util.cellname("x"), 1, 2L);
+        right = new CounterColumn(Util.cellname("x"), 0L, 4L, 2L);
 
         Assert.assertEquals(left.reconcile(right), right);
 
         // tombstone > live last delete
-        left  = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 4L);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 9L, 1L);
+        left  = new DeletedColumn(Util.cellname("x"), 1, 4L);
+        right = new CounterColumn(Util.cellname("x"), 0L, 9L, 1L);
 
         reconciled = left.reconcile(right);
         Assert.assertEquals(reconciled.name(), right.name());
@@ -115,26 +117,26 @@ public class CounterColumnTest extends SchemaLoader
         Assert.assertEquals(((CounterColumn)reconciled).timestampOfLastDelete(), left.getMarkedForDeleteAt());
 
         // live < tombstone
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 1L);
-        right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
+        left  = new CounterColumn(Util.cellname("x"), 0L, 1L);
+        right = new DeletedColumn(Util.cellname("x"), 1, 2L);
 
         Assert.assertEquals(left.reconcile(right), right);
 
         // live last delete > tombstone
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
-        right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 1L);
+        left  = new CounterColumn(Util.cellname("x"), 0L, 4L, 2L);
+        right = new DeletedColumn(Util.cellname("x"), 1, 1L);
 
         Assert.assertEquals(left.reconcile(right), left);
 
         // live last delete == tombstone
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 4L, 2L);
-        right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 2L);
+        left  = new CounterColumn(Util.cellname("x"), 0L, 4L, 2L);
+        right = new DeletedColumn(Util.cellname("x"), 1, 2L);
 
         Assert.assertEquals(left.reconcile(right), left);
 
         // live last delete < tombstone
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), 0L, 9L, 1L);
-        right = new DeletedColumn(ByteBufferUtil.bytes("x"), 1, 4L);
+        left  = new CounterColumn(Util.cellname("x"), 0L, 9L, 1L);
+        right = new DeletedColumn(Util.cellname("x"), 1, 4L);
 
         reconciled = left.reconcile(right);
         Assert.assertEquals(reconciled.name(), left.name());
@@ -143,20 +145,20 @@ public class CounterColumnTest extends SchemaLoader
         Assert.assertEquals(((CounterColumn)reconciled).timestampOfLastDelete(), right.getMarkedForDeleteAt());
 
         // live < live last delete
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, 3L);
+        left  = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
+        right = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, 3L);
 
         Assert.assertEquals(left.reconcile(right), right);
 
         // live last delete > live
-        left  = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 6L, 5L);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, 3L);
+        left  = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 6L, 5L);
+        right = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, 3L);
 
         Assert.assertEquals(left.reconcile(right), left);
 
         // live + live
-        left = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, Long.MIN_VALUE);
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
+        left = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 1L, 1L, false), 4L, Long.MIN_VALUE);
+        right = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(1), 2L, 3L, false), 1L, Long.MIN_VALUE);
 
         reconciled = left.reconcile(right);
         Assert.assertEquals(reconciled.name(), left.name());
@@ -164,7 +166,7 @@ public class CounterColumnTest extends SchemaLoader
         Assert.assertEquals(4L, reconciled.timestamp());
 
         left = reconciled;
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(2), 1L, 5L, false), 2L, Long.MIN_VALUE);
+        right = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(2), 1L, 5L, false), 2L, Long.MIN_VALUE);
 
         reconciled = left.reconcile(right);
         Assert.assertEquals(reconciled.name(), left.name());
@@ -172,7 +174,7 @@ public class CounterColumnTest extends SchemaLoader
         Assert.assertEquals(4L, reconciled.timestamp());
 
         left = reconciled;
-        right = new CounterColumn(ByteBufferUtil.bytes("x"), cc.create(CounterId.fromInt(2), 2L, 2L, false), 6L, Long.MIN_VALUE);
+        right = new CounterColumn(Util.cellname("x"), cc.create(CounterId.fromInt(2), 2L, 2L, false), 6L, Long.MIN_VALUE);
 
         reconciled = left.reconcile(right);
         Assert.assertEquals(reconciled.name(), left.name());
@@ -205,15 +207,15 @@ public class CounterColumnTest extends SchemaLoader
         CounterColumn rightCol;
 
         // timestamp
-        leftCol = new CounterColumn(ByteBufferUtil.bytes("x"), 0, 1L);
-        rightCol = new CounterColumn(ByteBufferUtil.bytes("x"), 0, 2L);
+        leftCol = new CounterColumn(Util.cellname("x"), 0, 1L);
+        rightCol = new CounterColumn(Util.cellname("x"), 0, 2L);
 
         Assert.assertEquals(rightCol, leftCol.diff(rightCol));
         Assert.assertNull(rightCol.diff(leftCol));
 
         // timestampOfLastDelete
-        leftCol = new CounterColumn(ByteBufferUtil.bytes("x"), 0, 1L, 1L);
-        rightCol = new CounterColumn(ByteBufferUtil.bytes("x"), 0, 1L, 2L);
+        leftCol = new CounterColumn(Util.cellname("x"), 0, 1L, 1L);
+        rightCol = new CounterColumn(Util.cellname("x"), 0, 1L, 2L);
 
         Assert.assertEquals(rightCol, leftCol.diff(rightCol));
         Assert.assertNull(rightCol.diff(leftCol));
@@ -225,8 +227,8 @@ public class CounterColumnTest extends SchemaLoader
         left.writeElement(CounterId.fromInt(9), 1L, 0L);
         right = new ContextState(ByteBufferUtil.clone(left.context), 2);
 
-        leftCol  = new CounterColumn(ByteBufferUtil.bytes("x"), left.context,  1L);
-        rightCol = new CounterColumn(ByteBufferUtil.bytes("x"), right.context, 1L);
+        leftCol  = new CounterColumn(Util.cellname("x"), left.context,  1L);
+        rightCol = new CounterColumn(Util.cellname("x"), right.context, 1L);
         Assert.assertNull(leftCol.diff(rightCol));
 
         // greater than: left has superset of nodes (counts equal)
@@ -241,8 +243,8 @@ public class CounterColumnTest extends SchemaLoader
         right.writeElement(CounterId.fromInt(6), 2L, 0L);
         right.writeElement(CounterId.fromInt(9), 1L, 0L);
 
-        leftCol  = new CounterColumn(ByteBufferUtil.bytes("x"), left.context,  1L);
-        rightCol = new CounterColumn(ByteBufferUtil.bytes("x"), right.context, 1L);
+        leftCol  = new CounterColumn(Util.cellname("x"), left.context,  1L);
+        rightCol = new CounterColumn(Util.cellname("x"), right.context, 1L);
         Assert.assertNull(leftCol.diff(rightCol));
 
         // less than: right has subset of nodes (counts equal)
@@ -259,8 +261,8 @@ public class CounterColumnTest extends SchemaLoader
         right.writeElement(CounterId.fromInt(6), 1L, 0L);
         right.writeElement(CounterId.fromInt(9), 1L, 0L);
 
-        leftCol  = new CounterColumn(ByteBufferUtil.bytes("x"), left.context,  1L);
-        rightCol = new CounterColumn(ByteBufferUtil.bytes("x"), right.context, 1L);
+        leftCol  = new CounterColumn(Util.cellname("x"), left.context,  1L);
+        rightCol = new CounterColumn(Util.cellname("x"), right.context, 1L);
         Assert.assertEquals(rightCol, leftCol.diff(rightCol));
         Assert.assertEquals(leftCol, rightCol.diff(leftCol));
     }
@@ -275,20 +277,21 @@ public class CounterColumnTest extends SchemaLoader
         state.writeElement(CounterId.fromInt(3), 4L, 4L);
         state.writeElement(CounterId.fromInt(4), 4L, 4L, true);
 
-        CounterColumn original = new CounterColumn(ByteBufferUtil.bytes("x"), state.context, 1L);
+        CellNameType type = new SimpleDenseCellNameType(UTF8Type.instance);
+        CounterColumn original = new CounterColumn(Util.cellname("x"), state.context, 1L);
         byte[] serialized;
         try (DataOutputBuffer bufOut = new DataOutputBuffer())
         {
-            Column.serializer.serialize(original, bufOut);
+            type.columnSerializer().serialize(original, bufOut);
             serialized = bufOut.getData();
         }
 
         ByteArrayInputStream bufIn = new ByteArrayInputStream(serialized, 0, serialized.length);
-        CounterColumn deserialized = (CounterColumn) Column.serializer.deserialize(new DataInputStream(bufIn));
+        CounterColumn deserialized = (CounterColumn) type.columnSerializer().deserialize(new DataInputStream(bufIn));
         Assert.assertEquals(original, deserialized);
 
         bufIn = new ByteArrayInputStream(serialized, 0, serialized.length);
-        CounterColumn deserializedOnRemote = (CounterColumn) Column.serializer.deserialize(new DataInputStream(bufIn), ColumnSerializer.Flag.FROM_REMOTE);
+        CounterColumn deserializedOnRemote = (CounterColumn) type.columnSerializer().deserialize(new DataInputStream(bufIn), ColumnSerializer.Flag.FROM_REMOTE);
         Assert.assertEquals(deserializedOnRemote.name(), original.name());
         Assert.assertEquals(deserializedOnRemote.total(), original.total());
         Assert.assertEquals(deserializedOnRemote.value(), cc.clearAllDelta(original.value()));
@@ -309,8 +312,8 @@ public class CounterColumnTest extends SchemaLoader
         state.writeElement(CounterId.fromInt(3), 4L, 4L);
         state.writeElement(CounterId.fromInt(4), 4L, 4L, true);
 
-        CounterColumn original = new CounterColumn(ByteBufferUtil.bytes("x"), state.context, 1L);
-        CounterColumn cleared = new CounterColumn(ByteBufferUtil.bytes("x"), cc.clearAllDelta(state.context), 1L);
+        CounterColumn original = new CounterColumn(Util.cellname("x"), state.context, 1L);
+        CounterColumn cleared = new CounterColumn(Util.cellname("x"), cc.clearAllDelta(state.context), 1L);
 
         original.updateDigest(digest1);
         cleared.updateDigest(digest2);
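The serialization change above is the more interesting one: the column serializer is now obtained from a CellNameType rather than the static Column.serializer. A stripped-down round-trip sketch using the same calls (illustrative class name; a regular Column instead of a counter to keep it short):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.SimpleDenseCellNameType;
    import org.apache.cassandra.db.marshal.UTF8Type;
    import org.apache.cassandra.io.util.DataOutputBuffer;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class ColumnSerializerSketch
    {
        static void roundTrip() throws IOException
        {
            // The serializer comes from the table's CellNameType, so it knows the cell layout.
            CellNameType type = new SimpleDenseCellNameType(UTF8Type.instance);
            Column original = new Column(Util.cellname("x"), ByteBufferUtil.bytes("v"), 1L);

            byte[] serialized;
            try (DataOutputBuffer bufOut = new DataOutputBuffer())
            {
                type.columnSerializer().serialize(original, bufOut);
                serialized = bufOut.getData();
            }

            DataInputStream bufIn = new DataInputStream(new ByteArrayInputStream(serialized, 0, serialized.length));
            Column deserialized = type.columnSerializer().deserialize(bufIn);
            assert original.equals(deserialized);
        }
    }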

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/CounterMutationTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/CounterMutationTest.java b/test/unit/org/apache/cassandra/db/CounterMutationTest.java
index 3cbd030..74deb37 100644
--- a/test/unit/org/apache/cassandra/db/CounterMutationTest.java
+++ b/test/unit/org/apache/cassandra/db/CounterMutationTest.java
@@ -42,7 +42,7 @@ public class CounterMutationTest extends SchemaLoader
         CounterId id1 = CounterId.getLocalId();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.addCounter("Counter1", ByteBufferUtil.bytes("Column1"), 3);
+        rm.addCounter("Counter1", Util.cellname("Column1"), 3);
         cm = new CounterMutation(rm, ConsistencyLevel.ONE);
         cm.apply();
 
@@ -50,7 +50,7 @@ public class CounterMutationTest extends SchemaLoader
         CounterId id2 = CounterId.getLocalId();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.addCounter("Counter1", ByteBufferUtil.bytes("Column1"), 4);
+        rm.addCounter("Counter1", Util.cellname("Column1"), 4);
         cm = new CounterMutation(rm, ConsistencyLevel.ONE);
         cm.apply();
 
@@ -58,8 +58,8 @@ public class CounterMutationTest extends SchemaLoader
         CounterId id3 = CounterId.getLocalId();
 
         rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
-        rm.addCounter("Counter1", ByteBufferUtil.bytes("Column1"), 5);
-        rm.addCounter("Counter1", ByteBufferUtil.bytes("Column2"), 1);
+        rm.addCounter("Counter1", Util.cellname("Column1"), 5);
+        rm.addCounter("Counter1", Util.cellname("Column2"), 1);
         cm = new CounterMutation(rm, ConsistencyLevel.ONE);
         cm.apply();
 
@@ -69,7 +69,7 @@ public class CounterMutationTest extends SchemaLoader
         // First merges old shards
         CounterColumn.mergeAndRemoveOldShards(dk, cf, Integer.MIN_VALUE, Integer.MAX_VALUE, false);
         long now = System.currentTimeMillis();
-        Column c = cf.getColumn(ByteBufferUtil.bytes("Column1"));
+        Column c = cf.getColumn(Util.cellname("Column1"));
         assert c != null;
         assert c instanceof CounterColumn;
         assert ((CounterColumn)c).total() == 12L;
@@ -89,7 +89,7 @@ public class CounterMutationTest extends SchemaLoader
 
         // Then collect old shards
         CounterColumn.mergeAndRemoveOldShards(dk, cf, Integer.MAX_VALUE, Integer.MIN_VALUE, false);
-        c = cf.getColumn(ByteBufferUtil.bytes("Column1"));
+        c = cf.getColumn(Util.cellname("Column1"));
         assert c != null;
         assert c instanceof CounterColumn;
         assert ((CounterColumn)c).total() == 12L;
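Counter writes follow the same addressing change; roughly (illustrative class name; schema from SchemaLoader):

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ConsistencyLevel;
    import org.apache.cassandra.db.CounterMutation;
    import org.apache.cassandra.db.RowMutation;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CounterCellNameSketch
    {
        static void incrementOnce()
        {
            // Counter cells are addressed by CellName as well.
            RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("key1"));
            rm.addCounter("Counter1", Util.cellname("Column1"), 3);
            new CounterMutation(rm, ConsistencyLevel.ONE).apply();
        }
    }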

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/HintedHandOffTest.java b/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
index 23db4bb..4978c57 100644
--- a/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
+++ b/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
@@ -29,6 +29,7 @@ import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
@@ -63,7 +64,7 @@ public class HintedHandOffTest extends SchemaLoader
 
         // insert 1 hint
         RowMutation rm = new RowMutation(KEYSPACE4, ByteBufferUtil.bytes(1));
-        rm.add(STANDARD1_CF, ByteBufferUtil.bytes(String.valueOf(COLUMN1)), ByteBufferUtil.EMPTY_BYTE_BUFFER, System.currentTimeMillis());
+        rm.add(STANDARD1_CF, Util.cellname(COLUMN1), ByteBufferUtil.EMPTY_BYTE_BUFFER, System.currentTimeMillis());
 
         HintedHandOffManager.instance.hintFor(rm, HintedHandOffManager.calculateHintTTL(rm), UUID.randomUUID()).apply();
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/KeyCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/KeyCacheTest.java b/test/unit/org/apache/cassandra/db/KeyCacheTest.java
index 1f41860..dc5b22c 100644
--- a/test/unit/org/apache/cassandra/db/KeyCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyCacheTest.java
@@ -28,6 +28,7 @@ import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.cache.KeyCacheKey;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.service.CacheService;
@@ -117,10 +118,10 @@ public class KeyCacheTest extends SchemaLoader
 
         // inserts
         rm = new RowMutation(KEYSPACE1, key1.key);
-        rm.add(COLUMN_FAMILY1, ByteBufferUtil.bytes("1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+        rm.add(COLUMN_FAMILY1, Util.cellname("1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         rm.apply();
         rm = new RowMutation(KEYSPACE1, key2.key);
-        rm.add(COLUMN_FAMILY1, ByteBufferUtil.bytes("2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
+        rm.add(COLUMN_FAMILY1, Util.cellname("2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
         rm.apply();
 
         // to make sure we have SSTable
@@ -129,16 +130,16 @@ public class KeyCacheTest extends SchemaLoader
         // reads to cache key position
         cfs.getColumnFamily(QueryFilter.getSliceFilter(key1,
                                                        COLUMN_FAMILY1,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       Composites.EMPTY,
+                                                       Composites.EMPTY,
                                                        false,
                                                        10,
                                                        System.currentTimeMillis()));
 
         cfs.getColumnFamily(QueryFilter.getSliceFilter(key2,
                                                        COLUMN_FAMILY1,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       Composites.EMPTY,
+                                                       Composites.EMPTY,
                                                        false,
                                                        10,
                                                        System.currentTimeMillis()));
@@ -153,16 +154,16 @@ public class KeyCacheTest extends SchemaLoader
         // re-read same keys to verify that key cache didn't grow further
         cfs.getColumnFamily(QueryFilter.getSliceFilter(key1,
                                                        COLUMN_FAMILY1,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       Composites.EMPTY,
+                                                       Composites.EMPTY,
                                                        false,
                                                        10,
                                                        System.currentTimeMillis()));
 
         cfs.getColumnFamily(QueryFilter.getSliceFilter(key2,
                                                        COLUMN_FAMILY1,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                                                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                       Composites.EMPTY,
+                                                       Composites.EMPTY,
                                                        false,
                                                        10,
                                                        System.currentTimeMillis()));
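Open-ended bounds in QueryFilter.getSliceFilter are now Composites.EMPTY rather than empty ByteBuffers; a whole-row read sketch (illustrative class name; assumes the SchemaLoader schema and the Util.dk helper):

    import org.apache.cassandra.Util;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.db.Keyspace;
    import org.apache.cassandra.db.composites.Composites;
    import org.apache.cassandra.db.filter.QueryFilter;

    public class WholeRowSliceSketch
    {
        static void readWholeRow()
        {
            ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
            DecoratedKey key = Util.dk("k");
            // Empty composites on both ends mean "from the start of the row to its end".
            cfs.getColumnFamily(QueryFilter.getSliceFilter(key,
                                                           "Standard1",
                                                           Composites.EMPTY,
                                                           Composites.EMPTY,
                                                           false,
                                                           10,
                                                           System.currentTimeMillis()));
        }
    }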

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/test/unit/org/apache/cassandra/db/KeyCollisionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/KeyCollisionTest.java b/test/unit/org/apache/cassandra/db/KeyCollisionTest.java
index 345febd..0f3e6d9 100644
--- a/test/unit/org/apache/cassandra/db/KeyCollisionTest.java
+++ b/test/unit/org/apache/cassandra/db/KeyCollisionTest.java
@@ -26,6 +26,7 @@ import java.util.*;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
+import org.apache.cassandra.Util;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.IntegerType;
@@ -89,7 +90,7 @@ public class KeyCollisionTest extends SchemaLoader
     {
         RowMutation rm;
         rm = new RowMutation(KEYSPACE, ByteBufferUtil.bytes(key));
-        rm.add(CF, ByteBufferUtil.bytes("column"), ByteBufferUtil.bytes("asdf"), 0);
+        rm.add(CF, Util.cellname("column"), ByteBufferUtil.bytes("asdf"), 0);
         rm.apply();
     }
 


[08/13] Push composites support in the storage engine

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
index bb00d23..16e74e3 100644
--- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
+++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
@@ -87,7 +87,7 @@ public class LazilyCompactedRow extends AbstractCompactedRow
             emptyColumnFamily.purgeTombstones(controller.gcBefore);
 
         reducer = new Reducer();
-        merger = Iterators.filter(MergeIterator.get(rows, emptyColumnFamily.getComparator().onDiskAtomComparator, reducer), Predicates.notNull());
+        merger = Iterators.filter(MergeIterator.get(rows, emptyColumnFamily.getComparator().onDiskAtomComparator(), reducer), Predicates.notNull());
     }
 
     private static void removeDeletedAndOldShards(ColumnFamily cf, boolean shouldPurge, DecoratedKey key, CompactionController controller)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index 3d52396..bec29d5 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -106,7 +106,7 @@ public class Scrubber implements Closeable
             ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
             {
                 // throw away variable so we don't have a side effect in the assert
-                long firstRowPositionFromIndex = RowIndexEntry.serializer.deserialize(indexFile, sstable.descriptor.version).position;
+                long firstRowPositionFromIndex = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(indexFile, sstable.descriptor.version).position;
                 assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
             }
 
@@ -146,7 +146,7 @@ public class Scrubber implements Closeable
                     nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                     nextRowPositionFromIndex = indexFile.isEOF()
                                              ? dataFile.length()
-                                             : RowIndexEntry.serializer.deserialize(indexFile, sstable.descriptor.version).position;
+                                             : sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(indexFile, sstable.descriptor.version).position;
                 }
                 catch (Throwable th)
                 {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/AbstractCType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractCType.java b/src/java/org/apache/cassandra/db/composites/AbstractCType.java
new file mode 100644
index 0000000..2b07a46
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/AbstractCType.java
@@ -0,0 +1,336 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.RangeTombstone;
+import org.apache.cassandra.db.RowIndexEntry;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.db.filter.SliceQueryFilter;
+import org.apache.cassandra.db.marshal.AbstractCompositeType;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+import static org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
+
+public abstract class AbstractCType implements CType
+{
+    private final Comparator<Composite> reverseComparator;
+    private final Comparator<IndexInfo> indexComparator;
+    private final Comparator<IndexInfo> indexReverseComparator;
+
+    private final Serializer serializer;
+
+    private final ISerializer<IndexInfo> indexSerializer;
+    private final IVersionedSerializer<ColumnSlice> sliceSerializer;
+    private final IVersionedSerializer<SliceQueryFilter> sliceQueryFilterSerializer;
+    private final DeletionInfo.Serializer deletionInfoSerializer;
+    private final RangeTombstone.Serializer rangeTombstoneSerializer;
+    private final RowIndexEntry.Serializer rowIndexEntrySerializer;
+
+    protected AbstractCType()
+    {
+        reverseComparator = new Comparator<Composite>()
+        {
+            public int compare(Composite c1, Composite c2)
+            {
+                return AbstractCType.this.compare(c2, c1);
+            }
+        };
+        indexComparator = new Comparator<IndexInfo>()
+        {
+            public int compare(IndexInfo o1, IndexInfo o2)
+            {
+                return AbstractCType.this.compare(o1.lastName, o2.lastName);
+            }
+        };
+        indexReverseComparator = new Comparator<IndexInfo>()
+        {
+            public int compare(IndexInfo o1, IndexInfo o2)
+            {
+                return AbstractCType.this.compare(o1.firstName, o2.firstName);
+            }
+        };
+
+        serializer = new Serializer(this);
+
+        indexSerializer = new IndexInfo.Serializer(this);
+        sliceSerializer = new ColumnSlice.Serializer(this);
+        sliceQueryFilterSerializer = new SliceQueryFilter.Serializer(this);
+        deletionInfoSerializer = new DeletionInfo.Serializer(this);
+        rangeTombstoneSerializer = new RangeTombstone.Serializer(this);
+        rowIndexEntrySerializer = new RowIndexEntry.Serializer(this);
+    }
+
+    public int compare(Composite c1, Composite c2)
+    {
+        if (c1 == null)
+            return c2 == null ? 0 : -1;
+
+        ByteBuffer previous = null;
+        int i;
+        int minSize = Math.min(c1.size(), c2.size());
+        for (i = 0; i < minSize; i++)
+        {
+            AbstractType<?> comparator = subtype(i);
+            ByteBuffer value1 = c1.get(i);
+            ByteBuffer value2 = c2.get(i);
+
+            int cmp = comparator.compareCollectionMembers(value1, value2, previous);
+            if (cmp != 0)
+                return cmp;
+
+            previous = value1;
+        }
+
+        if (c1.size() == c2.size())
+        {
+            if (c1.eoc() != c2.eoc())
+            {
+                switch (c1.eoc())
+                {
+                    case START: return -1;
+                    case END:   return 1;
+                    case NONE:  return c2.eoc() == Composite.EOC.START ? 1 : -1;
+                }
+            }
+            return 0;
+        }
+
+        if (i == c1.size())
+        {
+            return c1.eoc() == Composite.EOC.END ? 1 : -1;
+        }
+        else
+        {
+            assert i == c2.size();
+            return c2.eoc() == Composite.EOC.END ? -1 : 1;
+        }
+    }
+
+    public void validate(Composite name)
+    {
+        ByteBuffer previous = null;
+        for (int i = 0; i < name.size(); i++)
+        {
+            AbstractType<?> comparator = subtype(i);
+            ByteBuffer value = name.get(i);
+            comparator.validateCollectionMember(value, previous);
+            previous = value;
+        }
+    }
+
+    public boolean isCompatibleWith(CType previous)
+    {
+        if (this == previous)
+            return true;
+
+        // Extending with new components is fine, shrinking is not
+        if (size() < previous.size())
+            return false;
+
+        for (int i = 0; i < previous.size(); i++)
+        {
+            AbstractType<?> tprev = previous.subtype(i);
+            AbstractType<?> tnew = subtype(i);
+            if (!tnew.isCompatibleWith(tprev))
+                return false;
+        }
+        return true;
+    }
+
+    public String getString(Composite c)
+    {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < c.size(); i++)
+        {
+            if (i > 0)
+                sb.append(":");
+            sb.append(AbstractCompositeType.escape(subtype(i).getString(c.get(i))));
+        }
+        switch (c.eoc())
+        {
+            case START:
+                sb.append(":_");
+                break;
+            case END:
+                sb.append(":!");
+                break;
+        }
+        return sb.toString();
+    }
+
+    public Composite make(Object... components)
+    {
+        if (components.length > size())
+            throw new IllegalArgumentException("Too many components, max is " + size());
+
+        CBuilder builder = builder();
+        for (int i = 0; i < components.length; i++)
+        {
+            Object obj = components[i];
+            if (obj instanceof ByteBuffer)
+                builder.add((ByteBuffer)obj);
+            else
+                builder.add(obj);
+        }
+        return builder.build();
+    }
+
+    public CType.Serializer serializer()
+    {
+        return serializer;
+    }
+
+    public Comparator<Composite> reverseComparator()
+    {
+        return reverseComparator;
+    }
+
+    public Comparator<IndexInfo> indexComparator()
+    {
+        return indexComparator;
+    }
+
+    public Comparator<IndexInfo> indexReverseComparator()
+    {
+        return indexReverseComparator;
+    }
+
+    public ISerializer<IndexInfo> indexSerializer()
+    {
+        return indexSerializer;
+    }
+
+    public IVersionedSerializer<ColumnSlice> sliceSerializer()
+    {
+        return sliceSerializer;
+    }
+
+    public IVersionedSerializer<SliceQueryFilter> sliceQueryFilterSerializer()
+    {
+        return sliceQueryFilterSerializer;
+    }
+
+    public DeletionInfo.Serializer deletionInfoSerializer()
+    {
+        return deletionInfoSerializer;
+    }
+
+    public RangeTombstone.Serializer rangeTombstoneSerializer()
+    {
+        return rangeTombstoneSerializer;
+    }
+
+    public RowIndexEntry.Serializer rowIndexEntrySerializer()
+    {
+        return rowIndexEntrySerializer;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (o == null)
+            return false;
+
+        if (!getClass().equals(o.getClass()))
+            return false;
+
+        CType c = (CType)o;
+        if (size() != c.size())
+            return false;
+
+        for (int i = 0; i < size(); i++)
+        {
+            if (!subtype(i).equals(c.subtype(i)))
+                return false;
+        }
+        return true;
+    }
+
+    @Override
+    public int hashCode()
+    {
+        int h = 31;
+        for (int i = 0; i < size(); i++)
+            h += subtype(i).hashCode();
+        return h + getClass().hashCode();
+    }
+
+    @Override
+    public String toString()
+    {
+        return asAbstractType().toString();
+    }
+
+    protected static ByteBuffer sliceBytes(ByteBuffer bb, int offs, int length)
+    {
+        ByteBuffer copy = bb.duplicate();
+        copy.position(offs);
+        copy.limit(offs + length);
+        return copy;
+    }
+
+    protected static void checkRemaining(ByteBuffer bb, int offs, int length)
+    {
+        if (offs + length > bb.limit())
+            throw new IllegalArgumentException("Not enough bytes");
+    }
+
+    private static class Serializer implements CType.Serializer
+    {
+        private final CType type;
+
+        public Serializer(CType type)
+        {
+            this.type = type;
+        }
+
+        public void serialize(Composite c, DataOutput out) throws IOException
+        {
+            ByteBufferUtil.writeWithShortLength(c.toByteBuffer(), out);
+        }
+
+        public Composite deserialize(DataInput in) throws IOException
+        {
+            return type.fromByteBuffer(ByteBufferUtil.readWithShortLength(in));
+        }
+
+        public long serializedSize(Composite c, TypeSizes type)
+        {
+            return type.sizeofWithShortLength(c.toByteBuffer());
+        }
+
+        public void skip(DataInput in) throws IOException
+        {
+            ByteBufferUtil.skipShortLength(in);
+        }
+    }
+}
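
The EOC (end-of-component) tie-break in compare() above is what makes slice bounds sort
correctly around the composites they wrap: for composites whose components are all equal,
a START bound sorts before the plain (NONE) composite, which in turn sorts before an END
bound. A minimal, self-contained sketch of just that rule, independent of the classes in
this patch (the class name is made up):

    public class EocOrderingSketch
    {
        enum EOC { START, NONE, END }

        // Mirrors the tie-break switch in AbstractCType.compare() for composites whose
        // components are all equal: START sorts before NONE, which sorts before END.
        static int compareEoc(EOC e1, EOC e2)
        {
            if (e1 == e2)
                return 0;
            switch (e1)
            {
                case START: return -1;
                case END:   return 1;
                default:    return e2 == EOC.START ? 1 : -1;
            }
        }

        public static void main(String[] args)
        {
            System.out.println(compareEoc(EOC.START, EOC.NONE) < 0); // true: a start bound precedes the prefix itself
            System.out.println(compareEoc(EOC.NONE, EOC.END) < 0);   // true: which in turn precedes an end bound
        }
    }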

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/AbstractCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractCellNameType.java b/src/java/org/apache/cassandra/db/composites/AbstractCellNameType.java
new file mode 100644
index 0000000..420ceab
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/AbstractCellNameType.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import com.google.common.collect.AbstractIterator;
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.filter.IDiskAtomFilter;
+import org.apache.cassandra.db.filter.NamesQueryFilter;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CollectionType;
+import org.apache.cassandra.db.marshal.ColumnToCollectionType;
+import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+public abstract class AbstractCellNameType extends AbstractCType implements CellNameType
+{
+    private final Comparator<Column> columnComparator;
+    private final Comparator<Column> columnReverseComparator;
+    private final Comparator<OnDiskAtom> onDiskAtomComparator;
+
+    private final ISerializer<CellName> cellSerializer;
+    private final ColumnSerializer columnSerializer;
+    private final OnDiskAtom.Serializer onDiskAtomSerializer;
+    private final IVersionedSerializer<NamesQueryFilter> namesQueryFilterSerializer;
+    private final IVersionedSerializer<IDiskAtomFilter> diskAtomFilterSerializer;
+
+    protected AbstractCellNameType()
+    {
+        columnComparator = new Comparator<Column>()
+        {
+            public int compare(Column c1, Column c2)
+            {
+                return AbstractCellNameType.this.compare(c1.name(), c2.name());
+            }
+        };
+        columnReverseComparator = new Comparator<Column>()
+        {
+            public int compare(Column c1, Column c2)
+            {
+                return AbstractCellNameType.this.compare(c2.name(), c1.name());
+            }
+        };
+        onDiskAtomComparator = new Comparator<OnDiskAtom>()
+        {
+            public int compare(OnDiskAtom c1, OnDiskAtom c2)
+            {
+                int comp = AbstractCellNameType.this.compare(c1.name(), c2.name());
+                if (comp != 0)
+                    return comp;
+
+                if (c1 instanceof RangeTombstone)
+                {
+                    if (c2 instanceof RangeTombstone)
+                    {
+                        RangeTombstone t1 = (RangeTombstone)c1;
+                        RangeTombstone t2 = (RangeTombstone)c2;
+                        int comp2 = AbstractCellNameType.this.compare(t1.max, t2.max);
+                        return comp2 == 0 ? t1.data.compareTo(t2.data) : comp2;
+                    }
+                    else
+                    {
+                        return -1;
+                    }
+                }
+                else
+                {
+                    return c2 instanceof RangeTombstone ? 1 : 0;
+                }
+            }
+        };
+
+        // A trivial wrapper over the composite serializer
+        cellSerializer = new ISerializer<CellName>()
+        {
+            public void serialize(CellName c, DataOutput out) throws IOException
+            {
+                serializer().serialize(c, out);
+            }
+
+            public CellName deserialize(DataInput in) throws IOException
+            {
+                Composite ct = serializer().deserialize(in);
+                if (ct.isEmpty())
+                    throw ColumnSerializer.CorruptColumnException.create(in, ByteBufferUtil.EMPTY_BYTE_BUFFER);
+
+                assert ct instanceof CellName : ct;
+                return (CellName)ct;
+            }
+
+            public long serializedSize(CellName c, TypeSizes type)
+            {
+                return serializer().serializedSize(c, type);
+            }
+        };
+        columnSerializer = new ColumnSerializer(this);
+        onDiskAtomSerializer = new OnDiskAtom.Serializer(this);
+        namesQueryFilterSerializer = new NamesQueryFilter.Serializer(this);
+        diskAtomFilterSerializer = new IDiskAtomFilter.Serializer(this);
+    }
+
+    public Comparator<Column> columnComparator()
+    {
+        return columnComparator;
+    }
+
+    public Comparator<Column> columnReverseComparator()
+    {
+        return columnReverseComparator;
+    }
+
+    public Comparator<OnDiskAtom> onDiskAtomComparator()
+    {
+        return onDiskAtomComparator;
+    }
+
+    public ISerializer<CellName> cellSerializer()
+    {
+        return cellSerializer;
+    }
+
+    public ColumnSerializer columnSerializer()
+    {
+        return columnSerializer;
+    }
+
+    public OnDiskAtom.Serializer onDiskAtomSerializer()
+    {
+        return onDiskAtomSerializer;
+    }
+
+    public IVersionedSerializer<NamesQueryFilter> namesQueryFilterSerializer()
+    {
+        return namesQueryFilterSerializer;
+    }
+
+    public IVersionedSerializer<IDiskAtomFilter> diskAtomFilterSerializer()
+    {
+        return diskAtomFilterSerializer;
+    }
+
+    public CellName cellFromByteBuffer(ByteBuffer bytes)
+    {
+        return (CellName)fromByteBuffer(bytes);
+    }
+
+    public CellName create(Composite prefix, ColumnIdentifier columnName, ByteBuffer collectionElement)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    public CellName rowMarker(Composite prefix)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    public boolean hasCollections()
+    {
+        return false;
+    }
+
+    public boolean supportCollections()
+    {
+        return false;
+    }
+
+    public ColumnToCollectionType collectionType()
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    public CellNameType addCollection(ColumnIdentifier columnName, CollectionType newCollection)
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Composite make(Object... components)
+    {
+        return components.length == size() ? makeCellName(components) : super.make(components);
+    }
+
+    public CellName makeCellName(Object... components)
+    {
+        ByteBuffer[] rawComponents = new ByteBuffer[components.length];
+        for (int i = 0; i < components.length; i++)
+        {
+            Object c = components[i];
+            rawComponents[i] = c instanceof ByteBuffer ? (ByteBuffer)c : ((AbstractType)subtype(i)).decompose(c);
+        }
+        return makeCellName(rawComponents);
+    }
+
+    protected abstract CellName makeCellName(ByteBuffer[] components);
+
+    protected static CQL3Row.Builder makeDenseCQL3RowBuilder(final long now)
+    {
+        return new CQL3Row.Builder()
+        {
+            public Iterator<CQL3Row> group(final Iterator<Column> cells)
+            {
+                return new AbstractIterator<CQL3Row>()
+                {
+                    protected CQL3Row computeNext()
+                    {
+                        while (cells.hasNext())
+                        {
+                            final Column cell = cells.next();
+                            if (cell.isMarkedForDelete(now))
+                                continue;
+
+                            return new CQL3Row()
+                            {
+                                public ByteBuffer getClusteringColumn(int i)
+                                {
+                                    return cell.name().get(i);
+                                }
+
+                                public Column getColumn(ColumnIdentifier name)
+                                {
+                                    return cell;
+                                }
+
+                                public List<Column> getCollection(ColumnIdentifier name)
+                                {
+                                    return null;
+                                }
+                            };
+                        }
+                        return endOfData();
+                    }
+                };
+            }
+        };
+    }
+
+    protected static CQL3Row.Builder makeSparseCQL3RowBuilder(final long now)
+    {
+        return new CQL3Row.Builder()
+        {
+            public Iterator<CQL3Row> group(final Iterator<Column> cells)
+            {
+                return new AbstractIterator<CQL3Row>()
+                {
+                    private CellName previous;
+                    private CQL3RowOfSparse currentRow;
+
+                    protected CQL3Row computeNext()
+                    {
+                        while (cells.hasNext())
+                        {
+                            final Column cell = cells.next();
+                            if (cell.isMarkedForDelete(now))
+                                continue;
+
+                            CQL3Row toReturn = null;
+                            CellName current = cell.name();
+                            if (currentRow == null || !current.isSameCQL3RowAs(previous))
+                            {
+                                toReturn = currentRow;
+                                currentRow = new CQL3RowOfSparse(current);
+                            }
+                            currentRow.add(cell);
+                            previous = current;
+
+                            if (toReturn != null)
+                                return toReturn;
+                        }
+                        if (currentRow != null)
+                        {
+                            CQL3Row toReturn = currentRow;
+                            currentRow = null;
+                            return toReturn;
+                        }
+                        return endOfData();
+                    }
+                };
+            }
+        };
+    }
+
+    private static class CQL3RowOfSparse implements CQL3Row
+    {
+        private final CellName cell;
+        private Map<ColumnIdentifier, Column> columns;
+        private Map<ColumnIdentifier, List<Column>> collections;
+
+        CQL3RowOfSparse(CellName cell)
+        {
+            this.cell = cell;
+        }
+
+        public ByteBuffer getClusteringColumn(int i)
+        {
+            return cell.get(i);
+        }
+
+        void add(Column cell)
+        {
+            CellName cellName = cell.name();
+            ColumnIdentifier columnName =  cellName.cql3ColumnName();
+            if (cellName.isCollectionCell())
+            {
+                if (collections == null)
+                    collections = new HashMap<>();
+
+                List<Column> values = collections.get(columnName);
+                if (values == null)
+                {
+                    values = new ArrayList<Column>();
+                    collections.put(columnName, values);
+                }
+                values.add(cell);
+            }
+            else
+            {
+                if (columns == null)
+                    columns = new HashMap<>();
+                columns.put(columnName, cell);
+            }
+        }
+
+        public Column getColumn(ColumnIdentifier name)
+        {
+            return columns == null ? null : columns.get(name);
+        }
+
+        public List<Column> getCollection(ColumnIdentifier name)
+        {
+            return collections == null ? null : collections.get(name);
+        }
+    }
+}
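
makeSparseCQL3RowBuilder() above is a streaming group-by: cells arrive in comparator order,
consecutive cells whose names satisfy isSameCQL3RowAs() are accumulated into one CQL3Row, and
a row is only emitted once the first cell of the next row is seen (plus a final flush for the
last row). A minimal sketch of that pattern, with plain strings standing in for cells and the
part before the colon standing in for the clustering prefix (names and input are made up):

    import java.util.*;

    public class SparseGroupingSketch
    {
        // Groups consecutive "cells" (strings like "r1:a") by the part before the colon,
        // the way the sparse CQL3Row builder groups cells sharing a clustering prefix.
        static List<List<String>> groupByRow(Iterator<String> cells)
        {
            List<List<String>> rows = new ArrayList<List<String>>();
            String previousKey = null;
            List<String> currentRow = null;
            while (cells.hasNext())
            {
                String cell = cells.next();
                String key = cell.substring(0, cell.indexOf(':'));
                if (currentRow == null || !key.equals(previousKey))
                {
                    if (currentRow != null)
                        rows.add(currentRow);  // emit the previous row once a new one starts
                    currentRow = new ArrayList<String>();
                }
                currentRow.add(cell);
                previousKey = key;
            }
            if (currentRow != null)
                rows.add(currentRow);          // final flush, as computeNext() does
            return rows;
        }

        public static void main(String[] args)
        {
            Iterator<String> cells = Arrays.asList("r1:a", "r1:b", "r2:a", "r3:a").iterator();
            System.out.println(groupByRow(cells)); // [[r1:a, r1:b], [r2:a], [r3:a]]
        }
    }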

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/AbstractComposite.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractComposite.java b/src/java/org/apache/cassandra/db/composites/AbstractComposite.java
new file mode 100644
index 0000000..e23c560
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/AbstractComposite.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.db.marshal.AbstractCompositeType;
+
+public abstract class AbstractComposite implements Composite
+{
+    public boolean isEmpty()
+    {
+        return size() == 0;
+    }
+
+    public EOC eoc()
+    {
+        return EOC.NONE;
+    }
+
+    public Composite start()
+    {
+        return withEOC(EOC.START);
+    }
+
+    public Composite end()
+    {
+        return withEOC(EOC.END);
+    }
+
+    public Composite withEOC(EOC newEoc)
+    {
+        // Note: BoundedComposite overrides this, so we assume the EOC of this composite is NONE
+        switch (newEoc)
+        {
+            case START:
+                return BoundedComposite.startOf(this);
+            case END:
+                return BoundedComposite.endOf(this);
+            default:
+                return this;
+        }
+    }
+
+    public ColumnSlice slice()
+    {
+        return new ColumnSlice(start(), end());
+    }
+
+    public ByteBuffer toByteBuffer()
+    {
+        // This is the legacy format of composites.
+        // See org.apache.cassandra.db.marshal.CompositeType for details.
+        ByteBuffer result = ByteBuffer.allocate(dataSize() + 3 * size());
+        for (int i = 0; i < size(); i++)
+        {
+            ByteBuffer bb = get(i);
+            AbstractCompositeType.putShortLength(result, bb.remaining());
+            result.put(bb.duplicate());
+            result.put((byte)0);
+        }
+        result.flip();
+        return result;
+    }
+
+    public int dataSize()
+    {
+        int size = 0;
+        for (int i = 0; i < size(); i++)
+            size += get(i).remaining();
+        return size;
+    }
+
+    public boolean isPrefixOf(Composite c)
+    {
+        if (size() > c.size())
+            return false;
+
+        for (int i = 0; i < size(); i++)
+        {
+            if (!get(i).equals(c.get(i)))
+                return false;
+        }
+        return true;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if(!(o instanceof Composite))
+            return false;
+
+        Composite c = (Composite)o;
+        if (size() != c.size())
+            return false;
+
+        for (int i = 0; i < size(); i++)
+        {
+            if (!get(i).equals(c.get(i)))
+                return false;
+        }
+        return eoc() == c.eoc();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        int h = 31;
+        for (int i = 0; i < size(); i++)
+            h += get(i).hashCode();
+        return h + eoc().hashCode();
+    }
+}
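
toByteBuffer() above reproduces the legacy CompositeType on-disk layout: each component is
written as an unsigned 16-bit length, the component bytes, then one end-of-component byte
(always 0 here since this base class has EOC NONE; BoundedComposite below patches the final
byte for bounds). A self-contained sketch of that encoding using only the JDK (the values
are made up for illustration):

    import java.nio.ByteBuffer;
    import java.nio.charset.Charset;

    public class LegacyCompositeEncodingSketch
    {
        // Encodes components in the legacy composite layout:
        // <unsigned short length><bytes><end-of-component byte>, repeated per component.
        static ByteBuffer encode(byte[][] components, byte eocOfLastComponent)
        {
            int dataSize = 0;
            for (byte[] c : components)
                dataSize += c.length;
            ByteBuffer result = ByteBuffer.allocate(dataSize + 3 * components.length);
            for (int i = 0; i < components.length; i++)
            {
                result.putShort((short) components[i].length);
                result.put(components[i]);
                result.put(i == components.length - 1 ? eocOfLastComponent : (byte) 0); // 0 == EOC NONE
            }
            result.flip();
            return result;
        }

        public static void main(String[] args)
        {
            Charset utf8 = Charset.forName("UTF-8");
            byte[][] components = { "2013".getBytes(utf8), "dec".getBytes(utf8) };
            ByteBuffer bb = encode(components, (byte) 0);
            // (2 + 4 + 1) bytes for "2013" plus (2 + 3 + 1) bytes for "dec"
            System.out.println(bb.remaining()); // 13
        }
    }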

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/AbstractCompoundCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractCompoundCellNameType.java b/src/java/org/apache/cassandra/db/composites/AbstractCompoundCellNameType.java
new file mode 100644
index 0000000..0602f50
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/AbstractCompoundCellNameType.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CompositeType;
+
+public abstract class AbstractCompoundCellNameType extends AbstractCellNameType
+{
+    protected final CompoundCType clusteringType;
+    protected final CompoundCType fullType;
+
+    protected final int clusteringSize;
+    protected final int fullSize;
+
+    protected AbstractCompoundCellNameType(CompoundCType clusteringType, CompoundCType fullType)
+    {
+        this.clusteringType = clusteringType;
+        this.fullType = fullType;
+
+        this.clusteringSize = clusteringType.size();
+        this.fullSize = fullType.size();
+    }
+
+    public int clusteringPrefixSize()
+    {
+        return clusteringSize;
+    }
+
+    public boolean isCompound()
+    {
+        return true;
+    }
+
+    public int size()
+    {
+        return fullSize;
+    }
+
+    public AbstractType<?> subtype(int i)
+    {
+        return fullType.subtype(i);
+    }
+
+    public CBuilder prefixBuilder()
+    {
+        return clusteringType.builder();
+    }
+
+    public CBuilder builder()
+    {
+        return new CompoundCType.CompoundCBuilder(this);
+    }
+
+    @Override
+    public Composite fromByteBuffer(ByteBuffer bytes)
+    {
+        if (!bytes.hasRemaining())
+            return Composites.EMPTY;
+
+        ByteBuffer[] elements = new ByteBuffer[fullSize];
+        int idx = bytes.position(), i = 0;
+        byte eoc = 0;
+        while (idx < bytes.limit())
+        {
+            checkRemaining(bytes, idx, 2);
+            int length = bytes.getShort(idx) & 0xFFFF;
+            idx += 2;
+
+            checkRemaining(bytes, idx, length + 1);
+            elements[i++] = sliceBytes(bytes, idx, length);
+            idx += length;
+            eoc = bytes.get(idx++);
+        }
+
+        return makeWith(elements, i, Composite.EOC.from(eoc));
+    }
+
+    public AbstractType<?> asAbstractType()
+    {
+        return CompositeType.getInstance(fullType.types);
+    }
+
+    public Deserializer newDeserializer(DataInput in)
+    {
+        return new CompositeDeserializer(this, in);
+    }
+
+    protected CellName makeCellName(ByteBuffer[] components)
+    {
+        return (CellName)makeWith(components, components.length, Composite.EOC.NONE);
+    }
+
+    protected abstract Composite makeWith(ByteBuffer[] components, int size, Composite.EOC eoc);
+    protected abstract Composite copyAndMakeWith(ByteBuffer[] components, int size, Composite.EOC eoc);
+
+    private static class CompositeDeserializer implements CellNameType.Deserializer
+    {
+        private static byte[] EMPTY = new byte[0];
+
+        private final AbstractCompoundCellNameType type;
+        private final DataInput in;
+
+        private byte[] nextFull;
+        private int nextIdx;
+
+        private final ByteBuffer[] nextComponents;
+        private int nextSize;
+        private Composite.EOC nextEOC;
+
+        public CompositeDeserializer(AbstractCompoundCellNameType type, DataInput in)
+        {
+            this.type = type;
+            this.in = in;
+            this.nextComponents = new ByteBuffer[type.size()];
+        }
+
+        public boolean hasNext() throws IOException
+        {
+            if (nextFull == null)
+                maybeReadNext();
+            return nextFull != EMPTY;
+        }
+
+        public boolean hasUnprocessed() throws IOException
+        {
+            return nextFull != null;
+        }
+
+        public int compareNextTo(Composite composite) throws IOException
+        {
+            maybeReadNext();
+
+            if (composite.isEmpty())
+                return nextFull == EMPTY ? 0 : 1;
+
+            if (nextFull == EMPTY)
+                return -1;
+
+            ByteBuffer previous = null;
+            for (int i = 0; i < composite.size(); i++)
+            {
+                if (!hasComponent(i))
+                    return nextEOC == Composite.EOC.END ? 1 : -1;
+
+                AbstractType<?> comparator = type.subtype(i);
+                ByteBuffer value1 = nextComponents[i];
+                ByteBuffer value2 = composite.get(i);
+
+                int cmp = comparator.compareCollectionMembers(value1, value2, previous);
+                if (cmp != 0)
+                    return cmp;
+
+                previous = value1;
+            }
+
+            // If we still have more components than the composite does
+            if (!allComponentsDeserialized() || composite.size() < nextSize)
+                return composite.eoc() == Composite.EOC.END ? -1 : 1;
+
+            // same size, check eoc
+            if (nextEOC != composite.eoc())
+            {
+                switch (nextEOC)
+                {
+                    case START: return -1;
+                    case END:   return 1;
+                    case NONE:  return composite.eoc() == Composite.EOC.START ? 1 : -1;
+                }
+            }
+
+            return 0;
+        }
+
+        private boolean hasComponent(int i)
+        {
+            while (i >= nextSize && deserializeOne())
+                continue;
+
+            return i < nextSize;
+        }
+
+        private boolean deserializeOne()
+        {
+            if (allComponentsDeserialized())
+                return false;
+
+            int length = ((nextFull[nextIdx++] & 0xFF) << 8) | (nextFull[nextIdx++] & 0xFF);
+            ByteBuffer component = ByteBuffer.wrap(nextFull, nextIdx, length);
+            nextIdx += length;
+            nextComponents[nextSize++] = component;
+            nextEOC = Composite.EOC.from(nextFull[nextIdx++]);
+            return true;
+        }
+
+        private void deserializeAll()
+        {
+            while (deserializeOne())
+                continue;
+        }
+
+        private boolean allComponentsDeserialized()
+        {
+            return nextIdx >= nextFull.length;
+        }
+
+        private void maybeReadNext() throws IOException
+        {
+            if (nextFull != null)
+                return;
+
+            nextIdx = 0;
+            nextSize = 0;
+
+            int length = in.readShort() & 0xFFFF;
+            // Note that empty is ok because it marks the end of the row
+            if (length == 0)
+            {
+                nextFull = EMPTY;
+                return;
+            }
+
+            nextFull = new byte[length];
+            in.readFully(nextFull);
+        }
+
+        public Composite readNext() throws IOException
+        {
+            maybeReadNext();
+            if (nextFull == EMPTY)
+                return Composites.EMPTY;
+
+            deserializeAll();
+            Composite c = type.copyAndMakeWith(nextComponents, nextSize, nextEOC);
+            nextFull = null;
+            return c;
+        }
+
+        public void skipNext() throws IOException
+        {
+            maybeReadNext();
+            nextFull = null;
+        }
+    }
+}
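
fromByteBuffer() above walks that same layout in reverse: read an unsigned 16-bit length,
slice that many bytes as the next component, read the trailing end-of-component byte (the
last one read becomes the composite's EOC), and repeat until the buffer is exhausted. A
self-contained decoder sketch of the layout, again JDK-only (names are made up):

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.List;

    public class LegacyCompositeDecodingSketch
    {
        static final class Decoded
        {
            final List<byte[]> components = new ArrayList<byte[]>();
            byte eoc; // last end-of-component byte read: -1 start, 0 none, 1 end
        }

        // Walks the <unsigned short length><bytes><eoc byte> layout, like fromByteBuffer() above.
        static Decoded decode(ByteBuffer bytes)
        {
            Decoded result = new Decoded();
            int idx = bytes.position();
            while (idx < bytes.limit())
            {
                int length = bytes.getShort(idx) & 0xFFFF;
                idx += 2;
                byte[] component = new byte[length];
                ByteBuffer slice = bytes.duplicate();
                slice.position(idx);
                slice.get(component);
                idx += length;
                result.eoc = bytes.get(idx++);
                result.components.add(component);
            }
            return result;
        }

        public static void main(String[] args)
        {
            ByteBuffer bb = ByteBuffer.allocate(13);
            bb.putShort((short) 4).put("2013".getBytes()).put((byte) 0);
            bb.putShort((short) 3).put("dec".getBytes()).put((byte) 0);
            bb.flip();
            Decoded d = decode(bb);
            System.out.println(d.components.size() + " components, eoc=" + d.eoc); // 2 components, eoc=0
        }
    }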

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/AbstractSimpleCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/AbstractSimpleCellNameType.java b/src/java/org/apache/cassandra/db/composites/AbstractSimpleCellNameType.java
new file mode 100644
index 0000000..94c0c4d
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/AbstractSimpleCellNameType.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+public abstract class AbstractSimpleCellNameType extends AbstractCellNameType
+{
+    protected final AbstractType<?> type;
+
+    protected AbstractSimpleCellNameType(AbstractType<?> type)
+    {
+        this.type = type;
+    }
+
+    public boolean isCompound()
+    {
+        return false;
+    }
+
+    public int size()
+    {
+        return 1;
+    }
+
+    public AbstractType<?> subtype(int i)
+    {
+        if (i != 0)
+            throw new IllegalArgumentException();
+        return type;
+    }
+
+    protected CellName makeCellName(ByteBuffer[] components)
+    {
+        assert components.length == 1;
+        return cellFromByteBuffer(components[0]);
+    }
+
+    public CBuilder builder()
+    {
+        return new SimpleCType.SimpleCBuilder(this);
+    }
+
+    public AbstractType<?> asAbstractType()
+    {
+        return type;
+    }
+
+    public Deserializer newDeserializer(DataInput in)
+    {
+        return new SimpleDeserializer(this, in);
+    }
+
+    private static class SimpleDeserializer implements CellNameType.Deserializer
+    {
+        private final AbstractSimpleCellNameType type;
+        private ByteBuffer next;
+        private final DataInput in;
+
+        public SimpleDeserializer(AbstractSimpleCellNameType type, DataInput in)
+        {
+            this.type = type;
+            this.in = in;
+        }
+
+        public boolean hasNext() throws IOException
+        {
+            if (next == null)
+                maybeReadNext();
+
+            return next.hasRemaining();
+        }
+
+        public boolean hasUnprocessed() throws IOException
+        {
+            return next != null;
+        }
+
+        public int compareNextTo(Composite composite) throws IOException
+        {
+            maybeReadNext();
+
+            if (composite.isEmpty())
+                return next.hasRemaining() ? 1 : 0;
+
+            return type.subtype(0).compare(next, composite.get(0));
+        }
+
+        private void maybeReadNext() throws IOException
+        {
+            if (next != null)
+                return;
+
+            int length = in.readShort() & 0xFFFF;
+            // Note that empty is ok because it marks the end of the row
+            if (length == 0)
+            {
+                next = ByteBufferUtil.EMPTY_BYTE_BUFFER;
+                return;
+            }
+
+            byte[] b = new byte[length];
+            in.readFully(b);
+            next = ByteBuffer.wrap(b);
+        }
+
+        public Composite readNext() throws IOException
+        {
+            maybeReadNext();
+            Composite c = type.fromByteBuffer(next);
+            next = null;
+            return c;
+        }
+
+        public void skipNext() throws IOException
+        {
+            maybeReadNext();
+            next = null;
+        }
+    }
+}
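
Both deserializers in this patch consume cell names from a DataInput as short-length-prefixed
blobs, with a zero length acting as the end-of-row marker (which is why an empty value is
"ok" in maybeReadNext() above). A minimal JDK-only sketch of that framing (values made up):

    import java.io.*;

    public class ShortLengthFramingSketch
    {
        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            out.writeShort(3);
            out.write(new byte[]{ 'a', 'b', 'c' });
            out.writeShort(1);
            out.write(new byte[]{ 'd' });
            out.writeShort(0); // zero length: end-of-row marker

            DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            while (true)
            {
                int length = in.readShort() & 0xFFFF;
                if (length == 0)
                    break;                   // same condition the deserializers check
                byte[] name = new byte[length];
                in.readFully(name);
                System.out.println(new String(name, "UTF-8")); // abc, then d
            }
        }
    }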

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/BoundedComposite.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/BoundedComposite.java b/src/java/org/apache/cassandra/db/composites/BoundedComposite.java
new file mode 100644
index 0000000..9e56dc2
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/BoundedComposite.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+public class BoundedComposite extends AbstractComposite
+{
+    private final Composite wrapped;
+    private final boolean isStart;
+
+    private BoundedComposite(Composite wrapped, boolean isStart)
+    {
+        this.wrapped = wrapped;
+        this.isStart = isStart;
+    }
+
+    static Composite startOf(Composite c)
+    {
+        return new BoundedComposite(c, true);
+    }
+
+    static Composite endOf(Composite c)
+    {
+        return new BoundedComposite(c, false);
+    }
+
+    public int size()
+    {
+        return wrapped.size();
+    }
+
+    public ByteBuffer get(int i)
+    {
+        return wrapped.get(i);
+    }
+
+    @Override
+    public EOC eoc()
+    {
+        return isStart ? EOC.START : EOC.END;
+    }
+
+    @Override
+    public Composite withEOC(EOC eoc)
+    {
+        switch (eoc)
+        {
+            case START:
+                return isStart ? this : startOf(wrapped);
+            case END:
+                return isStart ? endOf(wrapped) : this;
+            default:
+                return wrapped;
+        }
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer()
+    {
+        ByteBuffer bb = wrapped.toByteBuffer();
+        bb.put(bb.remaining() - 1, (byte)(isStart ? -1 : 1));
+        return bb;
+    }
+
+    public long memorySize()
+    {
+        return ObjectSizes.getFieldSize(ObjectSizes.getReferenceSize() + TypeSizes.NATIVE.sizeof(isStart))
+             + wrapped.memorySize();
+    }
+
+    public Composite copy(Allocator allocator)
+    {
+        return new BoundedComposite(wrapped.copy(allocator), isStart);
+    }
+}
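
The toByteBuffer() override above relies on the layout shown earlier: since every serialized
composite ends with an end-of-component byte, turning a composite into a range bound only
requires rewriting that final byte to -1 (start) or 1 (end). A tiny JDK-only sketch of that
last-byte patch (the example bytes are made up):

    import java.nio.ByteBuffer;

    public class BoundPatchSketch
    {
        // Rewrites the trailing end-of-component byte, as BoundedComposite.toByteBuffer() does.
        // Note: duplicate() shares the underlying bytes; in the patch this is safe because
        // wrapped.toByteBuffer() allocates a fresh buffer on every call.
        static ByteBuffer asBound(ByteBuffer serializedComposite, boolean isStart)
        {
            ByteBuffer bb = serializedComposite.duplicate();
            bb.put(bb.remaining() - 1, (byte) (isStart ? -1 : 1));
            return bb;
        }

        public static void main(String[] args)
        {
            // <len=1>'a'<eoc=0>, i.e. a one-component composite with EOC NONE
            ByteBuffer composite = ByteBuffer.wrap(new byte[]{ 0, 1, 'a', 0 });
            System.out.println(asBound(composite, true).get(3));  // -1
            System.out.println(asBound(composite, false).get(3)); // 1
        }
    }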

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CBuilder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CBuilder.java b/src/java/org/apache/cassandra/db/composites/CBuilder.java
new file mode 100644
index 0000000..490fb67
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CBuilder.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A builder of Composite.
+ */
+public interface CBuilder
+{
+    public int remainingCount();
+
+    public CBuilder add(ByteBuffer value);
+    public CBuilder add(Object value);
+
+    public Composite build();
+    public Composite buildWith(ByteBuffer value);
+}
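
A rough sketch of how this builder is meant to be used with the rest of the API added in this
patch. It assumes that, after this change, cfs.metadata.comparator exposes the table's
CellNameType (as the Scrubber hunk above suggests) and that the table has a single text
clustering column; the helper name and the way cfs is obtained are made up for illustration:

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.composites.CBuilder;
    import org.apache.cassandra.db.composites.CellNameType;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.db.filter.ColumnSlice;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CBuilderUsageSketch
    {
        // Builds a slice covering every cell of the CQL3 row with the given clustering value.
        static ColumnSlice sliceForClustering(ColumnFamilyStore cfs, String clusteringValue)
        {
            CellNameType type = cfs.metadata.comparator;  // assumed to be a CellNameType after this patch
            CBuilder builder = type.prefixBuilder();      // builder over the clustering columns only
            Composite prefix = builder.add(ByteBufferUtil.bytes(clusteringValue)).build();
            // start()/end() wrap the prefix in bounds sorting just before/after the row's cells;
            // prefix.slice() is the equivalent shortcut.
            return new ColumnSlice(prefix.start(), prefix.end());
        }
    }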

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CType.java b/src/java/org/apache/cassandra/db/composites/CType.java
new file mode 100644
index 0000000..c2516f5
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CType.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import org.apache.cassandra.db.DeletionInfo;
+import org.apache.cassandra.db.RangeTombstone;
+import org.apache.cassandra.db.RowIndexEntry;
+import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.db.filter.SliceQueryFilter;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.io.IVersionedSerializer;
+
+import static org.apache.cassandra.io.sstable.IndexHelper.IndexInfo;
+
+/**
+ * A type for a Composite.
+ *
+ * There are essentially 2 types of Composite, and thus of CType:
+ *   1. the "simple" ones, see SimpleCType.
+ *   2. the "truly-composite" ones, see CompoundCType.
+ *
+ * API-wise, a CType is simply a collection of AbstractType with a few utility
+ * methods.
+ */
+public interface CType extends Comparator<Composite>
+{
+    /**
+     * Returns whether this is a "truly-composite" underneath.
+     */
+    public boolean isCompound();
+
+    /**
+     * The number of subtypes for this CType.
+     */
+    public int size();
+
+    /**
+     * Gets a subtype of this CType.
+     */
+    public AbstractType<?> subtype(int i);
+
+    /**
+     * A builder of Composite.
+     */
+    public CBuilder builder();
+
+    /**
+     * Convenience method to build composites from their components.
+     *
+     * The arguments can be either ByteBuffer or actual objects of the type
+     * corresponding to their position.
+     */
+    public Composite make(Object... components);
+
+    /**
+     * Validates a composite.
+     */
+    public void validate(Composite name);
+
+    /**
+     * Converts a composite to a user-readable string.
+     */
+    public String getString(Composite c);
+
+    /**
+     * See AbstractType#isCompatibleWith.
+     */
+    public boolean isCompatibleWith(CType previous);
+
+    /**
+     * Returns a new CType that is equivalent to this CType but with
+     * one of its subtypes replaced by the provided new type.
+     */
+    public CType setSubtype(int position, AbstractType<?> newType);
+
+    /**
+     * Deserialize a Composite from a ByteBuffer.
+     *
+     * This is meant for thrift/cql2 to convert the fully serialized buffer we
+     * get from the clients to composites.
+     */
+    public Composite fromByteBuffer(ByteBuffer bb);
+
+    /**
+     * Returns an AbstractType corresponding to this CType, for thrift/cql2's sake.
+     *
+     * If the CType is a "simple" one, this just returns the wrapped type; otherwise
+     * it returns the corresponding org.apache.cassandra.db.marshal.CompositeType.
+     *
+     * This is only meant to be used for backward compatibility (particularly for
+     * thrift/cql2); it's not meant to be used internally.
+     */
+    public AbstractType<?> asAbstractType();
+
+
+    /**********************************************************/
+
+    /*
+     * What follows are the per-CType instances of the Comparators and Serializers used throughout
+     * the code. We need them because we want per-CType/per-CellNameType Composite/CellName
+     * serializers, which means the instances below have to depend on the type too.
+     */
+
+    public Comparator<Composite> reverseComparator();
+    public Comparator<IndexInfo> indexComparator();
+    public Comparator<IndexInfo> indexReverseComparator();
+
+    public Serializer serializer();
+
+    public ISerializer<IndexInfo> indexSerializer();
+    public IVersionedSerializer<ColumnSlice> sliceSerializer();
+    public IVersionedSerializer<SliceQueryFilter> sliceQueryFilterSerializer();
+    public DeletionInfo.Serializer deletionInfoSerializer();
+    public RangeTombstone.Serializer rangeTombstoneSerializer();
+    public RowIndexEntry.Serializer rowIndexEntrySerializer();
+
+    public interface Serializer extends ISerializer<Composite>
+    {
+        public void skip(DataInput in) throws IOException;
+    }
+}
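
A small sketch exercising the convenience methods declared above; how the CType instance is
obtained (e.g. from table metadata) is deliberately left out, and the method name is made up:

    import org.apache.cassandra.db.composites.CType;
    import org.apache.cassandra.db.composites.Composite;

    public class CTypeUsageSketch
    {
        // Works for any CType, simple or truly-composite.
        static void dumpComposite(CType type, Object... components)
        {
            Composite c = type.make(components);   // ByteBuffers or objects matching subtype(i)
            type.validate(c);                      // per-component validation against subtype(i)
            System.out.println(type.getString(c)); // ':'-joined, with ":_"/":!" marking START/END bounds
        }
    }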

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CellName.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CellName.java b/src/java/org/apache/cassandra/db/composites/CellName.java
new file mode 100644
index 0000000..1ca50eb
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CellName.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.utils.Allocator;
+
+/**
+ * A CellName is a Composite, but for which, for the sake of CQL3, we
+ * distinguish different parts: a CellName has first a number of clustering
+ * components, followed by the CQL3 column name, and then possibly followed by
+ * a collection element part.
+ *
+ * The clustering prefix can itself be composed of multiple components. It can
+ * also be empty if the table has no clustering keys. In general, the CQL3
+ * column name follows. However, some types of COMPACT STORAGE layout do not
+ * store the CQL3 column name in the cell name, so this part can be null (we
+ * call "dense" the cells whose names don't store the CQL3 column name).
+ *
+ * Lastly, if the cell is part of a CQL3 collection, we'll have a last
+ * component (a UUID for lists, an element for sets and a key for maps).
+ */
+public interface CellName extends Composite
+{
+    /**
+     * The number of clustering components.
+     *
+     * It can be 0 if the table has no clustering columns, and it can be
+     * equal to size() if the table is dense (in which case cql3ColumnName()
+     * will be null).
+     */
+    public int clusteringSize();
+
+    /**
+     * The name of the CQL3 column this cell represents.
+     *
+     * Will be null for cells of "dense" tables.
+     */
+    public ColumnIdentifier cql3ColumnName();
+
+    /**
+     * The value of the collection element, or null if the cell is not part
+     * of a collection (i.e. if !isCollectionCell()).
+     */
+    public ByteBuffer collectionElement();
+    public boolean isCollectionCell();
+
+    /**
+     * Whether this cell is part of the same CQL3 row as the other cell.
+     */
+    public boolean isSameCQL3RowAs(CellName other);
+
+    // If cellnames were sharing some prefix components, this will break it, so
+    // we might want to try to do better.
+    @Override
+    public CellName copy(Allocator allocator);
+}
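
A sketch showing how the parts described above can be inspected on a cell read from a CQL3
(sparse) table; the helper name is made up and the cell is assumed to come from such a table
(for dense layouts cql3ColumnName() would be null):

    import java.nio.ByteBuffer;

    import org.apache.cassandra.cql3.ColumnIdentifier;
    import org.apache.cassandra.db.Column;
    import org.apache.cassandra.db.composites.CellName;

    public class CellNameInspectionSketch
    {
        static void describe(Column cell)
        {
            CellName name = cell.name();
            for (int i = 0; i < name.clusteringSize(); i++)
            {
                ByteBuffer clustering = name.get(i);         // raw clustering component
                System.out.println("clustering[" + i + "]: " + clustering.remaining() + " bytes");
            }
            ColumnIdentifier column = name.cql3ColumnName(); // the CQL3 column this cell belongs to
            System.out.println("cql3 column: " + column);
            if (name.isCollectionCell())
                System.out.println("collection element: " + name.collectionElement().remaining() + " bytes");
        }
    }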

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CellNameType.java b/src/java/org/apache/cassandra/db/composites/CellNameType.java
new file mode 100644
index 0000000..53d89f2
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CellNameType.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.ColumnSerializer;
+import org.apache.cassandra.db.OnDiskAtom;
+import org.apache.cassandra.db.filter.IDiskAtomFilter;
+import org.apache.cassandra.db.filter.NamesQueryFilter;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CollectionType;
+import org.apache.cassandra.db.marshal.ColumnToCollectionType;
+import org.apache.cassandra.io.ISerializer;
+import org.apache.cassandra.io.IVersionedSerializer;
+
+/**
+ * The type of CellNames.
+ *
+ * In the same way that a CellName is a Composite, a CellNameType is a CType, but
+ * with a number of methods specific to cell names.
+ *
+ * On top of the simple/truly-composite dichotomy of composites, cell names come
+ * in 2 variants: "dense" and "sparse". The sparse ones are CellNames where one of
+ * the components (the last, or second-to-last for collections) is used to store
+ * the CQL3 column name. Dense ones are those for which that is not the case.
+ *
+ * In other words, we have 4 types of CellName/CellNameType which correspond to the
+ * 4 types of table layout that we need to distinguish:
+ *   1. Simple (non-truly-composite) dense: these are the dynamic thrift CFs whose
+ *      comparator is not composite.
+ *   2. Composite dense: these are the dynamic thrift CFs with a CompositeType comparator.
+ *   3. Simple (non-truly-composite) sparse: these are the static thrift CFs (those that
+ *      don't have a composite comparator).
+ *   4. Composite sparse: this is the CQL3 layout (note that it is the only one that
+ *      supports collections).
+ */
+public interface CellNameType extends CType
+{
+    /**
+     * Whether or not the cell names for this type are dense.
+     */
+    public boolean isDense();
+
+    /**
+     * The number of clustering columns for the table this is the type of.
+     */
+    public int clusteringPrefixSize();
+
+    /**
+     * A builder for the clustering prefix.
+     */
+    public CBuilder prefixBuilder();
+
+    /**
+     * Whether or not there are collections defined in this type.
+     */
+    public boolean hasCollections();
+
+    /**
+     * Whether or not this type's layout supports collections.
+     */
+    public boolean supportCollections();
+
+    /**
+     * The type of the collections (or null if the type has no collections).
+     */
+    public ColumnToCollectionType collectionType();
+
+    /**
+     * Returns the new type obtained by adding the new collection type for the provided column name
+     * to this type.
+     */
+    public CellNameType addCollection(ColumnIdentifier columnName, CollectionType newCollection);
+
+    /**
+     * Returns a new CellNameType that is equivalent to this one but with one
+     * of the subtypes replaced by the provided new type.
+     */
+    @Override
+    public CellNameType setSubtype(int position, AbstractType<?> newType);
+
+    /**
+     * Creates a row marker for the CQL3 row having the provided clustering prefix.
+     *
+     * Note that this is only valid for CQL3 tables (isCompound() and !isDense()) and should
+     * only be called for them.
+     */
+    public CellName rowMarker(Composite prefix);
+
+    /**
+     * Creates a new CellName given a clustering prefix and a CQL3 columnName.
+     *
+     * Note that for dense types, the columnName can be null.
+     */
+    public CellName create(Composite prefix, ColumnIdentifier columnName);
+
+    /**
+     * Creates a new collection CellName given a clustering prefix, a CQL3 columnName and the collection element.
+     */
+    public CellName create(Composite prefix, ColumnIdentifier columnName, ByteBuffer collectionElement);
+
+    /**
+     * Convenience method to create a cell name given its components.
+     *
+     * This is equivalent to CType#make() but returns a full cell name (and thus
+     * requires all the components of the name).
+     */
+    public CellName makeCellName(Object... components);
+
+    /**
+     * Deserializes a cell name from a ByteBuffer.
+     *
+     * This is equivalent to CType#fromByteBuffer but assumes the buffer is a full cell
+     * name. This is meant for thrift/cql2 to convert the fully serialized buffers we
+     * get from clients.
+     */
+    public CellName cellFromByteBuffer(ByteBuffer bb);
+
+    /**
+     * Creates a new CQL3Row builder for this type. See CQL3Row for details.
+     */
+    public CQL3Row.Builder CQL3RowBuilder(long now);
+
+    // The two following methods are used to pass the declared regular column names (in CFMetaData)
+    // to the CellNameType. This is only used for optimization sake, see SparseCellNameType.
+    public void addCQL3Column(ColumnIdentifier id);
+    public void removeCQL3Column(ColumnIdentifier id);
+
+    /**
+     * Creates a new Deserializer. This is used by AtomDeserializer to do incremental and on-demand
+     * deserialization of on-disk atoms. See AtomDeserializer for details.
+     */
+    public Deserializer newDeserializer(DataInput in);
+
+    /*
+     * As in CType, what follows is a number of per-CellNameType instances of the Comparators and Serializers used
+     * throughout the code (those that require a full CellName rather than just a Composite).
+     */
+
+    // Ultimately, those might be split into an IVersionedSerializer and an ISSTableSerializer
+    public ISerializer<CellName> cellSerializer();
+
+    public Comparator<Column> columnComparator();
+    public Comparator<Column> columnReverseComparator();
+    public Comparator<OnDiskAtom> onDiskAtomComparator();
+
+    public ColumnSerializer columnSerializer();
+    public OnDiskAtom.Serializer onDiskAtomSerializer();
+    public IVersionedSerializer<NamesQueryFilter> namesQueryFilterSerializer();
+    public IVersionedSerializer<IDiskAtomFilter> diskAtomFilterSerializer();
+
+    public interface Deserializer
+    {
+        /**
+         * Whether this deserializer is done or not, i.e. whether we've reached the end-of-row marker.
+         */
+        public boolean hasNext() throws IOException;
+
+        /**
+         * Whether or not some name has been read but not consumed by readNext.
+         */
+        public boolean hasUnprocessed() throws IOException;
+
+        /**
+         * Compares the next name to read to the provided Composite.
+         * This does not consume the next name.
+         */
+        public int compareNextTo(Composite composite) throws IOException;
+
+        /**
+         * Actually consume the next name and return it.
+         */
+        public Composite readNext() throws IOException;
+
+        /**
+         * Skip the next name (consuming it).
+         */
+        public void skipNext() throws IOException;
+    }
+}
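
The Deserializer contract above is meant to be driven roughly as in the sketch
below (illustrative only; 'in' and 'upperBound' are placeholders, and
AtomDeserializer is where this pattern actually lives).

    // Sketch only: scan on-disk names up to some bound, consuming each name only
    // once we know we want it.
    CellNameType.Deserializer deser = type.newDeserializer(in);   // 'in' is a DataInput
    while (deser.hasNext() && deser.compareNextTo(upperBound) <= 0)
    {
        Composite name = deser.readNext();   // consumes the name we just compared
        // ... hand the name to whatever is rebuilding the atom ...
    }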

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CellNames.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CellNames.java b/src/java/org/apache/cassandra/db/composites/CellNames.java
new file mode 100644
index 0000000..15f4e16
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CellNames.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.ColumnToCollectionType;
+import org.apache.cassandra.db.marshal.CompositeType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+
+public abstract class CellNames
+{
+    private CellNames() {}
+
+    public static CellNameType fromAbstractType(AbstractType<?> type, boolean isDense)
+    {
+        if (isDense)
+        {
+            if (type instanceof CompositeType)
+            {
+                return new CompoundDenseCellNameType(((CompositeType)type).types);
+            }
+            else
+            {
+                return new SimpleDenseCellNameType(type);
+            }
+        }
+        else
+        {
+            if (type instanceof CompositeType)
+            {
+                List<AbstractType<?>> types = ((CompositeType)type).types;
+                if (types.get(types.size() - 1) instanceof ColumnToCollectionType)
+                {
+                    assert types.get(types.size() - 2) instanceof UTF8Type;
+                    return new CompoundSparseCellNameType.WithCollection(types.subList(0, types.size() - 2), (ColumnToCollectionType)types.get(types.size() - 1));
+                }
+                else
+                {
+                    assert types.get(types.size() - 1) instanceof UTF8Type;
+                    return new CompoundSparseCellNameType(types.subList(0, types.size() - 1));
+                }
+            }
+            else
+            {
+                return new SimpleSparseCellNameType(type);
+            }
+        }
+    }
+
+    // Mainly for tests and a few cases where we know what we need and don't want to pass the type around.
+    // Avoid in general, prefer the CellNameType methods.
+    public static CellName simpleDense(ByteBuffer bb)
+    {
+        assert bb.hasRemaining();
+        return new SimpleDenseCellName(bb);
+    }
+
+    // Mainly for tests and a few cases where we know what we need and don't want to pass the type around.
+    // Avoid in general, prefer the CellNameType methods.
+    public static CellName compositeDense(ByteBuffer... bbs)
+    {
+        return new CompoundDenseCellName(bbs);
+    }
+
+    public static String getColumnsString(CellNameType type, Iterable<Column> columns)
+    {
+        StringBuilder builder = new StringBuilder();
+        for (Column column : columns)
+            builder.append(column.getString(type)).append(",");
+        return builder.toString();
+    }
+}
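
For reference, fromAbstractType() above maps the comparator/isDense combinations
onto the four layouts described in CellNameType (sketch only; 'composite' stands
for some CompositeType comparator).

    CellNames.fromAbstractType(UTF8Type.instance, true);   // 1. simple dense     -> SimpleDenseCellNameType
    CellNames.fromAbstractType(composite, true);           // 2. composite dense  -> CompoundDenseCellNameType
    CellNames.fromAbstractType(UTF8Type.instance, false);  // 3. simple sparse    -> SimpleSparseCellNameType
    CellNames.fromAbstractType(composite, false);          // 4. composite sparse -> CompoundSparseCellNameType
    // For the composite sparse case, the last component of 'composite' (or the
    // second-to-last when a ColumnToCollectionType is present) must be UTF8Type,
    // since that is the component holding the CQL3 column name.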

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/Composite.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/Composite.java b/src/java/org/apache/cassandra/db/composites/Composite.java
new file mode 100644
index 0000000..7001384
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/Composite.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cache.IMeasurableMemory;
+import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.utils.Allocator;
+
+/**
+ * A composite value.
+ *
+ * This can be thought of as a list of ByteBuffers, except that it also includes
+ * an 'end-of-component' flag that allows precise selection of composite ranges.
+ *
+ * We also distinguish between "true" composites and "simple" ones. A non-truly
+ * composite will have size() == 1 but differs from a true composite with
+ * size() == 1 in the way it is stored. Most code shouldn't have to care about the
+ * difference.
+ */
+public interface Composite extends IMeasurableMemory
+{
+    public enum EOC
+    {
+        START, NONE, END;
+
+        public static EOC from(int eoc)
+        {
+            return eoc == 0 ? NONE : (eoc < 0 ? START : END);
+        }
+
+        public byte toByte()
+        {
+            switch (this)
+            {
+                case START: return (byte)-1;
+                case NONE:  return (byte) 0;
+                case END:   return (byte) 1;
+                default: throw new AssertionError();
+            }
+        }
+    }
+
+    public int size();
+    public boolean isEmpty();
+    public ByteBuffer get(int i);
+
+    public EOC eoc();
+    public Composite withEOC(EOC eoc);
+    public Composite start();
+    public Composite end();
+    public ColumnSlice slice();
+
+    public boolean isPrefixOf(Composite other);
+
+    public ByteBuffer toByteBuffer();
+
+    public int dataSize();
+    public Composite copy(Allocator allocator);
+}
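
A quick sketch of how the end-of-component flag is used for range selection
(illustrative only; 'prefix' is some clustering prefix built elsewhere): a
composite flagged EOC.START sorts before, and one flagged EOC.END sorts after,
every composite it is a prefix of, so slicing between the two selects exactly
the cells under that prefix.

    Composite start = prefix.start();   // sorts before anything that has 'prefix' as prefix
    Composite end   = prefix.end();     // sorts after anything that has 'prefix' as prefix
    ColumnSlice slice = prefix.slice(); // the corresponding selection in one call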

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/Composites.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/Composites.java b/src/java/org/apache/cassandra/db/composites/Composites.java
new file mode 100644
index 0000000..e02725c
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/Composites.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.filter.ColumnSlice;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+public abstract class Composites
+{
+    private Composites() {}
+
+    public static final Composite EMPTY = new EmptyComposite();
+
+    static final CBuilder EMPTY_BUILDER = new CBuilder()
+    {
+        public int remainingCount() { return 0; }
+
+        public CBuilder add(ByteBuffer value) { throw new IllegalStateException(); }
+        public CBuilder add(Object value) { throw new IllegalStateException(); }
+
+        public Composite build() { return EMPTY; }
+        public Composite buildWith(ByteBuffer value) { throw new IllegalStateException(); }
+    };
+
+    private static class EmptyComposite implements Composite
+    {
+        public boolean isEmpty()
+        {
+            return true;
+        }
+
+        public int size()
+        {
+            return 0;
+        }
+
+        public ByteBuffer get(int i)
+        {
+            throw new IndexOutOfBoundsException();
+        }
+
+        public EOC eoc()
+        {
+            return EOC.NONE;
+        }
+
+        public Composite start()
+        {
+            return this;
+        }
+
+        public Composite end()
+        {
+            return this;
+        }
+
+        public Composite withEOC(EOC newEoc)
+        {
+            return this;
+        }
+
+        public ColumnSlice slice()
+        {
+            return ColumnSlice.ALL_COLUMNS;
+        }
+
+        public ByteBuffer toByteBuffer()
+        {
+            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
+        }
+
+        public int dataSize()
+        {
+            return 0;
+        }
+
+        public long memorySize()
+        {
+            return 0;
+        }
+
+        public boolean isPrefixOf(Composite c)
+        {
+            return true;
+        }
+
+        public Composite copy(Allocator allocator)
+        {
+            return this;
+        }
+    }
+}
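
Note that the empty composite behaves as a universal prefix, which is what makes
it usable as an "unbounded" slice bound (sketch only; 'someComposite' is any
composite at all).

    assert Composites.EMPTY.isEmpty();
    assert Composites.EMPTY.isPrefixOf(someComposite);            // true for any composite
    assert Composites.EMPTY.slice() == ColumnSlice.ALL_COLUMNS;   // selects everything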

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundCType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundCType.java b/src/java/org/apache/cassandra/db/composites/CompoundCType.java
new file mode 100644
index 0000000..bbad550
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundCType.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CompositeType;
+
+/**
+ * A truly-composite CType.
+ */
+public class CompoundCType extends AbstractCType
+{
+    final List<AbstractType<?>> types;
+
+    // It's up to the caller to pass a list that is effectively immutable
+    public CompoundCType(List<AbstractType<?>> types)
+    {
+        this.types = types;
+    }
+
+    public boolean isCompound()
+    {
+        return true;
+    }
+
+    public int size()
+    {
+        return types.size();
+    }
+
+    public AbstractType<?> subtype(int i)
+    {
+        return types.get(i);
+    }
+
+    public Composite fromByteBuffer(ByteBuffer bytes)
+    {
+        if (!bytes.hasRemaining())
+            return Composites.EMPTY;
+
+        ByteBuffer[] elements = new ByteBuffer[size()];
+        int idx = bytes.position(), i = 0;
+        byte eoc = 0;
+        while (idx < bytes.limit())
+        {
+            checkRemaining(bytes, idx, 2);
+            int length = bytes.getShort(idx) & 0xFFFF;
+            idx += 2;
+
+            checkRemaining(bytes, idx, length + 1);
+            elements[i++] = sliceBytes(bytes, idx, length);
+            idx += length;
+            eoc = bytes.get(idx++);
+        }
+        return new CompoundComposite(elements, i).withEOC(Composite.EOC.from(eoc));
+    }
+
+    public CBuilder builder()
+    {
+        return new CompoundCBuilder(this);
+    }
+
+    public CompoundCType setSubtype(int position, AbstractType<?> newType)
+    {
+        List<AbstractType<?>> newTypes = new ArrayList<AbstractType<?>>(types);
+        newTypes.set(position, newType);
+        return new CompoundCType(newTypes);
+    }
+
+    public AbstractType<?> asAbstractType()
+    {
+        return CompositeType.getInstance(types);
+    }
+
+    public static class CompoundCBuilder implements CBuilder
+    {
+        private final CType type;
+        private final ByteBuffer[] values;
+        private int size;
+        private boolean built;
+
+        public CompoundCBuilder(CType type)
+        {
+            this.type = type;
+            this.values = new ByteBuffer[type.size()];
+        }
+
+        public int remainingCount()
+        {
+            return values.length - size;
+        }
+
+        public CBuilder add(ByteBuffer value)
+        {
+            if (isDone())
+                throw new IllegalStateException();
+            values[size++] = value;
+            return this;
+        }
+
+        public CBuilder add(Object value)
+        {
+            return add(((AbstractType)type.subtype(size)).decompose(value));
+        }
+
+        private boolean isDone()
+        {
+            return remainingCount() == 0 || built;
+        }
+
+        public Composite build()
+        {
+            if (size == 0)
+                return Composites.EMPTY;
+
+            // We don't allow adding more elements to a builder that has been built,
+            // so that we don't have to copy values.
+            built = true;
+
+            // If the builder is full and we're building a dense cell name, then we can
+            // directly allocate the CellName object as it's complete.
+            if (size == values.length && type instanceof CellNameType && ((CellNameType)type).isDense())
+                return new CompoundDenseCellName(values);
+            return new CompoundComposite(values, size);
+        }
+
+        public Composite buildWith(ByteBuffer value)
+        {
+            ByteBuffer[] newValues = Arrays.copyOf(values, values.length);
+            newValues[size] = value;
+            // Same as above
+            if (size+1 == newValues.length && type instanceof CellNameType && ((CellNameType)type).isDense())
+                return new CompoundDenseCellName(newValues);
+
+            return new CompoundComposite(newValues, size+1);
+        }
+    }
+}
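
To make the serialized form parsed by fromByteBuffer() above concrete: each
component is encoded as an unsigned 16-bit length, the component bytes, and a
trailing end-of-component byte. A small round-trip sketch (illustrative only,
assuming the usual java.util and marshal imports):

    CompoundCType ctype = new CompoundCType(
        Arrays.<AbstractType<?>>asList(UTF8Type.instance, Int32Type.instance));

    Composite c = ctype.builder().add("key").add(42).build();  // add(Object) decomposes via the subtypes
    ByteBuffer serialized = c.toByteBuffer();

    Composite back = ctype.fromByteBuffer(serialized);
    assert back.size() == 2 && back.eoc() == Composite.EOC.NONE;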

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundComposite.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundComposite.java b/src/java/org/apache/cassandra/db/composites/CompoundComposite.java
new file mode 100644
index 0000000..0a258b3
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundComposite.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+/**
+ * A "truly-composite" Composite.
+ */
+public class CompoundComposite extends AbstractComposite
+{
+    // We could use a List, but we'll create such objects *a lot*, and using an array+size is
+    // not all that harder, so we save the List object allocation.
+    final ByteBuffer[] elements;
+    final int size;
+
+    CompoundComposite(ByteBuffer[] elements, int size)
+    {
+        this.elements = elements;
+        this.size = size;
+    }
+
+    public int size()
+    {
+        return size;
+    }
+
+    public ByteBuffer get(int i)
+    {
+        return elements[i];
+    }
+
+    protected ByteBuffer[] elementsCopy(Allocator allocator)
+    {
+        ByteBuffer[] elementsCopy = new ByteBuffer[size];
+        for (int i = 0; i < size; i++)
+            elementsCopy[i] = allocator.clone(elements[i]);
+        return elementsCopy;
+    }
+
+    public long memorySize()
+    {
+        return ObjectSizes.getFieldSize(TypeSizes.NATIVE.sizeof(size))
+             + ObjectSizes.getArraySize(elements);
+    }
+
+    public Composite copy(Allocator allocator)
+    {
+        return new CompoundComposite(elementsCopy(allocator), size);
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundDenseCellName.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundDenseCellName.java b/src/java/org/apache/cassandra/db/composites/CompoundDenseCellName.java
new file mode 100644
index 0000000..10f1a11
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundDenseCellName.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.utils.Allocator;
+import org.apache.cassandra.utils.ObjectSizes;
+
+public class CompoundDenseCellName extends CompoundComposite implements CellName
+{
+    // Not meant to be used directly, you should use the CellNameType methods instead
+    CompoundDenseCellName(ByteBuffer[] elements)
+    {
+        super(elements, elements.length);
+    }
+
+    public int clusteringSize()
+    {
+        return size;
+    }
+
+    public ColumnIdentifier cql3ColumnName()
+    {
+        return null;
+    }
+
+    public ByteBuffer collectionElement()
+    {
+        return null;
+    }
+
+    public boolean isCollectionCell()
+    {
+        return false;
+    }
+
+    public boolean isSameCQL3RowAs(CellName other)
+    {
+        // Dense cells imply one cell per CQL3 row, so no other cell will be in the same row.
+        return equals(other);
+    }
+
+    @Override
+    public long memorySize()
+    {
+        return ObjectSizes.getSuperClassFieldSize(super.memorySize());
+    }
+
+    public CellName copy(Allocator allocator)
+    {
+        return new CompoundDenseCellName(elementsCopy(allocator));
+    }
+}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/db/composites/CompoundDenseCellNameType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/composites/CompoundDenseCellNameType.java b/src/java/org/apache/cassandra/db/composites/CompoundDenseCellNameType.java
new file mode 100644
index 0000000..bb7a06e
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/composites/CompoundDenseCellNameType.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.composites;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.cassandra.cql3.CQL3Row;
+import org.apache.cassandra.cql3.ColumnIdentifier;
+import org.apache.cassandra.db.marshal.AbstractType;
+
+public class CompoundDenseCellNameType extends AbstractCompoundCellNameType
+{
+    public CompoundDenseCellNameType(List<AbstractType<?>> types)
+    {
+        this(new CompoundCType(types));
+    }
+
+    private CompoundDenseCellNameType(CompoundCType type)
+    {
+        super(type, type);
+    }
+
+    public CellNameType setSubtype(int position, AbstractType<?> newType)
+    {
+        if (position != 0)
+            throw new IllegalArgumentException();
+        return new SimpleDenseCellNameType(newType);
+    }
+
+    public boolean isDense()
+    {
+        return true;
+    }
+
+    public CellName create(Composite prefix, ColumnIdentifier columnName)
+    {
+        // We ignore the columnName because it's just the COMPACT_VALUE name, which is not stored in the cell name
+        assert prefix.size() == fullSize;
+        if (prefix instanceof CellName)
+            return (CellName)prefix;
+
+        assert prefix instanceof CompoundComposite;
+        CompoundComposite lc = (CompoundComposite)prefix;
+        assert lc.elements.length == lc.size;
+        return new CompoundDenseCellName(lc.elements);
+    }
+
+    protected Composite makeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+    {
+        if (size < fullSize || eoc != Composite.EOC.NONE)
+            return new CompoundComposite(components, size).withEOC(eoc);
+
+        assert components.length == size;
+        return new CompoundDenseCellName(components);
+    }
+
+    protected Composite copyAndMakeWith(ByteBuffer[] components, int size, Composite.EOC eoc)
+    {
+        return makeWith(Arrays.copyOfRange(components, 0, size), size, eoc);
+    }
+
+    public void addCQL3Column(ColumnIdentifier id) {}
+    public void removeCQL3Column(ColumnIdentifier id) {}
+
+    public CQL3Row.Builder CQL3RowBuilder(long now)
+    {
+        return makeDenseCQL3RowBuilder(now);
+    }
+}
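
Tying this back to the dense/sparse distinction: a compound dense cell name
(layout 2 in CellNameType) is nothing more than its clustering components, which
the test helper in CellNames makes easy to see (sketch only).

    CellName cell = CellNames.compositeDense(ByteBufferUtil.bytes("foo"), ByteBufferUtil.bytes(42));

    assert cell.clusteringSize() == 2;      // every component is a clustering component
    assert cell.cql3ColumnName() == null;   // dense: no CQL3 column name is stored
    assert !cell.isCollectionCell();        // and no collection element either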