Posted to commits@cassandra.apache.org by sl...@apache.org on 2013/12/17 17:03:41 UTC

[05/13] Push composites support in the storage engine
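
The patch swaps raw ByteBuffer column names for typed Composite/CellName values that are always produced by the table's comparator (now a CellNameType). A minimal sketch of the new call pattern, pieced together from the hunks below; the CFMetaData lookup and the column value are illustrative only:

    import java.nio.ByteBuffer;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.config.Schema;
    import org.apache.cassandra.db.composites.CellName;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class CellNameSketch
    {
        public static void example()
        {
            // Hypothetical keyspace/table; any table registered in Schema works.
            CFMetaData metadata = Schema.instance.getCFMetaData("ks", "cf");

            // Before this patch callers handed ByteBuffers around directly;
            // now the comparator builds a typed CellName from the raw bytes...
            CellName name = metadata.comparator.cellFromByteBuffer(ByteBufferUtil.bytes("col"));

            // ...and Thrift/paging code converts back when bytes are needed.
            ByteBuffer raw = name.toByteBuffer();
        }
    }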

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
index 86348aa..c038b02 100644
--- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java
@@ -31,12 +31,12 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.cassandra.cql3.statements.*;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.config.*;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.RequestValidationException;
-import org.apache.cassandra.io.compress.CompressionParameters;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.utils.Pair;
@@ -190,14 +190,14 @@ public class CQLSSTableWriter
             throw new InvalidRequestException(String.format("Invalid number of arguments, expecting %d values but got %d", boundNames.size(), values.size()));
 
         List<ByteBuffer> keys = insert.buildPartitionKeyNames(values);
-        ColumnNameBuilder clusteringPrefix = insert.createClusteringPrefixBuilder(values);
+        Composite clusteringPrefix = insert.createClusteringPrefix(values);
 
         long now = System.currentTimeMillis() * 1000;
         UpdateParameters params = new UpdateParameters(insert.cfm,
                                                        values,
                                                        insert.getTimestamp(now, values),
                                                        insert.getTimeToLive(values),
-                                                       Collections.<ByteBuffer, ColumnGroupMap>emptyMap());
+                                                       Collections.<ByteBuffer, CQL3Row>emptyMap());
 
         for (ByteBuffer key: keys)
         {
@@ -321,13 +321,20 @@ public class CQLSSTableWriter
 
                 // We need to register the keyspace/table metadata through Schema, otherwise we won't be able to properly
                 // build the insert statement in using().
-                KSMetaData ksm = KSMetaData.newKeyspace(this.schema.ksName,
-                                                        AbstractReplicationStrategy.getClass("org.apache.cassandra.locator.SimpleStrategy"),
-                                                        ImmutableMap.of("replication_factor", "1"),
-                                                        true,
-                                                        Collections.singleton(this.schema));
+                if (Schema.instance.getKSMetaData(this.schema.ksName) == null)
+                {
+                    KSMetaData ksm = KSMetaData.newKeyspace(this.schema.ksName,
+                                                            AbstractReplicationStrategy.getClass("org.apache.cassandra.locator.SimpleStrategy"),
+                                                            ImmutableMap.of("replication_factor", "1"),
+                                                            true,
+                                                            Collections.singleton(this.schema));
+                    Schema.instance.load(ksm);
+                }
+                else if (Schema.instance.getCFMetaData(this.schema.ksName, this.schema.cfName) == null)
+                {
+                    Schema.instance.load(this.schema);
+                }
 
-                Schema.instance.load(ksm);
                 return this;
             }
             catch (RequestValidationException e)
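
The added guard only registers the keyspace (or just the table) in Schema when it is not already known, so building several writers against the same keyspace in one process no longer re-loads the metadata. A usage sketch of the builder path this code sits behind; the schema, insert statement and output directory are illustrative:

    import java.io.File;

    import org.apache.cassandra.io.sstable.CQLSSTableWriter;

    public class WriterSketch
    {
        public static void main(String[] args) throws Exception
        {
            String schema = "CREATE TABLE ks.t (k text, c int, v int, PRIMARY KEY (k, c))";
            String insert = "INSERT INTO ks.t (k, c, v) VALUES (?, ?, ?)";

            // forTable() runs the Builder code above: keyspace and table metadata
            // are loaded into Schema only the first time they are seen.
            CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                      .inDirectory(new File("/tmp/ks/t"))
                                                      .forTable(schema)
                                                      .using(insert)
                                                      .build();
            writer.addRow("key1", 1, 42);
            writer.close();
        }
    }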

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java b/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
index aaed765..d390518 100644
--- a/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
+++ b/src/java/org/apache/cassandra/io/sstable/ColumnNameHelper.java
@@ -19,112 +19,91 @@ package org.apache.cassandra.io.sstable;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.CellNameType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.ColumnToCollectionType;
-import org.apache.cassandra.db.marshal.CompositeType;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.minimalBufferFor;
 
 public class ColumnNameHelper
 {
+    private static List<ByteBuffer> maybeGrow(List<ByteBuffer> l, int size)
+    {
+        if (l.size() >= size)
+            return l;
+
+        List<ByteBuffer> nl = new ArrayList<>(size);
+        nl.addAll(l);
+        for (int i = l.size(); i < size; i++)
+            nl.add(null);
+        return nl;
+    }
+
+    private static List<ByteBuffer> getComponents(Composite prefix, int size)
+    {
+        List<ByteBuffer> l = new ArrayList<>(size);
+        for (int i = 0; i < size; i++)
+            l.add(prefix.get(i));
+        return l;
+    }
+
     /**
-     * finds the max column name(s)
-     *
-     * if comparator is of CompositeType, candidate will be split into its components, and each
-     * component is compared to the component on the same place in maxSeen, and then returning the list
-     * with the max columns.
-     *
-     * will collect at most the number of types in the comparator.
+     * finds the max cell name component(s)
      *
-     * if comparator is not CompositeType, maxSeen is assumed to be of size 1 and the item there is
-     * compared to the candidate.
+     * Note that this method *can modify maxSeen*.
      *
      * @param maxSeen the max columns seen so far
      * @param candidate the candidate column(s)
      * @param comparator the comparator to use
      * @return a list with the max column(s)
      */
-    public static List<ByteBuffer> maxComponents(List<ByteBuffer> maxSeen, ByteBuffer candidate, AbstractType<?> comparator)
+    public static List<ByteBuffer> maxComponents(List<ByteBuffer> maxSeen, Composite candidate, CellNameType comparator)
     {
-        if (comparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)comparator;
-            if (maxSeen.isEmpty())
-                return Arrays.asList(ct.split(candidate));
-
-            int typeCount = getTypeCount(ct);
-
-            List<ByteBuffer> components = Arrays.asList(ct.split(candidate));
-            List<ByteBuffer> biggest = maxSeen.size() > components.size() ? maxSeen : components;
-            // if typecount is less than both the components and maxseen, we only keep typecount columns.
-            int minSize = Math.min(typeCount, Math.min(components.size(), maxSeen.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(ColumnNameHelper.max(maxSeen.get(i), components.get(i), ct.types.get(i)));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(biggest.get(i));
-
-            return retList;
-        }
-        else
-        {
-            if (maxSeen.size() == 0)
-                return Collections.singletonList(candidate);
-            return Collections.singletonList(ColumnNameHelper.max(maxSeen.get(0), candidate, comparator));
-        }
+        // For a cell name, no reason to look further than the clustering prefix
+        // (and comparing the collection element would actually crash)
+        int size = candidate instanceof CellName ? ((CellName)candidate).clusteringSize() : candidate.size();
+
+        if (maxSeen.isEmpty())
+            return getComponents(candidate, size);
+
+        // In most cases maxSeen is big enough to hold the result, so update it in place in those cases
+        maxSeen = maybeGrow(maxSeen, size);
+
+        for (int i = 0; i < size; i++)
+            maxSeen.set(i, max(maxSeen.get(i), candidate.get(i), comparator.subtype(i)));
+
+        return maxSeen;
     }
+
     /**
-     * finds the min column name(s)
-     *
-     * if comparator is of CompositeType, candidate will be split into its components, and each
-     * component is compared to the component on the same place in minSeen, and then returning the list
-     * with the min columns.
+     * finds the min cell name component(s)
      *
-     * if comparator is not CompositeType, maxSeen is assumed to be of size 1 and the item there is
-     * compared to the candidate.
+     * Note that this method *can modify minSeen*.
      *
     * @param minSeen the min columns seen so far
      * @param candidate the candidate column(s)
      * @param comparator the comparator to use
      * @return a list with the min column(s)
      */
-    public static List<ByteBuffer> minComponents(List<ByteBuffer> minSeen, ByteBuffer candidate, AbstractType<?> comparator)
+    public static List<ByteBuffer> minComponents(List<ByteBuffer> minSeen, Composite candidate, CellNameType comparator)
     {
-        if (comparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)comparator;
-            if (minSeen.isEmpty())
-                return Arrays.asList(ct.split(candidate));
-
-            int typeCount = getTypeCount(ct);
-
-            List<ByteBuffer> components = Arrays.asList(ct.split(candidate));
-            List<ByteBuffer> biggest = minSeen.size() > components.size() ? minSeen : components;
-            // if typecount is less than both the components and maxseen, we only collect typecount columns.
-            int minSize = Math.min(typeCount, Math.min(components.size(), minSeen.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(ColumnNameHelper.min(minSeen.get(i), components.get(i), ct.types.get(i)));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(biggest.get(i));
-
-            return retList;
-        }
-        else
-        {
-            if (minSeen.size() == 0)
-                return Collections.singletonList(candidate);
-            return Collections.singletonList(ColumnNameHelper.min(minSeen.get(0), candidate, comparator));
-
-        }
+        // For a cell name, no reason to look further than the clustering prefix
+        // (and comparing the collection element would actually crash)
+        int size = candidate instanceof CellName ? ((CellName)candidate).clusteringSize() : candidate.size();
+
+        if (minSeen.isEmpty())
+            return getComponents(candidate, size);
+
+        // In most cases minSeen is big enough to hold the result, so update it in place in those cases
+        minSeen = maybeGrow(minSeen, size);
+
+        for (int i = 0; i < size; i++)
+            minSeen.set(i, min(minSeen.get(i), candidate.get(i), comparator.subtype(i)));
+
+        return minSeen;
     }
 
     /**
@@ -139,6 +118,11 @@ public class ColumnNameHelper
      */
     private static ByteBuffer min(ByteBuffer b1, ByteBuffer b2, AbstractType<?> comparator)
     {
+        if (b1 == null)
+            return b2;
+        if (b2 == null)
+            return b1;
+
         if (comparator.compare(b1, b2) >= 0)
             return b2;
         return b1;
@@ -156,24 +140,25 @@ public class ColumnNameHelper
      */
     private static ByteBuffer max(ByteBuffer b1, ByteBuffer b2, AbstractType<?> comparator)
     {
+        if (b1 == null)
+            return b2;
+        if (b2 == null)
+            return b1;
+
         if (comparator.compare(b1, b2) >= 0)
             return b1;
         return b2;
     }
 
     /**
-     * if columnNameComparator is CompositeType the columns are compared by components using the subcomparator
-     * on the same position.
-     *
-     * if comparator is not CompositeType, the lists are assumed to be of max size 1 and compared using the comparator
-     * directly.
+     * Merge 2 lists of min cell name components.
      *
      * @param minColumnNames lhs
      * @param candidates rhs
-     * @param columnNameComparator comparator to use
+     * @param comparator comparator to use
      * @return a list with smallest column names according to (sub)comparator
      */
-    public static List<ByteBuffer> mergeMin(List<ByteBuffer> minColumnNames, List<ByteBuffer> candidates, AbstractType<?> columnNameComparator)
+    public static List<ByteBuffer> mergeMin(List<ByteBuffer> minColumnNames, List<ByteBuffer> candidates, CellNameType comparator)
     {
         if (minColumnNames.isEmpty())
             return minimalBuffersFor(candidates);
@@ -181,27 +166,18 @@ public class ColumnNameHelper
         if (candidates.isEmpty())
             return minColumnNames;
 
-        if (columnNameComparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)columnNameComparator;
-            List<ByteBuffer> biggest = minColumnNames.size() > candidates.size() ? minColumnNames : candidates;
-            int typeCount = getTypeCount(ct);
-            int minSize = Math.min(typeCount, Math.min(minColumnNames.size(), candidates.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(minimalBufferFor(min(minColumnNames.get(i), candidates.get(i), ct.types.get(i))));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(minimalBufferFor(biggest.get(i)));
-
-            return retList;
-        }
-        else
-        {
-            return Collections.singletonList(minimalBufferFor(min(minColumnNames.get(0), candidates.get(0), columnNameComparator)));
-        }
+        List<ByteBuffer> biggest = minColumnNames.size() > candidates.size() ? minColumnNames : candidates;
+        List<ByteBuffer> smallest = minColumnNames.size() > candidates.size() ? candidates : minColumnNames;
+
+        // We want to always copy the smallest list, and maybeGrow does it only if it's actually smaller
+        List<ByteBuffer> retList = smallest.size() == biggest.size()
+                                 ? new ArrayList<>(smallest)
+                                 : maybeGrow(smallest, biggest.size());
+
+        for (int i = 0; i < biggest.size(); i++)
+            retList.set(i, minimalBufferFor(min(retList.get(i), biggest.get(i), comparator.subtype(i))));
+
+        return retList;
     }
 
     private static List<ByteBuffer> minimalBuffersFor(List<ByteBuffer> candidates)
@@ -213,18 +189,14 @@ public class ColumnNameHelper
     }
 
     /**
-     * if columnNameComparator is CompositeType the columns are compared by components using the subcomparator
-     * on the same position.
-     *
-     * if comparator is not CompositeType, the lists are assumed to be of max size 1 and compared using the comparator
-     * directly.
+     * Merge 2 lists of max cell name components.
      *
      * @param maxColumnNames lhs
      * @param candidates rhs
-     * @param columnNameComparator comparator to use
+     * @param comparator comparator to use
      * @return a list with biggest column names according to (sub)comparator
      */
-    public static List<ByteBuffer> mergeMax(List<ByteBuffer> maxColumnNames, List<ByteBuffer> candidates, AbstractType<?> columnNameComparator)
+    public static List<ByteBuffer> mergeMax(List<ByteBuffer> maxColumnNames, List<ByteBuffer> candidates, CellNameType comparator)
     {
         if (maxColumnNames.isEmpty())
             return minimalBuffersFor(candidates);
@@ -232,31 +204,17 @@ public class ColumnNameHelper
         if (candidates.isEmpty())
             return maxColumnNames;
 
-        if (columnNameComparator instanceof CompositeType)
-        {
-            CompositeType ct = (CompositeType)columnNameComparator;
-            List<ByteBuffer> biggest = maxColumnNames.size() > candidates.size() ? maxColumnNames : candidates;
-            int typeCount = getTypeCount(ct);
-            int minSize = Math.min(typeCount, Math.min(maxColumnNames.size(), candidates.size()));
-            int maxSize = Math.min(typeCount, biggest.size());
-            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
-
-            for (int i = 0; i < minSize; i++)
-                retList.add(minimalBufferFor(max(maxColumnNames.get(i), candidates.get(i), ct.types.get(i))));
-            for (int i = minSize; i < maxSize; i++)
-                retList.add(minimalBufferFor(biggest.get(i)));
-
-            return retList;
-        }
-        else
-        {
-            return Collections.singletonList(minimalBufferFor(max(maxColumnNames.get(0), candidates.get(0), columnNameComparator)));
-        }
+        List<ByteBuffer> biggest = maxColumnNames.size() > candidates.size() ? maxColumnNames : candidates;
+        List<ByteBuffer> smallest = maxColumnNames.size() > candidates.size() ? candidates : maxColumnNames;
 
-    }
+        // We want to always copy the smallest list, and maybeGrow does it only if it's actually smaller
+        List<ByteBuffer> retList = smallest.size() == biggest.size()
+                                 ? new ArrayList<>(smallest)
+                                 : maybeGrow(smallest, biggest.size());
 
-    private static int getTypeCount(CompositeType ct)
-    {
-        return ct.types.get(ct.types.size() - 1) instanceof ColumnToCollectionType ? ct.types.size() - 1 : ct.types.size();
+        for (int i = 0; i < biggest.size(); i++)
+            retList.set(i, minimalBufferFor(max(retList.get(i), biggest.get(i), comparator.subtype(i))));
+
+        return retList;
     }
 }
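
The rewritten helpers keep a per-component bound: each position of the clustering prefix tracks its own min/max, shorter prefixes are padded with null via maybeGrow(), and null always loses to a concrete value (the new checks in min()/max()). A simplified, self-contained illustration of that merge rule over plain strings rather than the real CellNameType components:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ComponentBoundsSketch
    {
        // Per-position max of two component lists; null means "nothing seen yet".
        static List<String> maxComponents(List<String> maxSeen, List<String> candidate)
        {
            int size = Math.max(maxSeen.size(), candidate.size());
            List<String> result = new ArrayList<>(size);
            for (int i = 0; i < size; i++)
            {
                String a = i < maxSeen.size() ? maxSeen.get(i) : null;
                String b = i < candidate.size() ? candidate.get(i) : null;
                // null loses to any concrete component, as in min()/max() above
                result.add(a == null ? b : b == null ? a : (a.compareTo(b) >= 0 ? a : b));
            }
            return result;
        }

        public static void main(String[] args)
        {
            // ["a", "z"] merged with ["b"] keeps the longer prefix's tail: [b, z]
            System.out.println(maxComponents(Arrays.asList("a", "z"), Arrays.asList("b")));
        }
    }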

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexHelper.java b/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
index d70ff19..e644974 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexHelper.java
@@ -18,14 +18,15 @@
 package org.apache.cassandra.io.sstable;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.cassandra.db.composites.CType;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.TypeSizes;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.FileMark;
 import org.apache.cassandra.io.util.FileUtils;
@@ -67,21 +68,23 @@ public class IndexHelper
     /**
      * Deserialize the index into a structure and return it
      *
-     * @param in - input source
+     * @param in input source
+     * @param type the comparator type for the column family
      *
      * @return ArrayList<IndexInfo> - list of de-serialized indexes
      * @throws IOException if an I/O error occurs.
      */
-    public static List<IndexInfo> deserializeIndex(FileDataInput in) throws IOException
+    public static List<IndexInfo> deserializeIndex(FileDataInput in, CType type) throws IOException
     {
         int columnIndexSize = in.readInt();
         if (columnIndexSize == 0)
             return Collections.<IndexInfo>emptyList();
         ArrayList<IndexInfo> indexList = new ArrayList<IndexInfo>();
         FileMark mark = in.mark();
+        ISerializer<IndexInfo> serializer = type.indexSerializer();
         while (in.bytesPastMark(mark) < columnIndexSize)
         {
-            indexList.add(IndexInfo.deserialize(in));
+            indexList.add(serializer.deserialize(in));
         }
         assert in.bytesPastMark(mark) == columnIndexSize;
 
@@ -105,9 +108,9 @@ public class IndexHelper
      *
      * @return int index
      */
-    public static int indexFor(ByteBuffer name, List<IndexInfo> indexList, AbstractType<?> comparator, boolean reversed, int lastIndex)
+    public static int indexFor(Composite name, List<IndexInfo> indexList, CType comparator, boolean reversed, int lastIndex)
     {
-        if (name.remaining() == 0 && reversed)
+        if (name.isEmpty() && reversed)
             return indexList.size() - 1;
 
         if (lastIndex >= indexList.size())
@@ -145,19 +148,19 @@ public class IndexHelper
         return startIdx + (index < 0 ? -index - (reversed ? 2 : 1) : index);
     }
 
-    public static Comparator<IndexInfo> getComparator(final AbstractType<?> nameComparator, boolean reversed)
+    public static Comparator<IndexInfo> getComparator(final CType nameComparator, boolean reversed)
     {
-        return reversed ? nameComparator.indexReverseComparator : nameComparator.indexComparator;
+        return reversed ? nameComparator.indexReverseComparator() : nameComparator.indexComparator();
     }
 
     public static class IndexInfo
     {
         public final long width;
-        public final ByteBuffer lastName;
-        public final ByteBuffer firstName;
+        public final Composite lastName;
+        public final Composite firstName;
         public final long offset;
 
-        public IndexInfo(ByteBuffer firstName, ByteBuffer lastName, long offset, long width)
+        public IndexInfo(Composite firstName, Composite lastName, long offset, long width)
         {
             this.firstName = firstName;
             this.lastName = lastName;
@@ -165,26 +168,38 @@ public class IndexHelper
             this.width = width;
         }
 
-        public void serialize(DataOutput out) throws IOException
+        public static class Serializer implements ISerializer<IndexInfo>
         {
-            ByteBufferUtil.writeWithShortLength(firstName, out);
-            ByteBufferUtil.writeWithShortLength(lastName, out);
-            out.writeLong(offset);
-            out.writeLong(width);
-        }
+            private final CType type;
 
-        public int serializedSize(TypeSizes typeSizes)
-        {
-            int firstNameSize = firstName.remaining();
-            int lastNameSize = lastName.remaining();
-            return typeSizes.sizeof((short) firstNameSize) + firstNameSize +
-                   typeSizes.sizeof((short) lastNameSize) + lastNameSize +
-                   typeSizes.sizeof(offset) + typeSizes.sizeof(width);
-        }
+            public Serializer(CType type)
+            {
+                this.type = type;
+            }
 
-        public static IndexInfo deserialize(DataInput in) throws IOException
-        {
-            return new IndexInfo(ByteBufferUtil.readWithShortLength(in), ByteBufferUtil.readWithShortLength(in), in.readLong(), in.readLong());
+            public void serialize(IndexInfo info, DataOutput out) throws IOException
+            {
+                type.serializer().serialize(info.firstName, out);
+                type.serializer().serialize(info.lastName, out);
+                out.writeLong(info.offset);
+                out.writeLong(info.width);
+            }
+
+            public IndexInfo deserialize(DataInput in) throws IOException
+            {
+                return new IndexInfo(type.serializer().deserialize(in),
+                                     type.serializer().deserialize(in),
+                                     in.readLong(),
+                                     in.readLong());
+            }
+
+            public long serializedSize(IndexInfo info, TypeSizes typeSizes)
+            {
+                return type.serializer().serializedSize(info.firstName, typeSizes)
+                     + type.serializer().serializedSize(info.lastName, typeSizes)
+                     + typeSizes.sizeof(info.offset)
+                     + typeSizes.sizeof(info.width);
+            }
         }
 
         public long memorySize()
@@ -195,7 +210,7 @@ public class IndexHelper
                                             ObjectSizes.getReferenceSize() +
                                             TypeSizes.NATIVE.sizeof(offset) +
                                             TypeSizes.NATIVE.sizeof(width))
-                   + ObjectSizes.getSize(firstName) + ObjectSizes.getSize(lastName);
+                   + firstName.memorySize() + lastName.memorySize();
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
index 0c36f62..4d1c663 100644
--- a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
+++ b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java
@@ -46,7 +46,7 @@ public class KeyIterator extends AbstractIterator<DecoratedKey> implements Close
             if (in.isEOF())
                 return endOfData();
             DecoratedKey key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.readWithShortLength(in));
-            RowIndexEntry.serializer.skip(in); // skip remainder of the entry
+            RowIndexEntry.Serializer.skip(in); // skip remainder of the entry
             return key;
         }
         catch (IOException e)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index c13c423..dfccf28 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -218,7 +218,7 @@ public abstract class SSTable
         while (ifile.getFilePointer() < BYTES_CAP && keys < SAMPLES_CAP)
         {
             ByteBufferUtil.skipShortLength(ifile);
-            RowIndexEntry.serializer.skip(ifile);
+            RowIndexEntry.Serializer.skip(ifile);
             keys++;
         }
         assert keys > 0 && ifile.getFilePointer() > 0 && ifile.length() > 0 : "Unexpected empty index file: " + ifile;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
index 055f4b6..de877bc 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
@@ -563,7 +563,7 @@ public class SSTableReader extends SSTable implements Closeable
             while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
             {
                 ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
-                RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
+                RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(primaryIndex, descriptor.version);
                 DecoratedKey decoratedKey = partitioner.decorateKey(key);
                 if (first == null)
                     first = decoratedKey;
@@ -731,7 +731,7 @@ public class SSTableReader extends SSTable implements Closeable
             while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
             {
                 summaryBuilder.maybeAddEntry(partitioner.decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
-                RowIndexEntry.serializer.skip(primaryIndex);
+                RowIndexEntry.Serializer.skip(primaryIndex);
             }
 
             return summaryBuilder.build(partitioner);
@@ -1168,7 +1168,7 @@ public class SSTableReader extends SSTable implements Closeable
                     if (opSatisfied)
                     {
                         // read data position from index entry
-                        RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(in, descriptor.version);
+                        RowIndexEntry indexEntry = metadata.comparator.rowIndexEntrySerializer().deserialize(in, descriptor.version);
                         if (exactMatch && updateCacheAndStats)
                         {
                             assert key instanceof DecoratedKey; // key can be == to the index key only if it's a true row key
@@ -1193,7 +1193,7 @@ public class SSTableReader extends SSTable implements Closeable
                         return indexEntry;
                     }
 
-                    RowIndexEntry.serializer.skip(in);
+                    RowIndexEntry.Serializer.skip(in);
                 }
             }
             catch (IOException e)
@@ -1235,7 +1235,7 @@ public class SSTableReader extends SSTable implements Closeable
                     if (indexDecoratedKey.compareTo(token) > 0)
                         return indexDecoratedKey;
 
-                    RowIndexEntry.serializer.skip(in);
+                    RowIndexEntry.Serializer.skip(in);
                 }
             }
             catch (IOException e)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
index 703b6e9..3dc0ded 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java
@@ -139,7 +139,7 @@ public class SSTableScanner implements ICompactionScanner
                 }
                 else
                 {
-                    RowIndexEntry.serializer.skip(ifile);
+                    sstable.metadata.comparator.rowIndexEntrySerializer().skip(ifile);
                 }
             }
         }
@@ -220,7 +220,7 @@ public class SSTableScanner implements ICompactionScanner
                             return endOfData();
 
                         currentKey = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
-                        currentEntry = RowIndexEntry.serializer.deserialize(ifile, sstable.descriptor.version);
+                        currentEntry = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(ifile, sstable.descriptor.version);
                     } while (!currentRange.contains(currentKey));
                 }
                 else
@@ -241,7 +241,7 @@ public class SSTableScanner implements ICompactionScanner
                 {
                     // we need the position of the start of the next key, regardless of whether it falls in the current range
                     nextKey = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
-                    nextEntry = RowIndexEntry.serializer.deserialize(ifile, sstable.descriptor.version);
+                    nextEntry = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(ifile, sstable.descriptor.version);
                     readEnd = nextEntry.position;
 
                     if (!currentRange.contains(nextKey))

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
index 6b39024..c881395 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleUnsortedWriter.java
@@ -77,7 +77,7 @@ public class SSTableSimpleUnsortedWriter extends AbstractSSTableSimpleWriter
                                        int bufferSizeInMB,
                                        CompressionParameters compressParameters)
     {
-        this(directory, new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator).compressionParameters(compressParameters), partitioner, bufferSizeInMB);
+        this(directory, CFMetaData.denseCFMetaData(keyspace, columnFamily, comparator, subComparator).compressionParameters(compressParameters), partitioner, bufferSizeInMB);
     }
 
     public SSTableSimpleUnsortedWriter(File directory,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
index 9b584f0..054d780 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleWriter.java
@@ -56,8 +56,7 @@ public class SSTableSimpleWriter extends AbstractSSTableSimpleWriter
                                AbstractType<?> comparator,
                                AbstractType<?> subComparator)
     {
-        this(directory,
-             new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator), partitioner);
+        this(directory, CFMetaData.denseCFMetaData(keyspace, columnFamily, comparator, subComparator), partitioner);
     }
 
     public SSTableSimpleWriter(File directory, CFMetaData metadata, IPartitioner partitioner)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index 60bb8d1..17d9b0e 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -433,7 +433,7 @@ public class SSTableWriter extends SSTable
             try
             {
                 ByteBufferUtil.writeWithShortLength(key.key, indexFile.stream);
-                RowIndexEntry.serializer.serialize(indexEntry, indexFile.stream);
+                metadata.comparator.rowIndexEntrySerializer().serialize(indexEntry, indexFile.stream);
             }
             catch (IOException e)
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
index 4b9329f..c125a98 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java
@@ -24,7 +24,7 @@ import java.util.*;
 import com.google.common.collect.Maps;
 
 import org.apache.cassandra.db.commitlog.ReplayPosition;
-import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.utils.EstimatedHistogram;
 import org.apache.cassandra.utils.StreamingHistogram;
@@ -77,14 +77,14 @@ public class MetadataCollector
     protected int sstableLevel;
     protected List<ByteBuffer> minColumnNames = Collections.emptyList();
     protected List<ByteBuffer> maxColumnNames = Collections.emptyList();
-    private final AbstractType<?> columnNameComparator;
+    private final CellNameType columnNameComparator;
 
-    public MetadataCollector(AbstractType<?> columnNameComparator)
+    public MetadataCollector(CellNameType columnNameComparator)
     {
         this.columnNameComparator = columnNameComparator;
     }
 
-    public MetadataCollector(Collection<SSTableReader> sstables, AbstractType<?> columnNameComparator, int level)
+    public MetadataCollector(Collection<SSTableReader> sstables, CellNameType columnNameComparator, int level)
     {
         this(columnNameComparator);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java b/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
index 786d312..f397ddc 100644
--- a/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
+++ b/src/java/org/apache/cassandra/io/util/MappedFileDataInput.java
@@ -150,9 +150,10 @@ public class MappedFileDataInput extends AbstractDataInput implements FileDataIn
     }
 
     @Override
-    public final void readFully(byte[] buffer) throws IOException
+    public final void readFully(byte[] bytes) throws IOException
     {
-        throw new UnsupportedOperationException("use readBytes instead");
+        ByteBufferUtil.arrayCopy(buffer, buffer.position() + position, bytes, 0, bytes.length);
+        position += bytes.length;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/CacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index f626e17..4fe7109 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -39,7 +39,9 @@ import org.apache.cassandra.cache.*;
 import org.apache.cassandra.cache.AutoSavingCache.CacheSerializer;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
+import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -342,7 +344,8 @@ public class CacheService implements CacheServiceMBean
             Descriptor desc = key.desc;
             out.writeInt(desc.generation);
             out.writeBoolean(true);
-            RowIndexEntry.serializer.serialize(entry, out);
+            CFMetaData cfm = Schema.instance.getCFMetaData(key.desc.ksname, key.desc.cfname);
+            cfm.comparator.rowIndexEntrySerializer().serialize(entry, out);
         }
 
         public Future<Pair<KeyCacheKey, RowIndexEntry>> deserialize(DataInputStream input, ColumnFamilyStore cfs) throws IOException
@@ -359,10 +362,10 @@ public class CacheService implements CacheServiceMBean
             input.readBoolean(); // backwards compatibility for "promoted indexes" boolean
             if (reader == null)
             {
-                RowIndexEntry.serializer.skipPromotedIndex(input);
+                RowIndexEntry.Serializer.skipPromotedIndex(input);
                 return null;
             }
-            RowIndexEntry entry = RowIndexEntry.serializer.deserialize(input, reader.descriptor.version);
+            RowIndexEntry entry = reader.metadata.comparator.rowIndexEntrySerializer().deserialize(input, reader.descriptor.version);
             return Futures.immediateFuture(Pair.create(new KeyCacheKey(reader.descriptor, key), entry));
         }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index 55125e6..cf53865 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -41,9 +41,10 @@ import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.cql3.ColumnNameBuilder;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.composites.Composite;
+import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndex;
@@ -208,7 +209,7 @@ public class StorageProxy implements StorageProxyMBean
     public static ColumnFamily cas(String keyspaceName,
                                    String cfName,
                                    ByteBuffer key,
-                                   ColumnNameBuilder prefix,
+                                   Composite prefix,
                                    ColumnFamily expected,
                                    ColumnFamily updates,
                                    ConsistencyLevel consistencyForPaxos,
@@ -238,8 +239,8 @@ public class StorageProxy implements StorageProxyMBean
             if (expected == null || expected.isEmpty())
             {
                 SliceQueryFilter filter = prefix == null
-                                        ? new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1)
-                                        : new SliceQueryFilter(prefix.build(), prefix.buildAsEndOfRange(), false, 1, prefix.componentCount());
+                                        ? new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, 1)
+                                        : new SliceQueryFilter(prefix.slice(), false, 1, prefix.size());
                 readCommand = new SliceFromReadCommand(keyspaceName, key, cfName, timestamp, filter);
             }
             else
@@ -1474,7 +1475,7 @@ public class StorageProxy implements StorageProxyMBean
         }
         else
         {
-            if (cfs.metadata.isDense())
+            if (cfs.metadata.comparator.isDense())
             {
                 // one storage row per result row, so use key estimate directly
                 resultRowsPerRange = cfs.estimateKeys();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
index 9372665..d4748b0 100644
--- a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -26,6 +25,7 @@ import java.util.Iterator;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.ColumnCounter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -289,12 +289,12 @@ abstract class AbstractQueryPager implements QueryPager
         return Math.min(liveCount, toDiscard);
     }
 
-    protected static ByteBuffer firstName(ColumnFamily cf)
+    protected static CellName firstName(ColumnFamily cf)
     {
         return cf.iterator().next().name();
     }
 
-    protected static ByteBuffer lastName(ColumnFamily cf)
+    protected static CellName lastName(ColumnFamily cf)
     {
         return cf.getReverseSortedColumns().iterator().next().name();
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java b/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
index 1f4ba78..9b07163 100644
--- a/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/RangeSliceQueryPager.java
@@ -17,10 +17,11 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
+import org.apache.cassandra.db.composites.Composite;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -37,7 +38,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
 {
     private final RangeSliceCommand command;
     private volatile DecoratedKey lastReturnedKey;
-    private volatile ByteBuffer lastReturnedName;
+    private volatile CellName lastReturnedName;
 
     // Don't use directly, use QueryPagers method instead
     RangeSliceQueryPager(RangeSliceCommand command, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -54,7 +55,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
         if (state != null)
         {
             lastReturnedKey = StorageService.getPartitioner().decorateKey(state.partitionKey);
-            lastReturnedName = state.cellName;
+            lastReturnedName = cfm.comparator.cellFromByteBuffer(state.cellName);
             restoreState(state.remaining, true);
         }
     }
@@ -63,7 +64,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
     {
         return lastReturnedKey == null
              ? null
-             : new PagingState(lastReturnedKey.key, lastReturnedName, maxRemaining());
+             : new PagingState(lastReturnedKey.key, lastReturnedName.toByteBuffer(), maxRemaining());
     }
 
     protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -71,7 +72,7 @@ public class RangeSliceQueryPager extends AbstractQueryPager
     {
         SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
         AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
-        ByteBuffer start = lastReturnedName == null ? sf.start() : lastReturnedName;
+        Composite start = lastReturnedName == null ? sf.start() : lastReturnedName;
         PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                           command.columnFamily,
                                                           command.timestamp,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java b/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
index e3825a9..bafb43e 100644
--- a/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/SliceQueryPager.java
@@ -17,11 +17,11 @@
  */
 package org.apache.cassandra.service.pager;
 
-import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
@@ -34,7 +34,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
 {
     private final SliceFromReadCommand command;
 
-    private volatile ByteBuffer lastReturned;
+    private volatile CellName lastReturned;
 
     // Don't use directly, use QueryPagers method instead
     SliceQueryPager(SliceFromReadCommand command, ConsistencyLevel consistencyLevel, boolean localQuery)
@@ -49,7 +49,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
 
         if (state != null)
         {
-            lastReturned = state.cellName;
+            lastReturned = cfm.comparator.cellFromByteBuffer(state.cellName);
             restoreState(state.remaining, true);
         }
     }
@@ -58,7 +58,7 @@ public class SliceQueryPager extends AbstractQueryPager implements SinglePartiti
     {
         return lastReturned == null
              ? null
-             : new PagingState(null, lastReturned, maxRemaining());
+             : new PagingState(null, lastReturned.toByteBuffer(), maxRemaining());
     }
 
     protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index 0b8836d..4705513 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -46,11 +46,11 @@ import org.apache.cassandra.cql.CQLStatement;
 import org.apache.cassandra.cql.QueryProcessor;
 import org.apache.cassandra.cql3.QueryOptions;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.exceptions.*;
@@ -136,7 +136,7 @@ public class CassandraServer implements Cassandra.Iface
             if (column.isMarkedForDelete(now))
                 continue;
 
-            thriftColumns.add(thriftifyColumnWithName(column, column.name()));
+            thriftColumns.add(thriftifyColumnWithName(column, column.name().toByteBuffer()));
         }
 
         // we have to do the reversing here, since internally we pass results around in ColumnFamily
@@ -159,7 +159,7 @@ public class CassandraServer implements Cassandra.Iface
     {
         assert !(column instanceof org.apache.cassandra.db.CounterColumn);
 
-        Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
+        Column thrift_column = new Column(column.name().toByteBuffer()).setValue(column.value()).setTimestamp(column.timestamp());
         if (column instanceof ExpiringColumn)
         {
             thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
@@ -183,7 +183,7 @@ public class CassandraServer implements Cassandra.Iface
     private CounterColumn thriftifySubCounter(org.apache.cassandra.db.Column column)
     {
         assert column instanceof org.apache.cassandra.db.CounterColumn;
-        return new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
+        return new CounterColumn(column.name().toByteBuffer(), CounterContext.instance().total(column.value()));
     }
 
     private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns,
@@ -377,28 +377,34 @@ public class CassandraServer implements Cassandra.Iface
 
     private SliceQueryFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SliceRange range)
     {
-        SliceQueryFilter filter = new SliceQueryFilter(range.start, range.finish, range.reversed, range.count);
+        Composite start = metadata.comparator.fromByteBuffer(range.start);
+        Composite finish = metadata.comparator.fromByteBuffer(range.finish);
+        SliceQueryFilter filter = new SliceQueryFilter(start, finish, range.reversed, range.count);
         if (metadata.isSuper())
-            filter = SuperColumns.fromSCSliceFilter((CompositeType)metadata.comparator, parent.bufferForSuper_column(), filter);
+            filter = SuperColumns.fromSCSliceFilter(metadata.comparator, parent.bufferForSuper_column(), filter);
         return filter;
     }
 
     private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
     {
         IDiskAtomFilter filter;
+        CellNameType columnType = metadata.isSuper()
+                                ? new SimpleDenseCellNameType(metadata.comparator.subtype(parent.isSetSuper_column() ? 1 : 0))
+                                : metadata.comparator;
         if (predicate.column_names != null)
         {
             if (metadata.isSuper())
             {
-                CompositeType type = (CompositeType)metadata.comparator;
-                SortedSet s = new TreeSet<ByteBuffer>(parent.isSetSuper_column() ? type.types.get(1) : type.types.get(0));
-                s.addAll(predicate.column_names);
-                filter = SuperColumns.fromSCNamesFilter(type, parent.bufferForSuper_column(), new NamesQueryFilter(s));
+                SortedSet<CellName> s = new TreeSet<CellName>(columnType);
+                for (ByteBuffer bb : predicate.column_names)
+                    s.add(columnType.cellFromByteBuffer(bb));
+                filter = SuperColumns.fromSCNamesFilter(metadata.comparator, parent.bufferForSuper_column(), new NamesQueryFilter(s));
             }
             else
             {
-                SortedSet s = new TreeSet<ByteBuffer>(metadata.comparator);
-                s.addAll(predicate.column_names);
+                SortedSet<CellName> s = new TreeSet<CellName>(metadata.comparator);
+                for (ByteBuffer bb : predicate.column_names)
+                    s.add(metadata.comparator.cellFromByteBuffer(bb));
                 filter = new NamesQueryFilter(s);
             }
         }
@@ -469,15 +475,15 @@ public class CassandraServer implements Cassandra.Iface
             IDiskAtomFilter filter;
             if (metadata.isSuper())
             {
-                CompositeType type = (CompositeType)metadata.comparator;
-                SortedSet names = new TreeSet<ByteBuffer>(column_path.column == null ? type.types.get(0) : type.types.get(1));
+                CellNameType type = metadata.comparator;
+                SortedSet names = new TreeSet<ByteBuffer>(column_path.column == null ? type.subtype(0) : type.subtype(1));
                 names.add(column_path.column == null ? column_path.super_column : column_path.column);
                 filter = SuperColumns.fromSCNamesFilter(type, column_path.column == null ? null : column_path.bufferForSuper_column(), new NamesQueryFilter(names));
             }
             else
             {
-                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(metadata.comparator);
-                names.add(column_path.column);
+                SortedSet<CellName> names = new TreeSet<CellName>(metadata.comparator);
+                names.add(metadata.comparator.cellFromByteBuffer(column_path.column));
                 filter = new NamesQueryFilter(names);
             }
 
@@ -647,14 +653,14 @@ public class CassandraServer implements Cassandra.Iface
             throw new org.apache.cassandra.exceptions.InvalidRequestException("missing mandatory super column name for super CF " + column_parent.column_family);
         }
         ThriftValidation.validateColumnNames(metadata, column_parent, Arrays.asList(column.name));
-        ThriftValidation.validateColumnData(metadata, column, column_parent.super_column != null);
+        ThriftValidation.validateColumnData(metadata, column_parent.super_column, column);
 
         RowMutation rm;
         try
         {
-            ByteBuffer name = column.name;
-            if (metadata.isSuper())
-                name = CompositeType.build(column_parent.super_column, name);
+            CellName name = metadata.isSuper()
+                          ? metadata.comparator.makeCellName(column_parent.super_column, column.name)
+                          : metadata.comparator.cellFromByteBuffer(column.name);
 
             ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cState.getKeyspace(), column_parent.column_family);
             cf.addColumn(name, column.value, column.timestamp, column.ttl);
@@ -740,12 +746,12 @@ public class CassandraServer implements Cassandra.Iface
             });
             ThriftValidation.validateColumnNames(metadata, new ColumnParent(column_family), names);
             for (Column column : updates)
-                ThriftValidation.validateColumnData(metadata, column, false);
+                ThriftValidation.validateColumnData(metadata, null, column);
 
             CFMetaData cfm = Schema.instance.getCFMetaData(cState.getKeyspace(), column_family);
             UnsortedColumns cfUpdates = UnsortedColumns.factory.create(cfm);
             for (Column column : updates)
-                cfUpdates.addColumn(column.name, column.value, column.timestamp);
+                cfUpdates.addColumn(cfm.comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
 
             ColumnFamily cfExpected;
             if (expected.isEmpty())
@@ -756,7 +762,7 @@ public class CassandraServer implements Cassandra.Iface
             {
                 cfExpected = TreeMapBackedSortedColumns.factory.create(cfm);
                 for (Column column : expected)
-                    cfExpected.addColumn(column.name, column.value, column.timestamp);
+                    cfExpected.addColumn(cfm.comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
             }
 
             schedule(DatabaseDescriptor.getWriteRpcTimeout());
@@ -837,11 +843,11 @@ public class CassandraServer implements Cassandra.Iface
 
                     if (mutation.deletion != null)
                     {
-                        deleteColumnOrSuperColumn(rm, cfName, mutation.deletion);
+                        deleteColumnOrSuperColumn(rm, metadata, mutation.deletion);
                     }
                     if (mutation.column_or_supercolumn != null)
                     {
-                        addColumnOrSuperColumn(rm, cfName, mutation.column_or_supercolumn);
+                        addColumnOrSuperColumn(rm, metadata, mutation.column_or_supercolumn);
                     }
                 }
             }
@@ -860,67 +866,70 @@ public class CassandraServer implements Cassandra.Iface
         return rowMutations;
     }
 
-    private void addColumnOrSuperColumn(RowMutation rm, String cfName, ColumnOrSuperColumn cosc)
+    private void addColumnOrSuperColumn(RowMutation rm, CFMetaData cfm, ColumnOrSuperColumn cosc)
     {
         if (cosc.super_column != null)
         {
             for (Column column : cosc.super_column.columns)
             {
-                rm.add(cfName, CompositeType.build(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
+                rm.add(cfm.cfName, cfm.comparator.makeCellName(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
             }
         }
         else if (cosc.column != null)
         {
-            rm.add(cfName, cosc.column.name, cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
+            rm.add(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.column.name), cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
         }
         else if (cosc.counter_super_column != null)
         {
             for (CounterColumn column : cosc.counter_super_column.columns)
             {
-                rm.addCounter(cfName, CompositeType.build(cosc.counter_super_column.name, column.name), column.value);
+                rm.addCounter(cfm.cfName, cfm.comparator.makeCellName(cosc.counter_super_column.name, column.name), column.value);
             }
         }
         else // cosc.counter_column != null
         {
-            rm.addCounter(cfName, cosc.counter_column.name, cosc.counter_column.value);
+            rm.addCounter(cfm.cfName, cfm.comparator.cellFromByteBuffer(cosc.counter_column.name), cosc.counter_column.value);
         }
     }
 
-    private void deleteColumnOrSuperColumn(RowMutation rm, String cfName, Deletion del)
+    private void deleteColumnOrSuperColumn(RowMutation rm, CFMetaData cfm, Deletion del)
     {
         if (del.predicate != null && del.predicate.column_names != null)
         {
             for (ByteBuffer c : del.predicate.column_names)
             {
-                if (del.super_column == null && Schema.instance.getColumnFamilyType(rm.getKeyspaceName(), cfName) == ColumnFamilyType.Super)
-                    rm.deleteRange(cfName, SuperColumns.startOf(c), SuperColumns.endOf(c), del.timestamp);
+                if (del.super_column == null && cfm.isSuper())
+                    rm.deleteRange(cfm.cfName, SuperColumns.startOf(c), SuperColumns.endOf(c), del.timestamp);
                 else if (del.super_column != null)
-                    rm.delete(cfName, CompositeType.build(del.super_column, c), del.timestamp);
+                    rm.delete(cfm.cfName, cfm.comparator.makeCellName(del.super_column, c), del.timestamp);
                 else
-                    rm.delete(cfName, c, del.timestamp);
+                    rm.delete(cfm.cfName, cfm.comparator.cellFromByteBuffer(c), del.timestamp);
             }
         }
         else if (del.predicate != null && del.predicate.slice_range != null)
         {
-            if (del.super_column == null && Schema.instance.getColumnFamilyType(rm.getKeyspaceName(), cfName) == ColumnFamilyType.Super)
-                rm.deleteRange(cfName,
+            if (del.super_column == null && cfm.isSuper())
+                rm.deleteRange(cfm.cfName,
                                SuperColumns.startOf(del.predicate.getSlice_range().start),
                                SuperColumns.startOf(del.predicate.getSlice_range().finish),
                                del.timestamp);
             else if (del.super_column != null)
-                rm.deleteRange(cfName,
-                               CompositeType.build(del.super_column, del.predicate.getSlice_range().start),
-                               CompositeType.build(del.super_column, del.predicate.getSlice_range().finish),
+                rm.deleteRange(cfm.cfName,
+                               cfm.comparator.makeCellName(del.super_column, del.predicate.getSlice_range().start),
+                               cfm.comparator.makeCellName(del.super_column, del.predicate.getSlice_range().finish),
                                del.timestamp);
             else
-                rm.deleteRange(cfName, del.predicate.getSlice_range().start, del.predicate.getSlice_range().finish, del.timestamp);
+                rm.deleteRange(cfm.cfName,
+                               cfm.comparator.cellFromByteBuffer(del.predicate.getSlice_range().start),
+                               cfm.comparator.cellFromByteBuffer(del.predicate.getSlice_range().finish),
+                               del.timestamp);
         }
         else
         {
             if (del.super_column != null)
-                rm.deleteRange(cfName, SuperColumns.startOf(del.super_column), SuperColumns.endOf(del.super_column), del.timestamp);
+                rm.deleteRange(cfm.cfName, SuperColumns.startOf(del.super_column), SuperColumns.endOf(del.super_column), del.timestamp);
             else
-                rm.delete(cfName, del.timestamp);
+                rm.delete(cfm.cfName, del.timestamp);
         }
     }
 
@@ -1007,11 +1016,11 @@ public class CassandraServer implements Cassandra.Iface
         if (column_path.super_column == null && column_path.column == null)
             rm.delete(column_path.column_family, timestamp);
         else if (column_path.super_column == null)
-            rm.delete(column_path.column_family, column_path.column, timestamp);
+            rm.delete(column_path.column_family, metadata.comparator.cellFromByteBuffer(column_path.column), timestamp);
         else if (column_path.column == null)
             rm.deleteRange(column_path.column_family, SuperColumns.startOf(column_path.super_column), SuperColumns.endOf(column_path.super_column), timestamp);
         else
-            rm.delete(column_path.column_family, CompositeType.build(column_path.super_column, column_path.column), timestamp);
+            rm.delete(column_path.column_family, metadata.comparator.makeCellName(column_path.super_column, column_path.column), timestamp);
 
         if (isCommutativeOp)
             doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, ThriftConversion.fromThrift(consistency_level))));
@@ -1768,9 +1777,9 @@ public class CassandraServer implements Cassandra.Iface
             try
             {
                 if (metadata.isSuper())
-                    rm.addCounter(column_parent.column_family, CompositeType.build(column_parent.super_column, column.name), column.value);
+                    rm.addCounter(column_parent.column_family, metadata.comparator.makeCellName(column_parent.super_column, column.name), column.value);
                 else
-                    rm.addCounter(column_parent.column_family, column.name, column.value);
+                    rm.addCounter(column_parent.column_family, metadata.comparator.cellFromByteBuffer(column.name), column.value);
             }
             catch (MarshalException e)
             {

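Stepping back from the individual hunks in CassandraServer.java: every place that used to assemble composite names by hand with CompositeType.build() now goes through the table comparator, which yields a typed CellName instead of a raw ByteBuffer. A hedged sketch of the two decoding paths (method and parameter names are illustrative only):

    import java.nio.ByteBuffer;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.db.composites.CellName;

    final class CellDecodingExample
    {
        // Sketch only: decode a Thrift column name, optionally scoped by a super column.
        static CellName toCellName(CFMetaData metadata, ByteBuffer superColumnName, ByteBuffer columnName)
        {
            return superColumnName == null
                 ? metadata.comparator.cellFromByteBuffer(columnName)               // regular cell
                 : metadata.comparator.makeCellName(superColumnName, columnName);   // sub-cell of a super column
        }
    }
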
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/thrift/ThriftValidation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftValidation.java b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
index e68b070..b1eb50b 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftValidation.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
@@ -20,23 +20,23 @@ package org.apache.cassandra.thrift;
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import org.apache.cassandra.serializers.MarshalException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.ColumnToCollectionType;
-import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -229,29 +229,28 @@ public class ThriftValidation
             if (isCQL3Table)
             {
                 // CQL3 tables don't support having only part of their composite column names set
-                CompositeType composite = (CompositeType)comparator;
-                ByteBuffer[] components = composite.split(name);
-                int minComponents = composite.types.size() - (metadata.hasCollections() ? 1 : 0);
-                if (components.length < minComponents)
+                Composite composite = metadata.comparator.fromByteBuffer(name);
+
+                int minComponents = metadata.comparator.clusteringPrefixSize() + 1;
+                if (composite.size() < minComponents)
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Not enough components (found %d but %d expected) for column name since %s is a CQL3 table",
-                                                                                                    components.length, minComponents, metadata.cfName));
+                                                                                                    composite.size(), minComponents, metadata.cfName));
 
                 // Furthermore, the column name must be a declared one.
-                int columnIndex = composite.types.size() - (metadata.hasCollections() ? 2 : 1);
-                ByteBuffer CQL3ColumnName = components[columnIndex];
+                int columnIndex = metadata.comparator.clusteringPrefixSize();
+                ByteBuffer CQL3ColumnName = composite.get(columnIndex);
                 if (!CQL3ColumnName.hasRemaining())
                     continue; // Row marker, ok
 
-                ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, composite.types.get(columnIndex));
+                ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, metadata.comparator.subtype(columnIndex));
                 if (metadata.getColumnDefinition(columnId) == null)
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid cell for CQL3 table %s. The CQL3 column component (%s) does not correspond to a defined CQL3 column",
                                                                                                     metadata.cfName, columnId));
 
                 // On top of that, if we have a collection component, the (CQL3) column must be a collection
-                if (metadata.hasCollections() && components.length == composite.types.size())
+                if (metadata.comparator.hasCollections() && composite.size() == metadata.comparator.size())
                 {
-                    assert components.length >= 2;
-                    ColumnToCollectionType collectionType = (ColumnToCollectionType)composite.types.get(composite.types.size() - 1);
+                    ColumnToCollectionType collectionType = metadata.comparator.collectionType();
                     if (!collectionType.defined.containsKey(CQL3ColumnName))
                         throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid collection component, %s is not a collection", UTF8Type.instance.getString(CQL3ColumnName)));
                 }
@@ -310,7 +309,7 @@ public class ThriftValidation
 
             validateTtl(cosc.column);
             validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.column.name));
-            validateColumnData(metadata, cosc.column, false);
+            validateColumnData(metadata, null, cosc.column);
         }
 
         if (cosc.super_column != null)
@@ -321,7 +320,7 @@ public class ThriftValidation
             for (Column c : cosc.super_column.columns)
             {
                 validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.super_column.name).setColumn(c.name));
-                validateColumnData(metadata, c, true);
+                validateColumnData(metadata, cosc.super_column.name, c);
             }
         }
 
@@ -426,7 +425,7 @@ public class ThriftValidation
     /**
      * Validates the data part of the column (everything in the Column object but the name, which is assumed to be valid)
      */
-    public static void validateColumnData(CFMetaData metadata, Column column, boolean isSubColumn) throws org.apache.cassandra.exceptions.InvalidRequestException
+    public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
     {
         validateTtl(column);
         if (!column.isSetValue())
@@ -434,7 +433,10 @@ public class ThriftValidation
         if (!column.isSetTimestamp())
             throw new org.apache.cassandra.exceptions.InvalidRequestException("Column timestamp is required");
 
-        ColumnDefinition columnDef = metadata.getColumnDefinitionFromCellName(column.name);
+        CellName cn = scName == null
+                    ? metadata.comparator.cellFromByteBuffer(column.name)
+                    : metadata.comparator.makeCellName(scName, column.name);
+        ColumnDefinition columnDef = metadata.getColumnDefinition(cn);
         try
         {
             AbstractType<?> validator = metadata.getValueValidator(columnDef);
@@ -450,11 +452,11 @@ public class ThriftValidation
                                                                       me.getMessage(),
                                                                       metadata.ksName,
                                                                       metadata.cfName,
-                                                                      (SuperColumns.getComparatorFor(metadata, isSubColumn)).getString(column.name)));
+                                                                      (SuperColumns.getComparatorFor(metadata, scName != null)).getString(column.name)));
         }
 
         // Indexed column values cannot be larger than 64K.  See CASSANDRA-3057/4240 for more details
-        if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(column)))
+        if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(cn, column)))
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s",
                                                                               column.value.remaining(),
                                                                               columnDef.getIndexName(),
@@ -462,12 +464,12 @@ public class ThriftValidation
                                                                               metadata.ksName));
     }
 
-    private static org.apache.cassandra.db.Column asDBColumn(Column column)
+    private static org.apache.cassandra.db.Column asDBColumn(CellName name, Column column)
     {
         if (column.ttl <= 0)
-            return new org.apache.cassandra.db.Column(column.name, column.value, column.timestamp);
+            return new org.apache.cassandra.db.Column(name, column.value, column.timestamp);
         else
-            return new org.apache.cassandra.db.ExpiringColumn(column.name, column.value, column.timestamp, column.ttl);
+            return new org.apache.cassandra.db.ExpiringColumn(name, column.value, column.timestamp, column.ttl);
     }
 
     /**
@@ -588,7 +590,8 @@ public class ThriftValidation
             if (expression.value.remaining() > 0xFFFF)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("Index expression values may not be larger than 64K");
 
-            AbstractType<?> valueValidator = metadata.getValueValidatorFromCellName(expression.column_name);
+            CellName name = metadata.comparator.cellFromByteBuffer(expression.column_name);
+            AbstractType<?> valueValidator = metadata.getValueValidator(name);
             try
             {
                 valueValidator.validate(expression.value);
@@ -601,7 +604,7 @@ public class ThriftValidation
                                                                                   me.getMessage()));
             }
 
-            isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(expression.column_name);
+            isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(name);
         }
 
         return isIndexed;
@@ -631,23 +634,28 @@ public class ThriftValidation
     {
         SliceRange sr = sp.slice_range;
         IDiskAtomFilter filter;
+
+        CellNameType comparator = metadata.isSuper()
+                                ? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1))
+                                : metadata.comparator;
         if (sr == null)
         {
-            AbstractType<?> comparator = metadata.isSuper()
-                    ? ((CompositeType)metadata.comparator).types.get(superColumn == null ? 0 : 1)
-                    : metadata.comparator;
 
-            SortedSet<ByteBuffer> ss = new TreeSet<ByteBuffer>(comparator);
-            ss.addAll(sp.column_names);
+            SortedSet<CellName> ss = new TreeSet<CellName>(comparator);
+            for (ByteBuffer bb : sp.column_names)
+                ss.add(comparator.cellFromByteBuffer(bb));
             filter = new NamesQueryFilter(ss);
         }
         else
         {
-            filter = new SliceQueryFilter(sr.start, sr.finish, sr.reversed, sr.count);
+            filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start),
+                                          comparator.fromByteBuffer(sr.finish),
+                                          sr.reversed,
+                                          sr.count);
         }
 
         if (metadata.isSuper())
-            filter = SuperColumns.fromSCFilter((CompositeType)metadata.comparator, superColumn, filter);
+            filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter);
         return filter;
     }
 }

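The ThriftValidation changes above contain the one place where a raw Thrift name is pulled apart into its CQL3 components: the name is parsed into a Composite, and the component at the clustering-prefix position must be a declared column. A condensed sketch of that flow, using only the calls visible in the hunk (the surrounding class and method names are illustrative):

    import java.nio.ByteBuffer;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.cql3.ColumnIdentifier;
    import org.apache.cassandra.db.composites.Composite;
    import org.apache.cassandra.exceptions.InvalidRequestException;

    final class Cql3NameCheckExample
    {
        // Sketch only: locate the CQL3 column component inside a Thrift cell name.
        static ColumnIdentifier cql3ColumnOf(CFMetaData metadata, ByteBuffer thriftName) throws InvalidRequestException
        {
            Composite composite = metadata.comparator.fromByteBuffer(thriftName);
            int columnIndex = metadata.comparator.clusteringPrefixSize();   // components preceding the CQL3 column name
            if (composite.size() <= columnIndex)
                throw new InvalidRequestException("Not enough components for a CQL3 table");
            ByteBuffer name = composite.get(columnIndex);                   // an empty buffer here would be the row marker
            return new ColumnIdentifier(name, metadata.comparator.subtype(columnIndex));
        }
    }
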
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tools/BulkLoader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index c89bb83..38c51ac 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -37,6 +37,7 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SystemKeyspace;
+import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.SSTableLoader;
@@ -208,14 +209,24 @@ public class BulkLoader
                         }
                     }
 
-                    String query = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s'",
+                    String cfQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s'",
                                                  Keyspace.SYSTEM_KS,
                                                  SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF,
                                                  keyspace);
-                    CqlResult result = client.execute_cql3_query(ByteBufferUtil.bytes(query), Compression.NONE, ConsistencyLevel.ONE);
-                    for (CqlRow row : result.rows)
+                    CqlResult cfRes = client.execute_cql3_query(ByteBufferUtil.bytes(cfQuery), Compression.NONE, ConsistencyLevel.ONE);
+
+
+                    for (CqlRow row : cfRes.rows)
                     {
-                        CFMetaData metadata = CFMetaData.fromThriftCqlRow(row);
+                        String columnFamily = UTF8Type.instance.getString(row.columns.get(1).bufferForName());
+                        String columnsQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s' AND columnfamily_name = '%s'",
+                                                            Keyspace.SYSTEM_KS,
+                                                            SystemKeyspace.SCHEMA_COLUMNS_CF,
+                                                            keyspace,
+                                                            columnFamily);
+                        CqlResult columnsRes = client.execute_cql3_query(ByteBufferUtil.bytes(columnsQuery), Compression.NONE, ConsistencyLevel.ONE);
+
+                        CFMetaData metadata = CFMetaData.fromThriftCqlRow(row, columnsRes);
                         knownCfs.put(metadata.cfName, metadata);
                     }
                     break;

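On the BulkLoader change above: CFMetaData.fromThriftCqlRow now takes the column definitions in addition to the column-family row, apparently because the table's comparator can no longer be reconstructed from the schema_columnfamilies row alone; hence the extra query against system.schema_columns for each column family. Restated as a hedged fragment (it assumes the surrounding loop variables from the hunk above and is not meant to stand alone):

    // Sketch only: one extra query per column family now feeds fromThriftCqlRow().
    String columnsQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = '%s' AND columnfamily_name = '%s'",
                                        Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_COLUMNS_CF, keyspace, columnFamily);
    CqlResult columnsRes = client.execute_cql3_query(ByteBufferUtil.bytes(columnsQuery), Compression.NONE, ConsistencyLevel.ONE);
    CFMetaData metadata = CFMetaData.fromThriftCqlRow(row, columnsRes);   // row: the schema_columnfamilies row for this CF
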
http://git-wip-us.apache.org/repos/asf/cassandra/blob/362cc053/src/java/org/apache/cassandra/tools/SSTableExport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index dab7760..c5fc970 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.composites.CellNameType;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -146,18 +147,17 @@ public class SSTableExport
 
     private static List<Object> serializeAtom(OnDiskAtom atom, CFMetaData cfMetaData)
     {
-        AbstractType<?> comparator = cfMetaData.comparator;
         if (atom instanceof Column)
         {
-            return serializeColumn((Column) atom, comparator, cfMetaData);
+            return serializeColumn((Column) atom, cfMetaData);
         }
         else
         {
             assert atom instanceof RangeTombstone;
             RangeTombstone rt = (RangeTombstone) atom;
             ArrayList<Object> serializedColumn = new ArrayList<Object>();
-            serializedColumn.add(comparator.getString(rt.min));
-            serializedColumn.add(comparator.getString(rt.max));
+            serializedColumn.add(cfMetaData.comparator.getString(rt.min));
+            serializedColumn.add(cfMetaData.comparator.getString(rt.max));
             serializedColumn.add(rt.data.markedForDeleteAt);
             serializedColumn.add("t");
             serializedColumn.add(rt.data.localDeletionTime);
@@ -173,21 +173,21 @@ public class SSTableExport
      * @param cfMetaData Column Family metadata (to get validator)
      * @return column as serialized list
      */
-    private static List<Object> serializeColumn(Column column, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static List<Object> serializeColumn(Column column, CFMetaData cfMetaData)
     {
+        CellNameType comparator = cfMetaData.comparator;
         ArrayList<Object> serializedColumn = new ArrayList<Object>();
 
-        ByteBuffer name = ByteBufferUtil.clone(column.name());
         ByteBuffer value = ByteBufferUtil.clone(column.value());
 
-        serializedColumn.add(comparator.getString(name));
+        serializedColumn.add(comparator.getString(column.name()));
         if (column instanceof DeletedColumn)
         {
             serializedColumn.add(ByteBufferUtil.bytesToHex(value));
         }
         else
         {
-            AbstractType<?> validator = cfMetaData.getValueValidator(cfMetaData.getColumnDefinitionFromCellName(name));
+            AbstractType<?> validator = cfMetaData.getValueValidator(column.name());
             serializedColumn.add(validator.getString(value));
         }
         serializedColumn.add(column.timestamp());