Posted to commits@cassandra.apache.org by sl...@apache.org on 2013/01/02 13:41:16 UTC

[4/9] Replace supercolumns internally by composites

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/service/RangeSliceVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/RangeSliceVerbHandler.java b/src/java/org/apache/cassandra/service/RangeSliceVerbHandler.java
index ba8283f..d8588a8 100644
--- a/src/java/org/apache/cassandra/service/RangeSliceVerbHandler.java
+++ b/src/java/org/apache/cassandra/service/RangeSliceVerbHandler.java
@@ -43,7 +43,7 @@ public class RangeSliceVerbHandler implements IVerbHandler<RangeSliceCommand>
         if (cfs.indexManager.hasIndexFor(command.row_filter))
             return cfs.search(command.row_filter, command.range, command.maxResults, command.predicate, command.countCQL3Rows);
         else
-            return cfs.getRangeSlice(command.super_column, command.range, command.maxResults, command.predicate, command.row_filter, command.countCQL3Rows, command.isPaging);
+            return cfs.getRangeSlice(command.range, command.maxResults, command.predicate, command.row_filter, command.countCQL3Rows, command.isPaging);
     }
 
     public void doVerb(MessageIn<RangeSliceCommand> message, String id)
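
With supercolumns encoded as composites, the supercolumn selector no longer needs to travel as a separate super_column argument: the predicate itself can express it. A minimal sketch of how a caller scopes a slice to one supercolumn under the new scheme, using the SuperColumns helper this patch introduces (sc, start, finish, reversed and count are placeholders; the concrete call sites appear in the CassandraServer hunks below):

    // Sketch: a slice scoped to supercolumn `sc` becomes an ordinary
    // IDiskAtomFilter over the composite range that `sc` owns; a null
    // supercolumn name leaves the filter covering the whole row.
    IDiskAtomFilter filter = SuperColumns.fromSCFilter(
        (CompositeType) metadata.comparator,
        sc,
        new SliceQueryFilter(start, finish, reversed, count));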

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/service/RowRepairResolver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/RowRepairResolver.java b/src/java/org/apache/cassandra/service/RowRepairResolver.java
index 21cf5ab..45b9e2c 100644
--- a/src/java/org/apache/cassandra/service/RowRepairResolver.java
+++ b/src/java/org/apache/cassandra/service/RowRepairResolver.java
@@ -30,7 +30,6 @@ import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.net.IAsyncResult;
 import org.apache.cassandra.net.MessageIn;
@@ -153,8 +152,8 @@ public class RowRepairResolver extends AbstractRowResolver
         // mimic the collectCollatedColumn + removeDeleted path that getColumnFamily takes.
         // this will handle removing columns and subcolumns that are supressed by a row or
         // supercolumn tombstone.
-        QueryFilter filter = new QueryFilter(null, new QueryPath(resolved.metadata().cfName), new IdentityQueryFilter());
-        List<CloseableIterator<IColumn>> iters = new ArrayList<CloseableIterator<IColumn>>();
+        QueryFilter filter = new QueryFilter(null, resolved.metadata().cfName, new IdentityQueryFilter());
+        List<CloseableIterator<Column>> iters = new ArrayList<CloseableIterator<Column>>();
         for (ColumnFamily version : versions)
         {
             if (version == null)
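
Two mechanical consequences of the change surface in this hunk: QueryFilter now takes the plain cfName, since there is no supercolumn level left to address with a QueryPath, and the IColumn interface gives way to the concrete Column class. Roughly, as far as this patch exercises the hierarchy:

    // Before: IColumn was implemented by regular columns and by
    // SuperColumn (a column holding subcolumns). After: SuperColumn is
    // gone and the remaining variants extend Column directly.
    //   Column          -- plain cell
    //   ExpiringColumn  -- cell with a TTL
    //   CounterColumn   -- commutative counter cell
    //   DeletedColumn   -- cell tombstone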

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index 6408c0b..212765e 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -43,7 +43,6 @@ import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.Table;
 import org.apache.cassandra.db.filter.ColumnSlice;
 import org.apache.cassandra.db.filter.IDiskAtomFilter;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.dht.AbstractBounds;
@@ -322,7 +321,7 @@ public class StorageProxy implements StorageProxyMBean
     private static void asyncRemoveFromBatchlog(Collection<InetAddress> endpoints, UUID uuid)
     {
         RowMutation rm = new RowMutation(Table.SYSTEM_KS, UUIDType.instance.decompose(uuid));
-        rm.delete(new QueryPath(SystemTable.BATCHLOG_CF), FBUtilities.timestampMicros());
+        rm.delete(SystemTable.BATCHLOG_CF, FBUtilities.timestampMicros());
         AbstractWriteResponseHandler handler = new WriteResponseHandler(endpoints, Collections.<InetAddress>emptyList(), ConsistencyLevel.ANY, Table.SYSTEM_KS, null, WriteType.SIMPLE);
         updateBatchlog(rm, endpoints, handler);
     }
@@ -1099,7 +1098,6 @@ public class StorageProxy implements StorageProxyMBean
             {
                 RangeSliceCommand nodeCmd = new RangeSliceCommand(command.keyspace,
                                                                   command.column_family,
-                                                                  command.super_column,
                                                                   commandPredicate,
                                                                   range,
                                                                   command.row_filter,
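
With QueryPath removed, RowMutation spells each addressing mode out as its own overload. The variants used throughout this patch:

    // Sketch of the QueryPath-free mutation surface, as used in this patch:
    rm.delete(cfName, timestamp);                    // drop the whole row in this CF
    rm.delete(cfName, cellName, timestamp);          // drop a single cell
    rm.deleteRange(cfName, start, end, timestamp);   // drop a composite range,
                                                     // e.g. one former supercolumn
    rm.add(cfName, cellName, value, timestamp, ttl); // insert a cell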

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/streaming/IncomingStreamReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/IncomingStreamReader.java b/src/java/org/apache/cassandra/streaming/IncomingStreamReader.java
index 656a99d..547e7e9 100644
--- a/src/java/org/apache/cassandra/streaming/IncomingStreamReader.java
+++ b/src/java/org/apache/cassandra/streaming/IncomingStreamReader.java
@@ -30,11 +30,11 @@ import org.slf4j.LoggerFactory;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.ColumnSerializer;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.Table;
 import org.apache.cassandra.db.compaction.CompactionController;
 import org.apache.cassandra.db.compaction.PrecompactedRow;
-import org.apache.cassandra.io.IColumnSerializer;
 import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.StreamingMetrics;
@@ -164,7 +164,7 @@ public class IncomingStreamReader
                     {
                         // need to update row cache
                         // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrarily to what appendFromStream does below
-                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, localFile.getFilename(), key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
+                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, localFile.getFilename(), key, 0, dataSize, ColumnSerializer.Flag.FROM_REMOTE);
                         PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                         // We don't expire anything so the row shouldn't be empty
                         assert !row.isEmpty();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index f25e093..1e4c0ed 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -42,8 +42,8 @@ import org.apache.cassandra.cql.CQLStatement;
 import org.apache.cassandra.cql.QueryProcessor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.context.CounterContext;
-import org.apache.cassandra.db.filter.IDiskAtomFilter;
-import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.*;
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.MarshalException;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.dht.*;
@@ -121,7 +121,7 @@ public class CassandraServer implements Cassandra.Iface
         return columnFamilyKeyMap;
     }
 
-    public List<Column> thriftifySubColumns(Collection<IColumn> columns)
+    public List<Column> thriftifySubColumns(Collection<org.apache.cassandra.db.Column> columns)
     {
         if (columns == null || columns.isEmpty())
         {
@@ -129,7 +129,7 @@ public class CassandraServer implements Cassandra.Iface
         }
 
         ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
-        for (IColumn column : columns)
+        for (org.apache.cassandra.db.Column column : columns)
         {
             if (column.isMarkedForDelete())
             {
@@ -146,7 +146,7 @@ public class CassandraServer implements Cassandra.Iface
         return thriftColumns;
     }
 
-    public List<CounterColumn> thriftifyCounterSubColumns(Collection<IColumn> columns)
+    public List<CounterColumn> thriftifyCounterSubColumns(Collection<org.apache.cassandra.db.Column> columns)
     {
         if (columns == null || columns.isEmpty())
         {
@@ -154,7 +154,7 @@ public class CassandraServer implements Cassandra.Iface
         }
 
         ArrayList<CounterColumn> thriftColumns = new ArrayList<CounterColumn>(columns.size());
-        for (IColumn column : columns)
+        for (org.apache.cassandra.db.Column column : columns)
         {
             if (column.isMarkedForDelete())
             {
@@ -168,29 +168,15 @@ public class CassandraServer implements Cassandra.Iface
         return thriftColumns;
     }
 
-    public List<ColumnOrSuperColumn> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder)
+    public List<ColumnOrSuperColumn> thriftifyColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder)
     {
         ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
-        for (IColumn column : columns)
+        for (org.apache.cassandra.db.Column column : columns)
         {
             if (column.isMarkedForDelete())
-            {
                 continue;
-            }
-            if (column instanceof org.apache.cassandra.db.CounterColumn)
-            {
-                CounterColumn thrift_column = new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
-                thriftColumns.add(new ColumnOrSuperColumn().setCounter_column(thrift_column));
-            }
-            else
-            {
-                Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
-                if (column instanceof ExpiringColumn)
-                {
-                    thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
-                }
-                thriftColumns.add(new ColumnOrSuperColumn().setColumn(thrift_column));
-            }
+
+            thriftColumns.add(thriftifyColumn(column));
         }
 
         // we have to do the reversing here, since internally we pass results around in ColumnFamily
@@ -201,26 +187,80 @@ public class CassandraServer implements Cassandra.Iface
         return thriftColumns;
     }
 
-    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder, boolean isCounterCF)
+    private ColumnOrSuperColumn thriftifyColumnWithName(org.apache.cassandra.db.Column column, ByteBuffer newName)
     {
-        if (isCounterCF)
-            return thriftifyCounterSuperColumns(columns, reverseOrder);
+        assert !column.isMarkedForDelete();
+
+        if (column instanceof org.apache.cassandra.db.CounterColumn)
+            return new ColumnOrSuperColumn().setCounter_column(thriftifySubCounter(column).setName(newName));
         else
-            return thriftifySuperColumns(columns, reverseOrder);
+            return new ColumnOrSuperColumn().setColumn(thriftifySubColumn(column).setName(newName));
     }
 
-    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
+    private ColumnOrSuperColumn thriftifyColumn(org.apache.cassandra.db.Column column)
     {
-        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
-        for (IColumn column : columns)
+        return thriftifyColumnWithName(column, column.name());
+    }
+
+    private Column thriftifySubColumn(org.apache.cassandra.db.Column column)
+    {
+        assert !column.isMarkedForDelete() && !(column instanceof org.apache.cassandra.db.CounterColumn);
+
+        Column thrift_column = new Column(column.name()).setValue(column.value()).setTimestamp(column.timestamp());
+        if (column instanceof ExpiringColumn)
+        {
+            thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
+        }
+        return thrift_column;
+    }
+
+    private CounterColumn thriftifySubCounter(org.apache.cassandra.db.Column column)
+    {
+        assert !column.isMarkedForDelete() && (column instanceof org.apache.cassandra.db.CounterColumn);
+        return new CounterColumn(column.name(), CounterContext.instance().total(column.value()));
+    }
+
+    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder, boolean subcolumnsOnly, boolean isCounterCF)
+    {
+        if (subcolumnsOnly)
         {
-            List<Column> subcolumns = thriftifySubColumns(column.getSubColumns());
-            if (subcolumns.isEmpty())
+            ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
+            for (org.apache.cassandra.db.Column column : columns)
             {
+                if (column.isMarkedForDelete())
+                    continue;
+
+                thriftSuperColumns.add(thriftifyColumnWithName(column, SuperColumns.subName(column.name())));
+            }
+            if (reverseOrder)
+                Collections.reverse(thriftSuperColumns);
+            return thriftSuperColumns;
+        }
+        else
+        {
+            if (isCounterCF)
+                return thriftifyCounterSuperColumns(columns, reverseOrder);
+            else
+                return thriftifySuperColumns(columns, reverseOrder);
+        }
+    }
+
+    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder)
+    {
+        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
+        SuperColumn current = null;
+        for (org.apache.cassandra.db.Column column : columns)
+        {
+            if (column.isMarkedForDelete())
                 continue;
+
+            ByteBuffer scName = SuperColumns.scName(column.name());
+            if (current == null || !scName.equals(current.bufferForName()))
+            {
+                current = new SuperColumn(scName, new ArrayList<Column>());
+                thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(current));
             }
-            SuperColumn superColumn = new SuperColumn(column.name(), subcolumns);
-            thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(superColumn));
+            current.getColumns().add(thriftifySubColumn(column).setName(SuperColumns.subName(column.name())));
         }
 
         if (reverseOrder)
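
The rewritten thriftifySuperColumns is the heart of the read path: internally a super CF row is now a flat run of cells whose names are two-component composites, and because the comparator orders on the supercolumn component first, all subcolumns of one supercolumn are adjacent. A single pass with a `current` accumulator is therefore enough to regroup them into Thrift SuperColumns. A sketch of the name split this relies on, assuming SuperColumns.scName/subName simply expose the two composite components:

    ByteBuffer name = column.name();              // CompositeType(scType, subType)
    ByteBuffer sc   = SuperColumns.scName(name);  // component 0: supercolumn name
    ByteBuffer sub  = SuperColumns.subName(name); // component 1: subcolumn name

(The fully qualified org.apache.cassandra.db.Column in these hunks is plain disambiguation: the Thrift-generated wire class is also named Column.)
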
@@ -229,18 +269,22 @@ public class CassandraServer implements Cassandra.Iface
         return thriftSuperColumns;
     }
 
-    private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<IColumn> columns, boolean reverseOrder)
+    private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder)
     {
         ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
-        for (IColumn column : columns)
+        CounterSuperColumn current = null;
+        for (org.apache.cassandra.db.Column column : columns)
         {
-            List<CounterColumn> subcolumns = thriftifyCounterSubColumns(column.getSubColumns());
-            if (subcolumns.isEmpty())
-            {
+            if (column.isMarkedForDelete())
                 continue;
+
+            ByteBuffer scName = SuperColumns.scName(column.name());
+            if (current == null || !scName.equals(current.bufferForName()))
+            {
+                current = new CounterSuperColumn(scName, new ArrayList<CounterColumn>());
+                thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(current));
             }
-            CounterSuperColumn superColumn = new CounterSuperColumn(column.name(), subcolumns);
-            thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(superColumn));
+            current.getColumns().add(thriftifySubCounter(column).setName(SuperColumns.subName(column.name())));
         }
 
         if (reverseOrder)
@@ -249,7 +293,7 @@ public class CassandraServer implements Cassandra.Iface
         return thriftSuperColumns;
     }
 
-    private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, org.apache.cassandra.db.ConsistencyLevel consistency_level)
+    private Map<ByteBuffer, List<ColumnOrSuperColumn>> getSlice(List<ReadCommand> commands, boolean subColumnsOnly, org.apache.cassandra.db.ConsistencyLevel consistency_level)
     throws org.apache.cassandra.exceptions.InvalidRequestException, UnavailableException, TimedOutException
     {
         Map<DecoratedKey, ColumnFamily> columnFamilies = readColumnFamily(commands, consistency_level);
@@ -258,7 +302,7 @@ public class CassandraServer implements Cassandra.Iface
         {
             ColumnFamily cf = columnFamilies.get(StorageService.getPartitioner().decorateKey(command.key));
             boolean reverseOrder = command instanceof SliceFromReadCommand && ((SliceFromReadCommand)command).filter.reversed;
-            List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, reverseOrder);
+            List<ColumnOrSuperColumn> thriftifiedColumns = thriftifyColumnFamily(cf, subColumnsOnly, reverseOrder);
             columnFamiliesMap.put(command.key, thriftifiedColumns);
         }
 
@@ -269,19 +313,11 @@ public class CassandraServer implements Cassandra.Iface
     {
         if (cf == null || cf.isEmpty())
             return EMPTY_COLUMNS;
-        if (subcolumnsOnly)
-        {
-            IColumn column = cf.iterator().next();
-            Collection<IColumn> subcolumns = column.getSubColumns();
-            if (subcolumns == null || subcolumns.isEmpty())
-                return EMPTY_COLUMNS;
-            else
-                return thriftifyColumns(subcolumns, reverseOrder);
-        }
-        if (cf.isSuper())
+
+        if (cf.metadata().isSuper())
         {
             boolean isCounterCF = cf.metadata().getDefaultValidator().isCommutative();
-            return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, isCounterCF);
+            return thriftifySuperColumns(cf.getSortedColumns(), reverseOrder, subcolumnsOnly, isCounterCF);
         }
         else
         {
@@ -369,25 +405,38 @@ public class CassandraServer implements Cassandra.Iface
         consistencyLevel.validateForRead(keyspace);
 
         List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
+        IDiskAtomFilter filter;
         if (predicate.column_names != null)
         {
-            for (ByteBuffer key: keys)
+            if (metadata.isSuper())
             {
-                ThriftValidation.validateKey(metadata, key);
-                commands.add(new SliceByNamesReadCommand(keyspace, key, column_parent, predicate.column_names));
+                CompositeType type = (CompositeType)metadata.comparator;
+                SortedSet s = new TreeSet<ByteBuffer>(column_parent.isSetSuper_column() ? type.types.get(1) : type.types.get(0));
+                s.addAll(predicate.column_names);
+                filter = SuperColumns.fromSCNamesFilter(type, column_parent.bufferForSuper_column(), new NamesQueryFilter(s));
+            }
+            else
+            {
+                SortedSet s = new TreeSet<ByteBuffer>(metadata.comparator);
+                s.addAll(predicate.column_names);
+                filter = new NamesQueryFilter(s);
             }
         }
         else
         {
             SliceRange range = predicate.slice_range;
-            for (ByteBuffer key: keys)
-            {
-                ThriftValidation.validateKey(metadata, key);
-                commands.add(new SliceFromReadCommand(keyspace, key, column_parent, range.start, range.finish, range.reversed, range.count));
-            }
+            filter = new SliceQueryFilter(range.start, range.finish, range.reversed, range.count);
+            if (metadata.isSuper())
+                filter = SuperColumns.fromSCFilter((CompositeType)metadata.comparator, column_parent.bufferForSuper_column(), filter);
+        }
+
+        for (ByteBuffer key: keys)
+        {
+            ThriftValidation.validateKey(metadata, key);
+            commands.add(ReadCommand.create(keyspace, key, column_parent.getColumn_family(), filter));
         }
 
-        return getSlice(commands, consistencyLevel);
+        return getSlice(commands, column_parent.isSetSuper_column(), consistencyLevel);
     }
 
     private ColumnOrSuperColumn internal_get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level)
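
internal_get_slice now builds one IDiskAtomFilter up front instead of one read command flavour per key. For a super CF, a by-names predicate is first sorted with the appropriate composite component (component 0 when the names are supercolumn names, component 1 when they name subcolumns inside one supercolumn), then lifted to full composite names. What the wrapper presumably produces for a read of subcolumns {a, b} under supercolumn sc:

    // Sketch (assumed expansion of SuperColumns.fromSCNamesFilter):
    SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(metadata.comparator);
    names.add(CompositeType.build(sc, a));
    names.add(CompositeType.build(sc, b));
    IDiskAtomFilter filter = new NamesQueryFilter(names);
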
@@ -402,10 +451,24 @@ public class CassandraServer implements Cassandra.Iface
         org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
         consistencyLevel.validateForRead(keyspace);
 
-        QueryPath path = new QueryPath(column_path.column_family, column_path.column == null ? null : column_path.super_column);
-        List<ByteBuffer> nameAsList = Arrays.asList(column_path.column == null ? column_path.super_column : column_path.column);
         ThriftValidation.validateKey(metadata, key);
-        ReadCommand command = new SliceByNamesReadCommand(keyspace, key, path, nameAsList);
+
+        IDiskAtomFilter filter;
+        if (metadata.isSuper())
+        {
+            CompositeType type = (CompositeType)metadata.comparator;
+            SortedSet names = new TreeSet<ByteBuffer>(column_path.column == null ? type.types.get(0) : type.types.get(1));
+            names.add(column_path.column == null ? column_path.super_column : column_path.column);
+            filter = SuperColumns.fromSCNamesFilter(type, column_path.column == null ? null : column_path.bufferForSuper_column(), new NamesQueryFilter(names));
+        }
+        else
+        {
+            SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(metadata.comparator);
+            names.add(column_path.column);
+            filter = new NamesQueryFilter(names);
+        }
+
+        ReadCommand command = ReadCommand.create(keyspace, key, column_path.column_family, filter);
 
         Map<DecoratedKey, ColumnFamily> cfamilies = readColumnFamily(Arrays.asList(command), consistencyLevel);
 
@@ -413,7 +476,7 @@ public class CassandraServer implements Cassandra.Iface
 
         if (cf == null)
             throw new NotFoundException();
-        List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, command.queryPath.superColumnName != null, false);
+        List<ColumnOrSuperColumn> tcolumns = thriftifyColumnFamily(cf, metadata.isSuper() && column_path.column != null, false);
         if (tcolumns.isEmpty())
             throw new NotFoundException();
         assert tcolumns.size() == 1;
@@ -613,7 +676,11 @@ public class CassandraServer implements Cassandra.Iface
         RowMutation rm = new RowMutation(cState.getKeyspace(), key);
         try
         {
-            rm.add(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value, column.timestamp, column.ttl);
+            ByteBuffer name = column.name;
+            if (metadata.isSuper())
+                name = CompositeType.build(column_parent.super_column, name);
+
+            rm.add(column_parent.column_family, name, column.value, column.timestamp, column.ttl);
         }
         catch (MarshalException e)
         {
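
On the write path the translation is a single name rewrite: a (supercolumn, column) pair becomes one internal cell carrying both levels in its name. CompositeType keeps the components separately comparable by serializing each as a 2-byte length, the raw bytes, and a trailing end-of-component byte:

    ByteBuffer name = CompositeType.build(sc, sub);
    // resulting layout (sketch):
    //   [u16 len(sc)][sc bytes][eoc][u16 len(sub)][sub bytes][eoc]
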
@@ -705,11 +772,11 @@ public class CassandraServer implements Cassandra.Iface
 
                     if (mutation.deletion != null)
                     {
-                        rm.deleteColumnOrSuperColumn(cfName, mutation.deletion);
+                        deleteColumnOrSuperColumn(rm, cfName, mutation.deletion);
                     }
                     if (mutation.column_or_supercolumn != null)
                     {
-                        rm.addColumnOrSuperColumn(cfName, mutation.column_or_supercolumn);
+                        addColumnOrSuperColumn(rm, cfName, mutation.column_or_supercolumn);
                     }
                 }
             }
@@ -728,6 +795,55 @@ public class CassandraServer implements Cassandra.Iface
         return rowMutations;
     }
 
+    private void addColumnOrSuperColumn(RowMutation rm, String cfName, ColumnOrSuperColumn cosc)
+    {
+        if (cosc.super_column != null)
+        {
+            for (Column column : cosc.super_column.columns)
+            {
+                rm.add(cfName, CompositeType.build(cosc.super_column.name, column.name), column.value, column.timestamp, column.ttl);
+            }
+        }
+        else if (cosc.column != null)
+        {
+            rm.add(cfName, cosc.column.name, cosc.column.value, cosc.column.timestamp, cosc.column.ttl);
+        }
+        else if (cosc.counter_super_column != null)
+        {
+            for (CounterColumn column : cosc.counter_super_column.columns)
+            {
+                rm.addCounter(cfName, CompositeType.build(cosc.counter_super_column.name, column.name), column.value);
+            }
+        }
+        else // cosc.counter_column != null
+        {
+            rm.addCounter(cfName, cosc.counter_column.name, cosc.counter_column.value);
+        }
+    }
+
+    private void deleteColumnOrSuperColumn(RowMutation rm, String cfName, Deletion del)
+    {
+        if (del.predicate != null && del.predicate.column_names != null)
+        {
+            for(ByteBuffer c : del.predicate.column_names)
+            {
+                if (del.super_column == null && Schema.instance.getColumnFamilyType(rm.getTable(), cfName) == ColumnFamilyType.Super)
+                    rm.deleteRange(cfName, SuperColumns.startOf(c), SuperColumns.endOf(c), del.timestamp);
+                else if (del.super_column != null)
+                    rm.delete(cfName, CompositeType.build(del.super_column, c), del.timestamp);
+                else
+                    rm.delete(cfName, c, del.timestamp);
+            }
+        }
+        else
+        {
+            if (del.super_column != null)
+                rm.deleteRange(cfName, SuperColumns.startOf(del.super_column), SuperColumns.endOf(del.super_column), del.timestamp);
+            else
+                rm.delete(cfName, del.timestamp);
+        }
+    }
+
     public void batch_mutate(Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map, ConsistencyLevel consistency_level)
     throws InvalidRequestException, UnavailableException, TimedOutException
     {
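
deleteColumnOrSuperColumn captures the new deletion semantics: deleting a whole supercolumn, formerly a SuperColumn-level tombstone, is now a range tombstone covering every composite name prefixed by that supercolumn. SuperColumns.startOf/endOf presumably build one-component prefixes whose end-of-component byte makes them sort just before, respectively just after, every two-component name sharing the prefix:

    // Sketch: drop all of supercolumn `sc` with one range tombstone.
    rm.deleteRange(cfName,
                   SuperColumns.startOf(sc), // sorts before build(sc, <any>)
                   SuperColumns.endOf(sc),   // sorts after  build(sc, <any>)
                   timestamp);
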
@@ -808,7 +924,14 @@ public class CassandraServer implements Cassandra.Iface
             ThriftConversion.fromThrift(consistency_level).validateCounterForWrite(metadata);
 
         RowMutation rm = new RowMutation(keyspace, key);
-        rm.delete(new QueryPath(column_path), timestamp);
+        if (column_path.super_column == null && column_path.column == null)
+            rm.delete(column_path.column_family, timestamp);
+        else if (column_path.super_column == null)
+            rm.delete(column_path.column_family, column_path.column, timestamp);
+        else if (column_path.column == null)
+            rm.deleteRange(column_path.column_family, SuperColumns.startOf(column_path.super_column), SuperColumns.endOf(column_path.super_column), timestamp);
+        else
+            rm.delete(column_path.column_family, CompositeType.build(column_path.super_column, column_path.column), timestamp);
 
         if (isCommutativeOp)
             doInsert(consistency_level, Arrays.asList(new CounterMutation(rm, ThriftConversion.fromThrift(consistency_level))));
@@ -942,8 +1065,8 @@ public class CassandraServer implements Cassandra.Iface
             schedule(DatabaseDescriptor.getRangeRpcTimeout());
             try
             {
-                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata.getComparatorFor(column_parent.super_column));
-                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent, filter, bounds,
+                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, column_parent.super_column);
+                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_parent.column_family, filter, bounds,
                                                                         range.row_filter, range.count), consistencyLevel);
             }
             finally
@@ -1030,8 +1153,8 @@ public class CassandraServer implements Cassandra.Iface
             schedule(DatabaseDescriptor.getRangeRpcTimeout());
             try
             {
-                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata.comparator);
-                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_family, null, filter,
+                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, null);
+                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_family, filter,
                                                                         bounds, range.row_filter, range.count, true, true), consistencyLevel);
             }
             finally
@@ -1110,10 +1233,9 @@ public class CassandraServer implements Cassandra.Iface
             AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(RowPosition.forKey(index_clause.start_key, p),
                                                                          p.getMinimumToken().minKeyBound());
 
-            IDiskAtomFilter filter = ThriftValidation.asIFilter(column_predicate, metadata.getComparatorFor(column_parent.super_column));
+            IDiskAtomFilter filter = ThriftValidation.asIFilter(column_predicate, metadata, column_parent.super_column);
             RangeSliceCommand command = new RangeSliceCommand(keyspace,
                                                               column_parent.column_family,
-                                                              null,
                                                               filter,
                                                               bounds,
                                                               index_clause.expressions,
@@ -1528,7 +1650,10 @@ public class CassandraServer implements Cassandra.Iface
             RowMutation rm = new RowMutation(keyspace, key);
             try
             {
-                rm.addCounter(new QueryPath(column_parent.column_family, column_parent.super_column, column.name), column.value);
+                if (metadata.isSuper())
+                    rm.addCounter(column_parent.column_family, CompositeType.build(column_parent.super_column, column.name), column.value);
+                else
+                    rm.addCounter(column_parent.column_family, column.name, column.value);
             }
             catch (MarshalException e)
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/thrift/ThriftValidation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftValidation.java b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
index 6473acd..101102f 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftValidation.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.db.filter.NamesQueryFilter;
 import org.apache.cassandra.db.filter.SliceQueryFilter;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.MarshalException;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.RandomPartitioner;
@@ -195,20 +196,22 @@ public class ThriftValidation
     private static void validateColumnNames(CFMetaData metadata, ByteBuffer superColumnName, Iterable<ByteBuffer> column_names)
     throws org.apache.cassandra.exceptions.InvalidRequestException
     {
+        int maxNameLength = org.apache.cassandra.db.Column.MAX_NAME_LENGTH;
+
         if (superColumnName != null)
         {
-            if (superColumnName.remaining() > IColumn.MAX_NAME_LENGTH)
-                throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name length must not be greater than " + IColumn.MAX_NAME_LENGTH);
+            if (superColumnName.remaining() > maxNameLength)
+                throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name length must not be greater than " + maxNameLength);
             if (superColumnName.remaining() == 0)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name must not be empty");
             if (metadata.cfType == ColumnFamilyType.Standard)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn specified to ColumnFamily " + metadata.cfName + " containing normal columns");
         }
-        AbstractType<?> comparator = metadata.getComparatorFor(superColumnName);
+        AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, superColumnName);
         for (ByteBuffer name : column_names)
         {
-            if (name.remaining() > IColumn.MAX_NAME_LENGTH)
-                throw new org.apache.cassandra.exceptions.InvalidRequestException("column name length must not be greater than " + IColumn.MAX_NAME_LENGTH);
+            if (name.remaining() > maxNameLength)
+                throw new org.apache.cassandra.exceptions.InvalidRequestException("column name length must not be greater than " + maxNameLength);
             if (name.remaining() == 0)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("column name must not be empty");
             try
@@ -229,7 +232,7 @@ public class ThriftValidation
 
     public static void validateRange(CFMetaData metadata, ColumnParent column_parent, SliceRange range) throws org.apache.cassandra.exceptions.InvalidRequestException
     {
-        AbstractType<?> comparator = metadata.getComparatorFor(column_parent.super_column);
+        AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, column_parent.super_column);
         try
         {
             comparator.validate(range.start);
@@ -412,15 +415,16 @@ public class ThriftValidation
         {
             if (logger.isDebugEnabled())
                 logger.debug("rejecting invalid value " + ByteBufferUtil.bytesToHex(summarize(column.value)));
+
             throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("(%s) [%s][%s][%s] failed validation",
                                                                       me.getMessage(),
                                                                       metadata.ksName,
                                                                       metadata.cfName,
-                                                                      (isSubColumn ? metadata.subcolumnComparator : metadata.comparator).getString(column.name)));
+                                                                      (SuperColumns.getComparatorFor(metadata, isSubColumn)).getString(column.name)));
         }
 
         // Indexed column values cannot be larger than 64K.  See CASSANDRA-3057/4240 for more details
-        if (!Table.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(column))
+        if (!Table.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(column)))
                     throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s",
                                                                               column.value.remaining(),
                                                                               columnDef.getIndexName(),
@@ -428,6 +432,14 @@ public class ThriftValidation
                                                                               metadata.ksName));
     }
 
+    private static org.apache.cassandra.db.Column asDBColumn(Column column)
+    {
+        if (column.ttl <= 0)
+            return new org.apache.cassandra.db.Column(column.name, column.value, column.timestamp);
+        else
+            return new org.apache.cassandra.db.ExpiringColumn(column.name, column.value, column.timestamp, column.ttl);
+    }
+
     /**
      * Return, at most, the first 64K of the buffer. This avoids very large column values being
      * logged in their entirety.
@@ -527,7 +539,7 @@ public class ThriftValidation
             return false;
 
         SecondaryIndexManager idxManager = Table.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager;
-        AbstractType<?> nameValidator =  ColumnFamily.getComparatorFor(metadata.ksName, metadata.cfName, null);
+        AbstractType<?> nameValidator = SuperColumns.getComparatorFor(metadata, null);
 
         boolean isIndexed = false;
         for (IndexExpression expression : index_clause)
@@ -586,18 +598,26 @@ public class ThriftValidation
             throw new org.apache.cassandra.exceptions.InvalidRequestException("system keyspace is not user-modifiable");
     }
 
-    public static IDiskAtomFilter asIFilter(SlicePredicate sp, AbstractType<?> comparator)
+    public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn)
     {
+        AbstractType<?> comparator = metadata.isSuper()
+                                   ? ((CompositeType)metadata.comparator).types.get(superColumn == null ? 0 : 1)
+                                   : metadata.comparator;
         SliceRange sr = sp.slice_range;
+        IDiskAtomFilter filter;
         if (sr == null)
         {
             SortedSet<ByteBuffer> ss = new TreeSet<ByteBuffer>(comparator);
             ss.addAll(sp.column_names);
-            return new NamesQueryFilter(ss);
+            filter = new NamesQueryFilter(ss);
         }
         else
         {
-            return new SliceQueryFilter(sr.start, sr.finish, sr.reversed, sr.count);
+            filter = new SliceQueryFilter(sr.start, sr.finish, sr.reversed, sr.count);
         }
+
+        if (metadata.isSuper())
+            filter = SuperColumns.fromSCFilter((CompositeType)metadata.comparator, superColumn, filter);
+        return filter;
     }
 }
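
The single asIFilter entry point now takes the CFMetaData plus an optional supercolumn name and derives the right comparator itself. The premise, used here and in validateColumnNames above, is that a super CF declared with comparator X and subcomparator Y is represented internally by a two-component composite:

    // Sketch of the assumed comparator layout for a super CF:
    CompositeType type = (CompositeType) metadata.comparator; // CompositeType(X, Y)
    AbstractType<?> scNameType  = type.types.get(0); // validates supercolumn names
    AbstractType<?> subNameType = type.types.get(1); // validates subcolumn names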

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/tools/SSTableExport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index fe7d6c0..5085b52 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -51,10 +51,9 @@ import org.apache.cassandra.db.DeletedColumn;
 import org.apache.cassandra.db.DeletionInfo;
 import org.apache.cassandra.db.DeletionTime;
 import org.apache.cassandra.db.ExpiringColumn;
-import org.apache.cassandra.db.IColumn;
+import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.OnDiskAtom;
 import org.apache.cassandra.db.RangeTombstone;
-import org.apache.cassandra.db.SuperColumn;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.sstable.Descriptor;
@@ -136,20 +135,6 @@ public class SSTableExport
             }
             return;
         }
-
-        if (columnContainer instanceof SuperColumn)
-        {
-            SuperColumn superColumn = (SuperColumn) columnContainer;
-            DeletionInfo deletionInfo = new DeletionInfo(superColumn.getMarkedForDeleteAt(),
-                    superColumn.getLocalDeletionTime());
-            if (!deletionInfo.equals(DeletionInfo.LIVE))
-            {
-                writeKey(out, "metadata");
-                writeDeletionInfo(out, deletionInfo.getTopLevelDeletion());
-                out.print(",");
-            }
-            return;
-        }
     }
 
     private static void writeDeletionInfo(PrintStream out, DeletionTime deletionTime)
@@ -169,18 +154,18 @@ public class SSTableExport
      * @param comparator columns comparator
      * @param cfMetaData Column Family metadata (to get validator)
      */
-    private static void serializeColumns(Iterator<OnDiskAtom> columns, PrintStream out, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static void serializeAtoms(Iterator<OnDiskAtom> atoms, PrintStream out, AbstractType<?> comparator, CFMetaData cfMetaData)
     {
-        while (columns.hasNext())
+        while (atoms.hasNext())
         {
-            writeJSON(out, serializeColumn(columns.next(), comparator, cfMetaData));
+            writeJSON(out, serializeAtom(atoms.next(), comparator, cfMetaData));
 
-            if (columns.hasNext())
+            if (atoms.hasNext())
                 out.print(", ");
         }
     }
 
-    private static void serializeIColumns(Iterator<IColumn> columns, PrintStream out, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static void serializeColumns(Iterator<Column> columns, PrintStream out, AbstractType<?> comparator, CFMetaData cfMetaData)
     {
         while (columns.hasNext())
         {
@@ -191,16 +176,16 @@ public class SSTableExport
         }
     }
 
-    private static List<Object> serializeColumn(OnDiskAtom column, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static List<Object> serializeAtom(OnDiskAtom atom, AbstractType<?> comparator, CFMetaData cfMetaData)
     {
-        if (column instanceof IColumn)
+        if (atom instanceof Column)
         {
-            return serializeColumn((IColumn)column, comparator, cfMetaData);
+            return serializeColumn((Column)atom, comparator, cfMetaData);
         }
         else
         {
-            assert column instanceof RangeTombstone;
-            RangeTombstone rt = (RangeTombstone)column;
+            assert atom instanceof RangeTombstone;
+            RangeTombstone rt = (RangeTombstone)atom;
             ArrayList<Object> serializedColumn = new ArrayList<Object>();
             serializedColumn.add(comparator.getString(rt.min));
             serializedColumn.add(comparator.getString(rt.max));
@@ -220,7 +205,7 @@ public class SSTableExport
      *
      * @return column as serialized list
      */
-    private static List<Object> serializeColumn(IColumn column, AbstractType<?> comparator, CFMetaData cfMetaData)
+    private static List<Object> serializeColumn(Column column, AbstractType<?> comparator, CFMetaData cfMetaData)
     {
         ArrayList<Object> serializedColumn = new ArrayList<Object>();
 
@@ -267,7 +252,6 @@ public class SSTableExport
     private static void serializeRow(SSTableIdentityIterator row, DecoratedKey key, PrintStream out)
     {
         ColumnFamily columnFamily = row.getColumnFamily();
-        boolean isSuperCF = columnFamily.isSuper();
         CFMetaData cfMetaData = columnFamily.metadata();
         AbstractType<?> comparator = columnFamily.getComparator();
 
@@ -279,34 +263,11 @@ public class SSTableExport
         writeMeta(out, columnFamily);
 
         writeKey(out, "columns");
-        out.print(isSuperCF ? "{" : "[");
+        out.print("[");
 
-        if (isSuperCF)
-        {
-            while (row.hasNext())
-            {
-                SuperColumn scol = (SuperColumn)row.next();
-                assert scol instanceof IColumn;
-                IColumn column = (IColumn)scol;
-                writeKey(out, comparator.getString(column.name()));
-                out.print("{");
-                writeMeta(out, scol);
-                writeKey(out, "subColumns");
-                out.print("[");
-                serializeIColumns(column.getSubColumns().iterator(), out, columnFamily.getSubComparator(), cfMetaData);
-                out.print("]");
-                out.print("}");
-
-                if (row.hasNext())
-                    out.print(", ");
-            }
-        }
-        else
-        {
-            serializeColumns(row, out, comparator, cfMetaData);
-        }
+        serializeAtoms(row, out, comparator, cfMetaData);
 
-        out.print(isSuperCF ? "}" : "]");
+        out.print("]");
         out.print("}");
     }
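
For sstable2json this removes the special nested output shape for super column families: every row now exports one flat "columns" array of atoms, with composite names rendered through the row comparator. Roughly, against the old shape that the import tool still recognizes:

    // Old super-CF JSON (still accepted by the importer, see the -S flag below):
    //   "columns": { "<sc name>": { "metadata": {...},
    //                               "subColumns": [ [name, value, ts, ...], ... ] } }
    // New JSON, identical for standard and super CFs:
    //   "columns": [ [name, value, ts, ...], ... ]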
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/src/java/org/apache/cassandra/tools/SSTableImport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableImport.java b/src/java/org/apache/cassandra/tools/SSTableImport.java
index 74a86ca..06ccaa7 100644
--- a/src/java/org/apache/cassandra/tools/SSTableImport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableImport.java
@@ -46,10 +46,10 @@ import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.DeletionInfo;
 import org.apache.cassandra.db.ExpiringColumn;
 import org.apache.cassandra.db.RangeTombstone;
-import org.apache.cassandra.db.SuperColumn;
-import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.SuperColumns;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.MarshalException;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.sstable.SSTableWriter;
@@ -69,12 +69,14 @@ public class SSTableImport
     private static final String COLUMN_FAMILY_OPTION = "c";
     private static final String KEY_COUNT_OPTION = "n";
     private static final String IS_SORTED_OPTION = "s";
+    private static final String OLD_SC_FORMAT_OPTION = "S";
 
     private static final Options options = new Options();
     private static CommandLine cmd;
 
     private Integer keyCountToImport;
     private final boolean isSorted;
+    private final boolean oldSCFormat;
 
     private static final JsonFactory factory = new MappingJsonFactory().configure(
             JsonParser.Feature.INTERN_FIELD_NAMES, false);
@@ -91,6 +93,7 @@ public class SSTableImport
 
         options.addOption(new Option(KEY_COUNT_OPTION, true, "Number of keys to import (Optional)."));
         options.addOption(new Option(IS_SORTED_OPTION, false, "Assume JSON file as already sorted (e.g. created by sstable2json tool) (Optional)."));
+        options.addOption(new Option(OLD_SC_FORMAT_OPTION, false, "Assume JSON file use legacy super column format (Optional)."));
     }
 
     private static class JsonColumn<T>
@@ -107,11 +110,11 @@ public class SSTableImport
         // Counter columns
         private long timestampOfLastDelete;
 
-        public JsonColumn(T json, CFMetaData meta, boolean isSubColumn)
+        public JsonColumn(T json, CFMetaData meta, boolean oldSCFormat, boolean isSubColumn)
         {
             if (json instanceof List)
             {
-                AbstractType<?> comparator = (isSubColumn) ? meta.subcolumnComparator : meta.comparator;
+                AbstractType<?> comparator = oldSCFormat ? SuperColumns.getComparatorFor(meta, isSubColumn) : meta.comparator;
                 List fields = (List<?>) json;
 
                 assert fields.size() >= 3 : "Column definition should have at least 3";
@@ -193,18 +196,24 @@ public class SSTableImport
 
     public SSTableImport()
     {
-        this(null, false);
+        this(null, false, false);
     }
 
     public SSTableImport(boolean isSorted)
     {
-        this(null, isSorted);
+        this(isSorted, false);
     }
 
-    public SSTableImport(Integer keyCountToImport, boolean isSorted)
+    public SSTableImport(boolean isSorted, boolean oldSCFormat)
+    {
+        this(null, isSorted, oldSCFormat);
+    }
+
+    public SSTableImport(Integer keyCountToImport, boolean isSorted, boolean oldSCFormat)
     {
         this.keyCountToImport = keyCountToImport;
         this.isSorted = isSorted;
+        this.oldSCFormat = oldSCFormat;
     }
 
     private void addToStandardCF(List<?> row, ColumnFamily cfamily)
@@ -226,33 +235,34 @@ public class SSTableImport
 
         for (Object c : row)
         {
-            JsonColumn col = new JsonColumn<List>((List) c, cfm, (superName != null));
-            QueryPath path = new QueryPath(cfm.cfName, superName, col.getName());
+            JsonColumn col = new JsonColumn<List>((List) c, cfm, oldSCFormat, (superName != null));
+            ByteBuffer cname = superName == null ? col.getName() : CompositeType.build(superName, col.getName());
 
             if (col.isExpiring())
             {
-                cfamily.addColumn(null, new ExpiringColumn(col.getName(), col.getValue(), col.timestamp, col.ttl, col.localExpirationTime));
+                cfamily.addColumn(new ExpiringColumn(cname, col.getValue(), col.timestamp, col.ttl, col.localExpirationTime));
             }
             else if (col.isCounter())
             {
-                cfamily.addColumn(null, new CounterColumn(col.getName(), col.getValue(), col.timestamp, col.timestampOfLastDelete));
+                cfamily.addColumn(new CounterColumn(cname, col.getValue(), col.timestamp, col.timestampOfLastDelete));
             }
             else if (col.isDeleted())
             {
-                cfamily.addTombstone(path, col.getValue(), col.timestamp);
+                cfamily.addTombstone(cname, col.getValue(), col.timestamp);
             }
             else if (col.isRangeTombstone())
             {
-                cfamily.addAtom(new RangeTombstone(col.getName(), col.getValue(), col.timestamp, col.localExpirationTime));
+                ByteBuffer end = superName == null ? col.getValue() : CompositeType.build(superName, col.getValue());
+                cfamily.addAtom(new RangeTombstone(cname, end, col.timestamp, col.localExpirationTime));
             }
             else
             {
-                cfamily.addColumn(path, col.getValue(), col.timestamp);
+                cfamily.addColumn(cname, col.getValue(), col.timestamp);
             }
         }
     }
 
-    private void parseMeta(Map<?, ?> map, AbstractColumnContainer columnContainer)
+    private void parseMeta(Map<?, ?> map, ColumnFamily cf, ByteBuffer superColumnName)
     {
 
         // deletionInfo is the only metadata we store for now
@@ -262,7 +272,10 @@ public class SSTableImport
             Number number = (Number) unparsedDeletionInfo.get("markedForDeleteAt");
             long markedForDeleteAt = number instanceof Long ? (Long) number : ((Integer) number).longValue();
             int localDeletionTime = (Integer) unparsedDeletionInfo.get("localDeletionTime");
-            columnContainer.setDeletionInfo(new DeletionInfo(markedForDeleteAt, localDeletionTime));
+            if (superColumnName == null)
+                cf.setDeletionInfo(new DeletionInfo(markedForDeleteAt, localDeletionTime));
+            else
+                cf.addAtom(new RangeTombstone(SuperColumns.startOf(superColumnName), SuperColumns.endOf(superColumnName), markedForDeleteAt, localDeletionTime));
         }
     }
 
@@ -284,13 +297,13 @@ public class SSTableImport
         {
             Map<?, ?> data = (Map<?, ?>) entry.getValue();
 
-            ByteBuffer superName = stringAsType((String) entry.getKey(), comparator);
+            ByteBuffer superName = stringAsType((String) entry.getKey(), ((CompositeType)comparator).types.get(0));
 
             addColumnsToCF((List<?>) data.get("subColumns"), superName, cfamily);
 
             if (data.containsKey("metadata"))
             {
-                parseMeta((Map<?, ?>) data.get("metadata"), (SuperColumn) cfamily.getColumn(superName));
+                parseMeta((Map<?, ?>) data.get("metadata"), cfamily, superName);
             }
         }
     }
@@ -346,11 +359,11 @@ public class SSTableImport
         {
             if (row.getValue().containsKey("metadata"))
             {
-                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily);
+                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily, null);
             }
 
             Object columns = row.getValue().get("columns");
-            if (columnFamily.getType() == ColumnFamilyType.Super)
+            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                 addToSuperCF((Map<?, ?>) columns, columnFamily);
             else
                 addToStandardCF((List<?>) columns, columnFamily);
@@ -421,10 +434,9 @@ public class SSTableImport
             DecoratedKey currentKey = partitioner.decorateKey(hexToBytes((String) row.get("key")));
 
             if (row.containsKey("metadata"))
-                parseMeta((Map<?, ?>) row.get("metadata"), columnFamily);
+                parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);
 
-
-            if (columnFamily.getType() == ColumnFamilyType.Super)
+            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                 addToSuperCF((Map<?, ?>)row.get("columns"), columnFamily);
             else
                 addToStandardCF((List<?>)row.get("columns"), columnFamily);
@@ -511,6 +523,7 @@ public class SSTableImport
 
         Integer keyCountToImport = null;
         boolean isSorted = false;
+        boolean oldSCFormat = false;
 
         if (cmd.hasOption(KEY_COUNT_OPTION))
         {
@@ -522,6 +535,11 @@ public class SSTableImport
             isSorted = true;
         }
 
+        if (cmd.hasOption(OLD_SC_FORMAT_OPTION))
+        {
+            oldSCFormat = true;
+        }
+
         DatabaseDescriptor.loadSchemas();
         if (Schema.instance.getNonSystemTables().size() < 1)
         {
@@ -532,7 +550,7 @@ public class SSTableImport
 
         try
         {
-           new SSTableImport(keyCountToImport,isSorted).importJson(json, keyspace, cfamily, ssTable);
+           new SSTableImport(keyCountToImport, isSorted, oldSCFormat).importJson(json, keyspace, cfamily, ssTable);
         }
         catch (Exception e)
         {
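
Assuming the usual json2sstable wrapper around this class, importing a pre-patch dump of a super column family would look something like the following (flags other than -s/-S/-c are from memory, and the file names are illustrative):

    json2sstable -s -S -K Keyspace1 -c Super1 old_dump.json /path/to/Keyspace1-Super1-ib-1-Data.db

-S tells the importer to expect the legacy nested supercolumn JSON and rebuild composite cell names via CompositeType.build; dumps produced by the post-patch exporter use the flat format and need no flag.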

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.RangeSliceCommand.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.RangeSliceCommand.bin b/test/data/serialization/2.0/db.RangeSliceCommand.bin
new file mode 100644
index 0000000..e96746d
Binary files /dev/null and b/test/data/serialization/2.0/db.RangeSliceCommand.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.Row.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.Row.bin b/test/data/serialization/2.0/db.Row.bin
new file mode 100644
index 0000000..bfc671d
Binary files /dev/null and b/test/data/serialization/2.0/db.Row.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.RowMutation.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.RowMutation.bin b/test/data/serialization/2.0/db.RowMutation.bin
new file mode 100644
index 0000000..0f024ac
Binary files /dev/null and b/test/data/serialization/2.0/db.RowMutation.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.SliceByNamesReadCommand.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.SliceByNamesReadCommand.bin b/test/data/serialization/2.0/db.SliceByNamesReadCommand.bin
new file mode 100644
index 0000000..0e143a6
Binary files /dev/null and b/test/data/serialization/2.0/db.SliceByNamesReadCommand.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.SliceFromReadCommand.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.SliceFromReadCommand.bin b/test/data/serialization/2.0/db.SliceFromReadCommand.bin
new file mode 100644
index 0000000..7b357aa
Binary files /dev/null and b/test/data/serialization/2.0/db.SliceFromReadCommand.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.Truncation.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.Truncation.bin b/test/data/serialization/2.0/db.Truncation.bin
new file mode 100644
index 0000000..ea67995
Binary files /dev/null and b/test/data/serialization/2.0/db.Truncation.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/db.WriteResponse.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/db.WriteResponse.bin b/test/data/serialization/2.0/db.WriteResponse.bin
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/gms.EndpointState.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/gms.EndpointState.bin b/test/data/serialization/2.0/gms.EndpointState.bin
new file mode 100644
index 0000000..ffbb00d
Binary files /dev/null and b/test/data/serialization/2.0/gms.EndpointState.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/gms.Gossip.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/gms.Gossip.bin b/test/data/serialization/2.0/gms.Gossip.bin
new file mode 100644
index 0000000..af5ac57
Binary files /dev/null and b/test/data/serialization/2.0/gms.Gossip.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/service.TreeRequest.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/service.TreeRequest.bin b/test/data/serialization/2.0/service.TreeRequest.bin
new file mode 100644
index 0000000..b12a1b8
Binary files /dev/null and b/test/data/serialization/2.0/service.TreeRequest.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/service.TreeResponse.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/service.TreeResponse.bin b/test/data/serialization/2.0/service.TreeResponse.bin
new file mode 100644
index 0000000..4beb410
Binary files /dev/null and b/test/data/serialization/2.0/service.TreeResponse.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/streaming.PendingFile.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/streaming.PendingFile.bin b/test/data/serialization/2.0/streaming.PendingFile.bin
new file mode 100644
index 0000000..efc8f77
Binary files /dev/null and b/test/data/serialization/2.0/streaming.PendingFile.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/streaming.StreamHeader.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/streaming.StreamHeader.bin b/test/data/serialization/2.0/streaming.StreamHeader.bin
new file mode 100644
index 0000000..f7e5edc
Binary files /dev/null and b/test/data/serialization/2.0/streaming.StreamHeader.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/streaming.StreamReply.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/streaming.StreamReply.bin b/test/data/serialization/2.0/streaming.StreamReply.bin
new file mode 100644
index 0000000..0094ecc
Binary files /dev/null and b/test/data/serialization/2.0/streaming.StreamReply.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/streaming.StreamRequestMessage.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/streaming.StreamRequestMessage.bin b/test/data/serialization/2.0/streaming.StreamRequestMessage.bin
new file mode 100644
index 0000000..71aaf78
Binary files /dev/null and b/test/data/serialization/2.0/streaming.StreamRequestMessage.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/utils.BloomFilter.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/utils.BloomFilter.bin b/test/data/serialization/2.0/utils.BloomFilter.bin
new file mode 100644
index 0000000..63e561a
Binary files /dev/null and b/test/data/serialization/2.0/utils.BloomFilter.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/utils.EstimatedHistogram.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/utils.EstimatedHistogram.bin b/test/data/serialization/2.0/utils.EstimatedHistogram.bin
new file mode 100644
index 0000000..bedd39b
Binary files /dev/null and b/test/data/serialization/2.0/utils.EstimatedHistogram.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/data/serialization/2.0/utils.LegacyBloomFilter.bin
----------------------------------------------------------------------
diff --git a/test/data/serialization/2.0/utils.LegacyBloomFilter.bin b/test/data/serialization/2.0/utils.LegacyBloomFilter.bin
new file mode 100644
index 0000000..faef1b8
Binary files /dev/null and b/test/data/serialization/2.0/utils.LegacyBloomFilter.bin differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/long/org/apache/cassandra/db/LongTableTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/LongTableTest.java b/test/long/org/apache/cassandra/db/LongTableTest.java
index 47ea238..a630fca 100644
--- a/test/long/org/apache/cassandra/db/LongTableTest.java
+++ b/test/long/org/apache/cassandra/db/LongTableTest.java
@@ -26,7 +26,6 @@ import org.apache.cassandra.utils.WrappedRunnable;
 import static org.apache.cassandra.Util.column;
 
 import org.apache.cassandra.Util;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 
@@ -57,7 +56,7 @@ public class LongTableTest extends SchemaLoader
                 {
                     for (int j = 0; j < i; j++)
                     {
-                        cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("key" + i), new QueryPath("Standard1"), ByteBufferUtil.bytes("c" + j)));
+                        cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("key" + i), "Standard1", ByteBufferUtil.bytes("c" + j)));
                         TableTest.assertColumns(cf, "c" + j);
                     }
                 }

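The QueryPath removal seen here is the recurring read-side pattern for the rest of the test changes; side by side (identifiers taken from the hunk above):

    // Before: the column family was addressed through a QueryPath wrapper.
    cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(
            Util.dk("key1"), new QueryPath("Standard1"), ByteBufferUtil.bytes("c0")));

    // After: QueryPath is gone; filters take the column family name directly.
    cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(
            Util.dk("key1"), "Standard1", ByteBufferUtil.bytes("c0")));
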
http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index de44468..65a8a17 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -30,7 +30,6 @@ import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.io.sstable.SSTableUtils;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -83,7 +82,7 @@ public class LongCompactionsTest extends SchemaLoader
             for (int j = 0; j < rowsPerSSTable; j++)
             {
                 String key = String.valueOf(j);
-                IColumn[] cols = new IColumn[colsPerRow];
+                Column[] cols = new Column[colsPerRow];
                 for (int i = 0; i < colsPerRow; i++)
                 {
                     // last sstable has highest timestamps
@@ -131,7 +130,7 @@ public class LongCompactionsTest extends SchemaLoader
                 DecoratedKey key = Util.dk(String.valueOf(i % 2));
                 RowMutation rm = new RowMutation(TABLE1, key.key);
                 long timestamp = j * ROWS_PER_SSTABLE + i;
-                rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes(String.valueOf(i / 2))),
+                rm.add("Standard1", ByteBufferUtil.bytes(String.valueOf(i / 2)),
                        ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        timestamp);
                 maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
@@ -152,48 +151,6 @@ public class LongCompactionsTest extends SchemaLoader
         cfs.truncate();
     }
 
-    @Test
-    public void testSuperColumnCompactions() throws IOException, ExecutionException, InterruptedException
-    {
-        Table table = Table.open(TABLE1);
-        ColumnFamilyStore cfs = table.getColumnFamilyStore("Super1");
-
-        final int ROWS_PER_SSTABLE = 10;
-        final int SSTABLES = cfs.metadata.getIndexInterval() * 3 / ROWS_PER_SSTABLE;
-
-        //disable compaction while flushing
-        cfs.disableAutoCompaction();
-
-        long maxTimestampExpected = Long.MIN_VALUE;
-        Set<DecoratedKey> inserted = new HashSet<DecoratedKey>();
-        ByteBuffer superColumn = ByteBufferUtil.bytes("TestSuperColumn");
-        for (int j = 0; j < SSTABLES; j++)
-        {
-            for (int i = 0; i < ROWS_PER_SSTABLE; i++)
-            {
-                DecoratedKey key = Util.dk(String.valueOf(i % 2));
-                RowMutation rm = new RowMutation(TABLE1, key.key);
-                long timestamp = j * ROWS_PER_SSTABLE + i;
-                rm.add(new QueryPath("Super1", superColumn, ByteBufferUtil.bytes((long)(i / 2))),
-                       ByteBufferUtil.EMPTY_BYTE_BUFFER,
-                       timestamp);
-                maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
-                rm.apply();
-                inserted.add(key);
-            }
-            cfs.forceBlockingFlush();
-            CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
-            assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(cfs, superColumn).size());
-        }
-
-        forceCompactions(cfs);
-
-        assertEquals(inserted.size(), Util.getRangeSlice(cfs, superColumn).size());
-
-        // make sure max timestamp of compacted sstables is recorded properly after compaction.
-        CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
-    }
-
     private void forceCompactions(ColumnFamilyStore cfs) throws ExecutionException, InterruptedException
     {
         // re-enable compaction with thresholds low enough to force a few rounds

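The write side follows the same shape, and it is why testSuperColumnCompactions could be dropped wholesale: with no QueryPath there is no supercolumn slot in a mutation, so a former (supercolumn, subcolumn) address must arrive as a single composite cell name instead (the Util.java hunk further down shows the CompositeType.build helper used for that). Side by side:

    // Before: the supercolumn rode along in the QueryPath (null for standard CFs).
    rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes("c0")),
           ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);

    // After: cf name and cell name are passed directly.
    rm.add("Standard1", ByteBufferUtil.bytes("c0"),
           ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);
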
http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
index 0ba9d7c..beae23d 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
@@ -31,7 +31,6 @@ import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.RowMutation;
 import org.apache.cassandra.db.Table;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.io.sstable.SSTable;
 import org.apache.cassandra.io.sstable.SSTableReader;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -63,7 +62,7 @@ public class LongLeveledCompactionStrategyTest extends SchemaLoader
             RowMutation rm = new RowMutation(ksname, key.key);
             for (int c = 0; c < columns; c++)
             {
-                rm.add(new QueryPath(cfname, null, ByteBufferUtil.bytes("column" + c)), value, 0);
+                rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
             }
             rm.apply();
             store.forceBlockingFlush();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/system/__init__.py
----------------------------------------------------------------------
diff --git a/test/system/__init__.py b/test/system/__init__.py
index d9b88ee..6dd6be9 100644
--- a/test/system/__init__.py
+++ b/test/system/__init__.py
@@ -132,6 +132,8 @@ class BaseTester(object):
                 if not is_alive(spid):
                     break
                 slept += 0.5
+            # Give time for cassandra to shutdown
+            time.sleep(1)
             if (slept > max_wait and is_alive(spid)):
                 os.kill(spid, signal.SIGKILL)
                 fpath = os.path.join(root, pid_fname)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index aafe98b..807fd98 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -35,12 +35,13 @@ import java.util.Map;
 
 public class AbstractSerializationsTester extends SchemaLoader
 {
-    protected static final String CUR_VER = System.getProperty("cassandra.version", "1.2");
+    protected static final String CUR_VER = System.getProperty("cassandra.version", "2.0");
     protected static final Map<String, Integer> VERSION_MAP = new HashMap<String, Integer> ()
     {{
         put("0.7", 1);
         put("1.0", 3);
         put("1.2", MessagingService.VERSION_12);
+        put("2.0", MessagingService.VERSION_20);
     }};
 
     // TODO ant doesn't pass this -D up to the test, so it's kind of useless

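For orientation only, a sketch of how CUR_VER and VERSION_MAP are typically consumed together; this getVersion helper is an assumption for illustration and is not part of this hunk:

    // Assumed-for-illustration helper: resolve the messaging protocol version
    // for the serialization test data selected by CUR_VER (now "2.0" by default).
    protected static int getVersion()
    {
        return VERSION_MAP.get(CUR_VER);  // e.g. MessagingService.VERSION_20
    }
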
http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/unit/org/apache/cassandra/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 48fbc04..b7abacb 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -33,7 +33,7 @@ import org.apache.cassandra.config.*;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
-import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.index.composites.CompositesIndex;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -411,9 +411,7 @@ public class SchemaLoader
         {
             ByteBuffer key = ByteBufferUtil.bytes("key" + i);
             RowMutation rowMutation = new RowMutation(keyspace, key);
-            QueryPath path = new QueryPath(columnFamily, null, ByteBufferUtil.bytes("col" + i));
-
-            rowMutation.add(path, ByteBufferUtil.bytes("val" + i), System.currentTimeMillis());
+            rowMutation.add(columnFamily, ByteBufferUtil.bytes("col" + i), ByteBufferUtil.bytes("val" + i), System.currentTimeMillis());
             rowMutation.applyUnsafe();
         }
     }
@@ -425,9 +423,7 @@ public class SchemaLoader
         for (int i = offset; i < offset + numberOfRows; i++)
         {
             DecoratedKey key = Util.dk("key" + i);
-            QueryPath path = new QueryPath(columnFamily, null, ByteBufferUtil.bytes("col" + i));
-
-            store.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
+            store.getColumnFamily(QueryFilter.getNamesFilter(key, columnFamily, ByteBufferUtil.bytes("col" + i)));
         }
     }
 

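This hunk retires the second read-path variant: the positional getColumnFamily(key, path, start, finish, reversed, limit) overload gives way to an explicit names filter, in the same spirit as the change above:

    // Before: positional arguments, with the cf name buried in a QueryPath.
    store.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER,
                          ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);

    // After: a single QueryFilter argument carries key, cf name and column names.
    store.getColumnFamily(QueryFilter.getNamesFilter(key, columnFamily,
                                                     ByteBufferUtil.bytes("col" + i)));
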
http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/unit/org/apache/cassandra/Util.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java
index 1e9031e..3df8267 100644
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@ -37,8 +37,10 @@ import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.CompactionTask;
 import org.apache.cassandra.db.columniterator.IdentityQueryFilter;
+import org.apache.cassandra.db.filter.IDiskAtomFilter;
 import org.apache.cassandra.db.filter.QueryFilter;
-import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.SliceQueryFilter;
+import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.gms.ApplicationState;
 import org.apache.cassandra.gms.Gossiper;
@@ -90,14 +92,6 @@ public class Util
         return new CounterUpdateColumn(ByteBufferUtil.bytes(name), value, timestamp);
     }
 
-    public static SuperColumn superColumn(ColumnFamily cf, String name, Column... columns)
-    {
-        SuperColumn sc = new SuperColumn(ByteBufferUtil.bytes(name), cf.metadata().comparator);
-        for (Column c : columns)
-            sc.addColumn(c);
-        return sc;
-    }
-
     public static Token token(String key)
     {
         return StorageService.getPartitioner().getToken(ByteBufferUtil.bytes(key));
@@ -120,7 +114,10 @@ public class Util
 
     public static void addMutation(RowMutation rm, String columnFamilyName, String superColumnName, long columnName, String value, long timestamp)
     {
-        rm.add(new QueryPath(columnFamilyName, ByteBufferUtil.bytes(superColumnName), getBytes(columnName)), ByteBufferUtil.bytes(value), timestamp);
+        ByteBuffer cname = superColumnName == null
+                         ? getBytes(columnName)
+                         : CompositeType.build(ByteBufferUtil.bytes(superColumnName), getBytes(columnName));
+        rm.add(columnFamilyName, cname, ByteBufferUtil.bytes(value), timestamp);
     }
 
     public static ByteBuffer getBytes(long v)
@@ -148,11 +145,14 @@ public class Util
 
     public static List<Row> getRangeSlice(ColumnFamilyStore cfs, ByteBuffer superColumn) throws IOException, ExecutionException, InterruptedException
     {
+        IDiskAtomFilter filter = superColumn == null
+                               ? new IdentityQueryFilter()
+                               : new SliceQueryFilter(SuperColumns.startOf(superColumn), SuperColumns.endOf(superColumn), false, Integer.MAX_VALUE);
+
         Token min = StorageService.getPartitioner().getMinimumToken();
-        return cfs.getRangeSlice(superColumn,
-                                 new Bounds<Token>(min, min).toRowBounds(),
+        return cfs.getRangeSlice(new Bounds<Token>(min, min).toRowBounds(),
                                  10000,
-                                 new IdentityQueryFilter(),
+                                 filter,
                                  null);
     }
 
@@ -180,7 +180,7 @@ public class Util
     {
         ColumnFamilyStore cfStore = table.getColumnFamilyStore(cfName);
         assert cfStore != null : "Column family " + cfName + " has not been defined";
-        return cfStore.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
+        return cfStore.getColumnFamily(QueryFilter.getIdentityFilter(key, cfName));
     }
 
     public static byte[] concatByteArrays(byte[] first, byte[]... remaining)

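The Util.java hunks above contain the two idioms that make the emulation work: a (superColumn, subColumn) pair is encoded as one composite cell name, and "read everything under a supercolumn" becomes a slice bounded by that composite prefix. A condensed sketch using only calls from the hunks (sc, value and timestamp stand in for real arguments):

    // Write path: old (super, sub) addressing becomes a single composite name.
    ByteBuffer cname = CompositeType.build(ByteBufferUtil.bytes("sc1"), getBytes(42L));
    rm.add("Super1", cname, value, timestamp);

    // Read path: the whole supercolumn is the composite range
    // [startOf(sc), endOf(sc)), expressed as an ordinary slice filter.
    IDiskAtomFilter filter = new SliceQueryFilter(SuperColumns.startOf(sc),
                                                  SuperColumns.endOf(sc),
                                                  false,             // forward order
                                                  Integer.MAX_VALUE);
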
http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java b/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
index c7ba1df..1e2ef9c 100644
--- a/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
+++ b/test/unit/org/apache/cassandra/config/ColumnDefinitionTest.java
@@ -53,7 +53,7 @@ public class ColumnDefinitionTest
 
     protected void testSerializeDeserialize(ColumnDefinition cd) throws Exception
     {
-        ColumnDefinition newCd = ColumnDefinition.fromThrift(cd.toThrift());
+        ColumnDefinition newCd = ColumnDefinition.fromThrift(cd.toThrift(), false);
         assert cd != newCd;
         assert cd.hashCode() == newCd.hashCode();
         assert cd.equals(newCd);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3a005df3/test/unit/org/apache/cassandra/config/DefsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/DefsTest.java b/test/unit/org/apache/cassandra/config/DefsTest.java
index 804cb29..ef30668 100644
--- a/test/unit/org/apache/cassandra/config/DefsTest.java
+++ b/test/unit/org/apache/cassandra/config/DefsTest.java
@@ -28,7 +28,6 @@ import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.QueryFilter;
-import org.apache.cassandra.db.filter.QueryPath;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
@@ -66,8 +65,7 @@ public class DefsTest extends SchemaLoader
         CFMetaData cfm = new CFMetaData("Keyspace1",
                                         "TestApplyCFM_CF",
                                         ColumnFamilyType.Standard,
-                                        BytesType.instance,
-                                        null);
+                                        BytesType.instance);
 
         cfm.comment("No comment")
            .readRepairChance(0.5)
@@ -188,15 +186,15 @@ public class DefsTest extends SchemaLoader
         // now read and write to it.
         DecoratedKey dk = Util.dk("key0");
         RowMutation rm = new RowMutation(ks, dk.key);
-        rm.add(new QueryPath(cf, null, ByteBufferUtil.bytes("col0")), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(cf, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Table.open(ks).getColumnFamilyStore(cf);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, new QueryPath(cf), ByteBufferUtil.bytes("col0")));
+        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, cf, ByteBufferUtil.bytes("col0")));
         assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        IColumn col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -213,7 +211,7 @@ public class DefsTest extends SchemaLoader
         // write some data, force a flush, then verify that files exist on disk.
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(new QueryPath(cfm.cfName, null, ByteBufferUtil.bytes(("col" + i))), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
         ColumnFamilyStore store = Table.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
         assert store != null;
@@ -229,7 +227,7 @@ public class DefsTest extends SchemaLoader
         boolean success = true;
         try
         {
-            rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes("col0")), ByteBufferUtil.bytes("value0"), 1L);
+            rm.add("Standard1", ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
             rm.apply();
         }
         catch (Throwable th)
@@ -261,15 +259,15 @@ public class DefsTest extends SchemaLoader
 
         // test reads and writes.
         RowMutation rm = new RowMutation(newCf.ksName, dk.key);
-        rm.add(new QueryPath(newCf.cfName, null, ByteBufferUtil.bytes("col0")), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(newCf.cfName, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Table.open(newCf.ksName).getColumnFamilyStore(newCf.cfName);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, new QueryPath(newCf.cfName), ByteBufferUtil.bytes("col0")));
+        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, newCf.cfName, ByteBufferUtil.bytes("col0")));
         assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        IColumn col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -286,7 +284,7 @@ public class DefsTest extends SchemaLoader
         // write some data, force a flush, then verify that files exist on disk.
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(new QueryPath(cfm.cfName, null, ByteBufferUtil.bytes(("col" + i))), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
         ColumnFamilyStore store = Table.open(cfm.ksName).getColumnFamilyStore(cfm.cfName);
         assert store != null;
@@ -302,7 +300,7 @@ public class DefsTest extends SchemaLoader
         boolean success = true;
         try
         {
-            rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes("col0")), ByteBufferUtil.bytes("value0"), 1L);
+            rm.add("Standard1", ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
             rm.apply();
         }
         catch (Throwable th)
@@ -337,7 +335,7 @@ public class DefsTest extends SchemaLoader
         // write some data
         RowMutation rm = new RowMutation(ks.name, dk.key);
         for (int i = 0; i < 100; i++)
-            rm.add(new QueryPath(cfm.cfName, null, ByteBufferUtil.bytes(("col" + i))), ByteBufferUtil.bytes("anyvalue"), 1L);
+            rm.add(cfm.cfName, ByteBufferUtil.bytes(("col" + i)), ByteBufferUtil.bytes("anyvalue"), 1L);
         rm.apply();
 
         MigrationManager.announceKeyspaceDrop(ks.name);
@@ -369,15 +367,15 @@ public class DefsTest extends SchemaLoader
         // now read and write to it.
         DecoratedKey dk = Util.dk("key0");
         RowMutation rm = new RowMutation(newKs.name, dk.key);
-        rm.add(new QueryPath(newCf.cfName, null, ByteBufferUtil.bytes("col0")), ByteBufferUtil.bytes("value0"), 1L);
+        rm.add(newCf.cfName, ByteBufferUtil.bytes("col0"), ByteBufferUtil.bytes("value0"), 1L);
         rm.apply();
         ColumnFamilyStore store = Table.open(newKs.name).getColumnFamilyStore(newCf.cfName);
         assert store != null;
         store.forceBlockingFlush();
 
-        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, new QueryPath(newCf.cfName), ByteBufferUtil.bytes("col0")));
+        ColumnFamily cfam = store.getColumnFamily(QueryFilter.getNamesFilter(dk, newCf.cfName, ByteBufferUtil.bytes("col0")));
         assert cfam.getColumn(ByteBufferUtil.bytes("col0")) != null;
-        IColumn col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
+        Column col = cfam.getColumn(ByteBufferUtil.bytes("col0"));
         assert ByteBufferUtil.bytes("value0").equals(col.value());
     }
 
@@ -460,7 +458,7 @@ public class DefsTest extends SchemaLoader
         assert Schema.instance.getCFMetaData(cf.ksName, cf.cfName).getDefaultValidator() == UTF8Type.instance;
 
         // Change cfId
-        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, cf.comparator, cf.subcolumnComparator, UUID.randomUUID());
+        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, cf.comparator, UUID.randomUUID());
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -470,7 +468,7 @@ public class DefsTest extends SchemaLoader
         catch (ConfigurationException expected) {}
 
         // Change cfName
-        newCfm = new CFMetaData(cf.ksName, cf.cfName + "_renamed", cf.cfType, cf.comparator, cf.subcolumnComparator);
+        newCfm = new CFMetaData(cf.ksName, cf.cfName + "_renamed", cf.cfType, cf.comparator);
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -480,7 +478,7 @@ public class DefsTest extends SchemaLoader
         catch (ConfigurationException expected) {}
 
         // Change ksName
-        newCfm = new CFMetaData(cf.ksName + "_renamed", cf.cfName, cf.cfType, cf.comparator, cf.subcolumnComparator);
+        newCfm = new CFMetaData(cf.ksName + "_renamed", cf.cfName, cf.cfType, cf.comparator);
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -490,7 +488,7 @@ public class DefsTest extends SchemaLoader
         catch (ConfigurationException expected) {}
 
         // Change cf type
-        newCfm = new CFMetaData(cf.ksName, cf.cfName, ColumnFamilyType.Super, cf.comparator, cf.subcolumnComparator);
+        newCfm = new CFMetaData(cf.ksName, cf.cfName, ColumnFamilyType.Super, cf.comparator);
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -500,7 +498,7 @@ public class DefsTest extends SchemaLoader
         catch (ConfigurationException expected) {}
 
         // Change comparator
-        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, TimeUUIDType.instance, cf.subcolumnComparator);
+        newCfm = new CFMetaData(cf.ksName, cf.cfName, cf.cfType, TimeUUIDType.instance);
         CFMetaData.copyOpts(newCfm, cf);
         try
         {
@@ -518,8 +516,8 @@ public class DefsTest extends SchemaLoader
 
         // insert some data.  save the sstable descriptor so we can make sure it's marked for delete after the drop
         RowMutation rm = new RowMutation("Keyspace6", ByteBufferUtil.bytes("k1"));
-        rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("notbirthdate")), ByteBufferUtil.bytes(1L), 0);
-        rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", ByteBufferUtil.bytes("notbirthdate"), ByteBufferUtil.bytes(1L), 0);
+        rm.add("Indexed1", ByteBufferUtil.bytes("birthdate"), ByteBufferUtil.bytes(1L), 0);
         rm.apply();
         ColumnFamilyStore cfs = Table.open("Keyspace6").getColumnFamilyStore("Indexed1");
         cfs.forceBlockingFlush();
@@ -541,7 +539,7 @@ public class DefsTest extends SchemaLoader
 
     private CFMetaData addTestCF(String ks, String cf, String comment)
     {
-        CFMetaData newCFMD = new CFMetaData(ks, cf, ColumnFamilyType.Standard, UTF8Type.instance, null);
+        CFMetaData newCFMD = new CFMetaData(ks, cf, ColumnFamilyType.Standard, UTF8Type.instance);
         newCFMD.comment(comment)
                .readRepairChance(0.0);