Posted to commits@cassandra.apache.org by sl...@apache.org on 2013/12/17 17:53:11 UTC

[3/6] Rename Column to Cell
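
The rename is mechanical: the class formerly known as org.apache.cassandra.db.Column
becomes org.apache.cassandra.db.Cell, and call sites update their imports, variable
names and type parameters to match, while ColumnFamily accessors such as
getSortedColumns() and getColumn() keep their names but now yield Cell instances.
As a rough caller-side sketch of the post-rename API (illustrative only, not part of
this patch; the class and method names CellRenameExample/liveValues are made up),
using only methods that appear in the hunks below:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    import org.apache.cassandra.db.Cell;          // previously org.apache.cassandra.db.Column
    import org.apache.cassandra.db.ColumnFamily;

    public class CellRenameExample
    {
        /** Collect the values of all live cells in a row, skipping anything deleted as of 'now'. */
        public static List<ByteBuffer> liveValues(ColumnFamily row, long now)
        {
            Collection<Cell> cells = row.getSortedColumns();   // was Collection<Column>
            List<ByteBuffer> values = new ArrayList<ByteBuffer>(cells.size());
            for (Cell cell : cells)                            // was: for (Column column : columns)
            {
                if (cell.isMarkedForDelete(now))               // tombstone check, unchanged by the rename
                    continue;
                values.add(cell.value());
            }
            return values;
        }
    }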

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
index 9bf297b..1d780cd 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnCollectionValue.java
@@ -62,9 +62,9 @@ public class CompositesIndexOnCollectionValue extends CompositesIndex
         return ((CollectionType)columnDef.type).valueComparator();
     }
 
-    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
+    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Cell cell)
     {
-        return column.value();
+        return cell.value();
     }
 
     protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite cellName)
@@ -77,7 +77,7 @@ public class CompositesIndexOnCollectionValue extends CompositesIndex
         return builder.build();
     }
 
-    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
+    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Cell indexEntry)
     {
         int prefixSize = columnDef.position();
         CellName name = indexEntry.name();
@@ -98,11 +98,11 @@ public class CompositesIndexOnCollectionValue extends CompositesIndex
     public boolean isStale(IndexedEntry entry, ColumnFamily data, long now)
     {
         CellName name = data.getComparator().create(entry.indexedEntryPrefix, columnDef.name, entry.indexedEntryCollectionKey);
-        Column liveColumn = data.getColumn(name);
-        if (liveColumn == null || liveColumn.isMarkedForDelete(now))
+        Cell liveCell = data.getColumn(name);
+        if (liveCell == null || liveCell.isMarkedForDelete(now))
             return true;
 
-        ByteBuffer liveValue = liveColumn.value();
+        ByteBuffer liveValue = liveCell.value();
         return ((CollectionType)columnDef.type).valueComparator().compare(entry.indexValue.key, liveValue) != 0;
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
index 6df1e8d..b791545 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnPartitionKey.java
@@ -58,7 +58,7 @@ public class CompositesIndexOnPartitionKey extends CompositesIndex
         return new CompoundDenseCellNameType(types);
     }
 
-    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
+    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Cell cell)
     {
         CompositeType keyComparator = (CompositeType)baseCfs.metadata.getKeyValidator();
         ByteBuffer[] components = keyComparator.split(rowKey);
@@ -75,7 +75,7 @@ public class CompositesIndexOnPartitionKey extends CompositesIndex
         return builder.build();
     }
 
-    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
+    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Cell indexEntry)
     {
         int ckCount = baseCfs.metadata.clusteringColumns().size();
         CBuilder builder = baseCfs.getComparator().builder();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
index 6903b77..f1b0954 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesIndexOnRegular.java
@@ -57,9 +57,9 @@ public class CompositesIndexOnRegular extends CompositesIndex
         return new CompoundDenseCellNameType(types);
     }
 
-    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
+    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Cell cell)
     {
-        return column.value();
+        return cell.value();
     }
 
     protected Composite makeIndexColumnPrefix(ByteBuffer rowKey, Composite cellName)
@@ -71,7 +71,7 @@ public class CompositesIndexOnRegular extends CompositesIndex
         return builder.build();
     }
 
-    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Column indexEntry)
+    public IndexedEntry decodeEntry(DecoratedKey indexedValue, Cell indexEntry)
     {
         CBuilder builder = baseCfs.getComparator().builder();
         for (int i = 0; i < columnDef.position(); i++)
@@ -90,11 +90,11 @@ public class CompositesIndexOnRegular extends CompositesIndex
     public boolean isStale(IndexedEntry entry, ColumnFamily data, long now)
     {
         CellName name = data.getComparator().create(entry.indexedEntryPrefix, columnDef.name);
-        Column liveColumn = data.getColumn(name);
-        if (liveColumn == null || liveColumn.isMarkedForDelete(now))
+        Cell liveCell = data.getColumn(name);
+        if (liveCell == null || liveCell.isMarkedForDelete(now))
             return true;
 
-        ByteBuffer liveValue = liveColumn.value();
+        ByteBuffer liveValue = liveCell.value();
         return columnDef.type.compare(entry.indexValue.key, liveValue) != 0;
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
index 97602af..7dba9b0 100644
--- a/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/composites/CompositesSearcher.java
@@ -104,7 +104,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
         return new ColumnFamilyStore.AbstractScanIterator()
         {
             private Composite lastSeenPrefix = startPrefix;
-            private Deque<Column> indexColumns;
+            private Deque<Cell> indexCells;
             private int columnsRead = Integer.MAX_VALUE;
             private int limit = filter.currentLimit();
             private int columnsCount = 0;
@@ -145,7 +145,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                     if (columnsCount >= limit)
                         return makeReturn(currentKey, data);
 
-                    if (indexColumns == null || indexColumns.isEmpty())
+                    if (indexCells == null || indexCells.isEmpty())
                     {
                         if (columnsRead < rowsPerQuery)
                         {
@@ -168,31 +168,31 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                         if (indexRow == null || indexRow.getColumnCount() == 0)
                             return makeReturn(currentKey, data);
 
-                        Collection<Column> sortedColumns = indexRow.getSortedColumns();
-                        columnsRead = sortedColumns.size();
-                        indexColumns = new ArrayDeque<>(sortedColumns);
-                        Column firstColumn = sortedColumns.iterator().next();
+                        Collection<Cell> sortedCells = indexRow.getSortedColumns();
+                        columnsRead = sortedCells.size();
+                        indexCells = new ArrayDeque<>(sortedCells);
+                        Cell firstCell = sortedCells.iterator().next();
 
                         // Paging is racy, so it is possible the first column of a page is not the last seen one.
-                        if (lastSeenPrefix != startPrefix && lastSeenPrefix.equals(firstColumn.name()))
+                        if (lastSeenPrefix != startPrefix && lastSeenPrefix.equals(firstCell.name()))
                         {
                             // skip the row we already saw w/ the last page of results
-                            indexColumns.poll();
-                            logger.trace("Skipping {}", indexComparator.getString(firstColumn.name()));
+                            indexCells.poll();
+                            logger.trace("Skipping {}", indexComparator.getString(firstCell.name()));
                         }
                     }
 
-                    while (!indexColumns.isEmpty() && columnsCount <= limit)
+                    while (!indexCells.isEmpty() && columnsCount <= limit)
                     {
-                        Column column = indexColumns.poll();
-                        lastSeenPrefix = column.name();
-                        if (column.isMarkedForDelete(filter.timestamp))
+                        Cell cell = indexCells.poll();
+                        lastSeenPrefix = cell.name();
+                        if (cell.isMarkedForDelete(filter.timestamp))
                         {
-                            logger.trace("skipping {}", column.name());
+                            logger.trace("skipping {}", cell.name());
                             continue;
                         }
 
-                        CompositesIndex.IndexedEntry entry = index.decodeEntry(indexKey, column);
+                        CompositesIndex.IndexedEntry entry = index.decodeEntry(indexKey, cell);
                         DecoratedKey dk = baseCfs.partitioner.decorateKey(entry.indexedKey);
 
                         // Are we done for this row?
@@ -206,7 +206,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                             currentKey = dk;
 
                             // We're done with the previous row, return it if it had data, continue otherwise
-                            indexColumns.addFirst(column);
+                            indexCells.addFirst(cell);
                             if (data == null)
                                 continue;
                             else
@@ -229,7 +229,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                             }
                         }
 
-                        // Check if this entry cannot be a hit due to the original column filter
+                        // Check if this entry cannot be a hit due to the original cell filter
                         Composite start = entry.indexedEntryPrefix;
                         if (!filter.columnFilter(dk.key).maySelectPrefix(baseComparator, start))
                             continue;
@@ -244,7 +244,7 @@ public class CompositesSearcher extends SecondaryIndexSearcher
                         else
                             previousPrefix = null;
 
-                        logger.trace("Adding index hit to current row for {}", indexComparator.getString(column.name()));
+                        logger.trace("Adding index hit to current row for {}", indexComparator.getString(cell.name()));
 
                         // We always query the whole CQL3 row. In the case where the original filter was a name filter this might be
                         // slightly wasteful, but this probably doesn't matter in practice and it simplify things.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java b/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
index ee56c36..e532a53 100644
--- a/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
+++ b/src/java/org/apache/cassandra/db/index/keys/KeysIndex.java
@@ -20,10 +20,10 @@ package org.apache.cassandra.db.index.keys;
 import java.nio.ByteBuffer;
 import java.util.Set;
 
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.db.composites.CellName;
 import org.apache.cassandra.db.composites.CellNames;
 import org.apache.cassandra.db.ColumnFamily;
-import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.index.AbstractSimplePerColumnSecondaryIndex;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -35,12 +35,12 @@ import org.apache.cassandra.exceptions.ConfigurationException;
  */
 public class KeysIndex extends AbstractSimplePerColumnSecondaryIndex
 {
-    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Column column)
+    protected ByteBuffer getIndexedValue(ByteBuffer rowKey, Cell cell)
     {
-        return column.value();
+        return cell.value();
     }
 
-    protected CellName makeIndexColumnName(ByteBuffer rowKey, Column column)
+    protected CellName makeIndexColumnName(ByteBuffer rowKey, Cell cell)
     {
         return CellNames.simpleDense(rowKey);
     }
@@ -52,11 +52,11 @@ public class KeysIndex extends AbstractSimplePerColumnSecondaryIndex
 
     public boolean isIndexEntryStale(ByteBuffer indexedValue, ColumnFamily data, long now)
     {
-        Column liveColumn = data.getColumn(data.getComparator().makeCellName(columnDef.name.bytes));
-        if (liveColumn == null || liveColumn.isMarkedForDelete(now))
+        Cell liveCell = data.getColumn(data.getComparator().makeCellName(columnDef.name.bytes));
+        if (liveCell == null || liveCell.isMarkedForDelete(now))
             return true;
 
-        ByteBuffer liveValue = liveColumn.value();
+        ByteBuffer liveValue = liveCell.value();
         return columnDef.type.compare(indexedValue, liveValue) != 0;
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
index 0101a0b..932d446 100644
--- a/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
+++ b/src/java/org/apache/cassandra/db/index/keys/KeysSearcher.java
@@ -87,7 +87,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
         return new ColumnFamilyStore.AbstractScanIterator()
         {
             private Composite lastSeenKey = startKey;
-            private Iterator<Column> indexColumns;
+            private Iterator<Cell> indexColumns;
             private int columnsRead = Integer.MAX_VALUE;
 
             protected Row computeNext()
@@ -124,19 +124,19 @@ public class KeysSearcher extends SecondaryIndexSearcher
                             return endOfData();
                         }
 
-                        Collection<Column> sortedColumns = indexRow.getSortedColumns();
-                        columnsRead = sortedColumns.size();
-                        indexColumns = sortedColumns.iterator();
-                        Column firstColumn = sortedColumns.iterator().next();
+                        Collection<Cell> sortedCells = indexRow.getSortedColumns();
+                        columnsRead = sortedCells.size();
+                        indexColumns = sortedCells.iterator();
+                        Cell firstCell = sortedCells.iterator().next();
 
                         // Paging is racy, so it is possible the first column of a page is not the last seen one.
-                        if (lastSeenKey != startKey && lastSeenKey.equals(firstColumn.name()))
+                        if (lastSeenKey != startKey && lastSeenKey.equals(firstCell.name()))
                         {
                             // skip the row we already saw w/ the last page of results
                             indexColumns.next();
-                            logger.trace("Skipping {}", baseCfs.metadata.getKeyValidator().getString(firstColumn.name().toByteBuffer()));
+                            logger.trace("Skipping {}", baseCfs.metadata.getKeyValidator().getString(firstCell.name().toByteBuffer()));
                         }
-                        else if (range instanceof Range && indexColumns.hasNext() && firstColumn.name().equals(startKey))
+                        else if (range instanceof Range && indexColumns.hasNext() && firstCell.name().equals(startKey))
                         {
                             // skip key excluded by range
                             indexColumns.next();
@@ -146,11 +146,11 @@ public class KeysSearcher extends SecondaryIndexSearcher
 
                     while (indexColumns.hasNext())
                     {
-                        Column column = indexColumns.next();
-                        lastSeenKey = column.name();
-                        if (column.isMarkedForDelete(filter.timestamp))
+                        Cell cell = indexColumns.next();
+                        lastSeenKey = cell.name();
+                        if (cell.isMarkedForDelete(filter.timestamp))
                         {
-                            logger.trace("skipping {}", column.name());
+                            logger.trace("skipping {}", cell.name());
                             continue;
                         }
 
@@ -168,7 +168,7 @@ public class KeysSearcher extends SecondaryIndexSearcher
 
                         logger.trace("Returning index hit for {}", dk);
                         ColumnFamily data = baseCfs.getColumnFamily(new QueryFilter(dk, baseCfs.name, filter.columnFilter(lastSeenKey.toByteBuffer()), filter.timestamp));
-                        // While the column family we'll get in the end should contains the primary clause column, the initialFilter may not have found it and can thus be null
+                        // While the column family we'll get in the end should contains the primary clause cell, the initialFilter may not have found it and can thus be null
                         if (data == null)
                             data = TreeMapBackedSortedColumns.factory.create(baseCfs.metadata);
 
@@ -185,8 +185,8 @@ public class KeysSearcher extends SecondaryIndexSearcher
                         if (((KeysIndex)index).isIndexEntryStale(indexKey.key, data, filter.timestamp))
                         {
                             // delete the index entry w/ its own timestamp
-                            Column dummyColumn = new Column(primaryColumn, indexKey.key, column.timestamp());
-                            ((PerColumnSecondaryIndex)index).delete(dk.key, dummyColumn);
+                            Cell dummyCell = new Cell(primaryColumn, indexKey.key, cell.timestamp());
+                            ((PerColumnSecondaryIndex)index).delete(dk.key, dummyCell);
                             continue;
                         }
                         return new Row(dk, data);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java b/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
index a7162ae..4b26d5d 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractCommutativeType.java
@@ -18,8 +18,9 @@
 package org.apache.cassandra.db.marshal;
 
 import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.db.composites.CellName;
-import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -45,5 +46,5 @@ public abstract class AbstractCommutativeType extends AbstractType<Long>
     /**
      * create commutative column
      */
-    public abstract Column createColumn(CellName name, ByteBuffer value, long timestamp);
+    public abstract Cell createColumn(CellName name, ByteBuffer value, long timestamp);
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/CollectionType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/CollectionType.java b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
index 0f3f564..b9816a6 100644
--- a/src/java/org/apache/cassandra/db/marshal/CollectionType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
@@ -24,10 +24,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.cql3.CQL3Type;
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.Pair;
 
 /**
  * The abstract validator that is the base for maps, sets and lists.
@@ -58,7 +57,7 @@ public abstract class CollectionType<T> extends AbstractType<T>
 
     protected abstract void appendToStringBuilder(StringBuilder sb);
 
-    public abstract ByteBuffer serialize(List<Column> columns);
+    public abstract ByteBuffer serialize(List<Cell> cells);
 
     @Override
     public String toString()
@@ -113,14 +112,14 @@ public abstract class CollectionType<T> extends AbstractType<T>
         return (ByteBuffer)result.flip();
     }
 
-    protected List<Column> enforceLimit(List<Column> columns)
+    protected List<Cell> enforceLimit(List<Cell> cells)
     {
-        if (columns.size() <= MAX_ELEMENTS)
-            return columns;
+        if (cells.size() <= MAX_ELEMENTS)
+            return cells;
 
         logger.error("Detected collection with {} elements, more than the {} limit. Only the first {} elements will be returned to the client. "
-                   + "Please see http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.", columns.size(), MAX_ELEMENTS, MAX_ELEMENTS);
-        return columns.subList(0, MAX_ELEMENTS);
+                   + "Please see http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.", cells.size(), MAX_ELEMENTS, MAX_ELEMENTS);
+        return cells.subList(0, MAX_ELEMENTS);
     }
 
     public static ByteBuffer pack(List<ByteBuffer> buffers, int elements)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java b/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
index 37cd59b..e1a886d 100644
--- a/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CounterColumnType.java
@@ -48,9 +48,9 @@ public class CounterColumnType extends AbstractCommutativeType
     /**
      * create commutative column
      */
-    public Column createColumn(CellName name, ByteBuffer value, long timestamp)
+    public Cell createColumn(CellName name, ByteBuffer value, long timestamp)
     {
-        return new CounterUpdateColumn(name, value, timestamp);
+        return new CounterUpdateCell(name, value, timestamp);
     }
 
     public ByteBuffer fromString(String source)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/ListType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/ListType.java b/src/java/org/apache/cassandra/db/marshal/ListType.java
index 58ba6f1..eabda0b 100644
--- a/src/java/org/apache/cassandra/db/marshal/ListType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ListType.java
@@ -20,12 +20,11 @@ package org.apache.cassandra.db.marshal;
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.ListSerializer;
-import org.apache.cassandra.utils.Pair;
 
 public class ListType<T> extends CollectionType<List<T>>
 {
@@ -82,17 +81,17 @@ public class ListType<T> extends CollectionType<List<T>>
         sb.append(getClass().getName()).append(TypeParser.stringifyTypeParameters(Collections.<AbstractType<?>>singletonList(elements)));
     }
 
-    public ByteBuffer serialize(List<Column> columns)
+    public ByteBuffer serialize(List<Cell> cells)
     {
-        columns = enforceLimit(columns);
+        cells = enforceLimit(cells);
 
-        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(columns.size());
+        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(cells.size());
         int size = 0;
-        for (Column c : columns)
+        for (Cell c : cells)
         {
             bbs.add(c.value());
             size += 2 + c.value().remaining();
         }
-        return pack(bbs, columns.size(), size);
+        return pack(bbs, cells.size(), size);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/MapType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/MapType.java b/src/java/org/apache/cassandra/db/marshal/MapType.java
index 17bd7a7..4d834e7 100644
--- a/src/java/org/apache/cassandra/db/marshal/MapType.java
+++ b/src/java/org/apache/cassandra/db/marshal/MapType.java
@@ -20,7 +20,7 @@ package org.apache.cassandra.db.marshal;
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.TypeSerializer;
@@ -89,13 +89,13 @@ public class MapType<K, V> extends CollectionType<Map<K, V>>
     /**
      * Creates the same output than serialize, but from the internal representation.
      */
-    public ByteBuffer serialize(List<Column> columns)
+    public ByteBuffer serialize(List<Cell> cells)
     {
-        columns = enforceLimit(columns);
+        cells = enforceLimit(cells);
 
-        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(2 * columns.size());
+        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(2 * cells.size());
         int size = 0;
-        for (Column c : columns)
+        for (Cell c : cells)
         {
             ByteBuffer key = c.name().collectionElement();
             ByteBuffer value = c.value();
@@ -103,6 +103,6 @@ public class MapType<K, V> extends CollectionType<Map<K, V>>
             bbs.add(value);
             size += 4 + key.remaining() + value.remaining();
         }
-        return pack(bbs, columns.size(), size);
+        return pack(bbs, cells.size(), size);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/db/marshal/SetType.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/marshal/SetType.java b/src/java/org/apache/cassandra/db/marshal/SetType.java
index 9e45f8f..292b832 100644
--- a/src/java/org/apache/cassandra/db/marshal/SetType.java
+++ b/src/java/org/apache/cassandra/db/marshal/SetType.java
@@ -20,12 +20,11 @@ package org.apache.cassandra.db.marshal;
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.SetSerializer;
-import org.apache.cassandra.utils.Pair;
 
 public class SetType<T> extends CollectionType<Set<T>>
 {
@@ -82,18 +81,18 @@ public class SetType<T> extends CollectionType<Set<T>>
         sb.append(getClass().getName()).append(TypeParser.stringifyTypeParameters(Collections.<AbstractType<?>>singletonList(elements)));
     }
 
-    public ByteBuffer serialize(List<Column> columns)
+    public ByteBuffer serialize(List<Cell> cells)
     {
-        columns = enforceLimit(columns);
+        cells = enforceLimit(cells);
 
-        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(columns.size());
+        List<ByteBuffer> bbs = new ArrayList<ByteBuffer>(cells.size());
         int size = 0;
-        for (Column c : columns)
+        for (Cell c : cells)
         {
             ByteBuffer key = c.name().collectionElement();
             bbs.add(key);
             size += 2 + key.remaining();
         }
-        return pack(bbs, columns.size(), size);
+        return pack(bbs, cells.size(), size);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
index 4327aa9..fbd5bf2 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyInputFormat.java
@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.db.composites.CellName;
-import org.apache.cassandra.db.Column;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
@@ -45,15 +45,15 @@ import org.apache.hadoop.mapreduce.*;
  *
  * The default split size is 64k rows.
  */
-public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<ByteBuffer, SortedMap<CellName, Column>>
+public class ColumnFamilyInputFormat extends AbstractColumnFamilyInputFormat<ByteBuffer, SortedMap<CellName, Cell>>
 {
     
-    public RecordReader<ByteBuffer, SortedMap<CellName, Column>> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException
+    public RecordReader<ByteBuffer, SortedMap<CellName, Cell>> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException
     {
         return new ColumnFamilyRecordReader();
     }
 
-    public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Column>> getRecordReader(org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException
+    public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Cell>> getRecordReader(org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException
     {
         TaskAttemptContext tac = new TaskAttemptContext(jobConf, TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID)))
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
index 7bda3fb..be18f5f 100644
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
@@ -24,11 +24,11 @@ import java.nio.ByteBuffer;
 import java.util.*;
 
 import com.google.common.collect.*;
+import org.apache.cassandra.db.Cell;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.composites.*;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.TypeParser;
@@ -44,8 +44,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransport;
 
-public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<CellName, Column>>
-    implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Column>>
+public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<CellName, Cell>>
+    implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<CellName, Cell>>
 {
     private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);
 
@@ -53,7 +53,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
     private ColumnFamilySplit split;
     private RowIterator iter;
-    private Pair<ByteBuffer, SortedMap<CellName, Column>> currentRow;
+    private Pair<ByteBuffer, SortedMap<CellName, Cell>> currentRow;
     private SlicePredicate predicate;
     private boolean isEmptyPredicate;
     private int totalRowCount; // total number of rows to fetch
@@ -92,7 +92,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return currentRow.left;
     }
 
-    public SortedMap<CellName, Column> getCurrentValue()
+    public SortedMap<CellName, Cell> getCurrentValue()
     {
         return currentRow.right;
     }
@@ -210,7 +210,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return split.getLocations()[0];
     }
 
-    private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>>
+    private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Cell>>>
     {
         protected List<KeySlice> rows;
         protected int totalRead = 0;
@@ -279,50 +279,50 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             return totalRead;
         }
 
-        protected List<Column> unthriftify(ColumnOrSuperColumn cosc)
+        protected List<Cell> unthriftify(ColumnOrSuperColumn cosc)
         {
             if (cosc.counter_column != null)
-                return Collections.<Column>singletonList(unthriftifyCounter(cosc.counter_column));
+                return Collections.<Cell>singletonList(unthriftifyCounter(cosc.counter_column));
             if (cosc.counter_super_column != null)
                 return unthriftifySuperCounter(cosc.counter_super_column);
             if (cosc.super_column != null)
                 return unthriftifySuper(cosc.super_column);
             assert cosc.column != null;
-            return Collections.<Column>singletonList(unthriftifySimple(cosc.column));
+            return Collections.<Cell>singletonList(unthriftifySimple(cosc.column));
         }
 
-        private List<Column> unthriftifySuper(SuperColumn super_column)
+        private List<Cell> unthriftifySuper(SuperColumn super_column)
         {
-            List<Column> columns = new ArrayList<Column>(super_column.columns.size());
+            List<Cell> cells = new ArrayList<Cell>(super_column.columns.size());
             for (org.apache.cassandra.thrift.Column column : super_column.columns)
             {
-                Column c = unthriftifySimple(column);
-                columns.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
+                Cell c = unthriftifySimple(column);
+                cells.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
             }
-            return columns;
+            return cells;
         }
 
-        protected Column unthriftifySimple(org.apache.cassandra.thrift.Column column)
+        protected Cell unthriftifySimple(org.apache.cassandra.thrift.Column column)
         {
-            return new Column(comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
+            return new Cell(comparator.cellFromByteBuffer(column.name), column.value, column.timestamp);
         }
 
-        private Column unthriftifyCounter(CounterColumn column)
+        private Cell unthriftifyCounter(CounterColumn column)
         {
             //CounterColumns read the counterID from the System keyspace, so need the StorageService running and access
-            //to cassandra.yaml. To avoid a Hadoop needing access to yaml return a regular Column.
-            return new Column(comparator.cellFromByteBuffer(column.name), ByteBufferUtil.bytes(column.value), 0);
+            //to cassandra.yaml. To avoid a Hadoop needing access to yaml return a regular Cell.
+            return new Cell(comparator.cellFromByteBuffer(column.name), ByteBufferUtil.bytes(column.value), 0);
         }
 
-        private List<Column> unthriftifySuperCounter(CounterSuperColumn super_column)
+        private List<Cell> unthriftifySuperCounter(CounterSuperColumn super_column)
         {
-            List<Column> columns = new ArrayList<Column>(super_column.columns.size());
+            List<Cell> cells = new ArrayList<Cell>(super_column.columns.size());
             for (CounterColumn column : super_column.columns)
             {
-                Column c = unthriftifyCounter(column);
-                columns.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
+                Cell c = unthriftifyCounter(column);
+                cells.add(c.withUpdatedName(comparator.makeCellName(super_column.name, c.name().toByteBuffer())));
             }
-            return columns;
+            return cells;
         }
     }
 
@@ -401,7 +401,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
+        protected Pair<ByteBuffer, SortedMap<CellName, Cell>> computeNext()
         {
             maybeInit();
             if (rows == null)
@@ -409,12 +409,12 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
             totalRead++;
             KeySlice ks = rows.get(i++);
-            SortedMap<CellName, Column> map = new TreeMap<CellName, Column>(comparator);
+            SortedMap<CellName, Cell> map = new TreeMap<CellName, Cell>(comparator);
             for (ColumnOrSuperColumn cosc : ks.columns)
             {
-                List<Column> columns = unthriftify(cosc);
-                for (Column column : columns)
-                    map.put(column.name(), column);
+                List<Cell> cells = unthriftify(cosc);
+                for (Cell cell : cells)
+                    map.put(cell.name(), cell);
             }
             return Pair.create(ks.key, map);
         }
@@ -422,7 +422,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
 
     private class WideRowIterator extends RowIterator
     {
-        private PeekingIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>> wideColumns;
+        private PeekingIterator<Pair<ByteBuffer, SortedMap<CellName, Cell>>> wideColumns;
         private Composite lastColumn = Composites.EMPTY;
         private ByteBuffer lastCountedKey = ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
@@ -471,13 +471,13 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
+        protected Pair<ByteBuffer, SortedMap<CellName, Cell>> computeNext()
         {
             maybeInit();
             if (rows == null)
                 return endOfData();
 
-            Pair<ByteBuffer, SortedMap<CellName, Column>> next = wideColumns.next();
+            Pair<ByteBuffer, SortedMap<CellName, Cell>> next = wideColumns.next();
             lastColumn = next.right.values().iterator().next().name();
 
             maybeIncreaseRowCounter(next);
@@ -489,7 +489,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
          * Increases the row counter only if we really moved to the next row.
          * @param next just fetched row slice
          */
-        private void maybeIncreaseRowCounter(Pair<ByteBuffer, SortedMap<CellName, Column>> next)
+        private void maybeIncreaseRowCounter(Pair<ByteBuffer, SortedMap<CellName, Cell>> next)
         {
             ByteBuffer currentKey = next.left;
             if (!currentKey.equals(lastCountedKey))
@@ -499,7 +499,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
             }
         }
 
-        private class WideColumnIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Column>>>
+        private class WideColumnIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<CellName, Cell>>>
         {
             private final Iterator<KeySlice> rows;
             private Iterator<ColumnOrSuperColumn> columns;
@@ -520,7 +520,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
                 columns = currentRow.columns.iterator();
             }
 
-            protected Pair<ByteBuffer, SortedMap<CellName, Column>> computeNext()
+            protected Pair<ByteBuffer, SortedMap<CellName, Cell>> computeNext()
             {
                 CellNameType cellType = subComparator == null
                                       ? comparator
@@ -530,20 +530,20 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
                     if (columns.hasNext())
                     {
                         ColumnOrSuperColumn cosc = columns.next();
-                        SortedMap<CellName, Column> map;
-                        List<Column> columns = unthriftify(cosc);
-                        if (columns.size() == 1)
+                        SortedMap<CellName, Cell> map;
+                        List<Cell> cells = unthriftify(cosc);
+                        if (cells.size() == 1)
                         {
-                            map = ImmutableSortedMap.of(columns.get(0).name(), columns.get(0));
+                            map = ImmutableSortedMap.of(cells.get(0).name(), cells.get(0));
                         }
                         else
                         {
                             assert isSuper;
-                            map = new TreeMap<CellName, Column>(cellType);
-                            for (Column column : columns)
-                                map.put(column.name(), column);
+                            map = new TreeMap<CellName, Cell>(cellType);
+                            for (Cell cell : cells)
+                                map.put(cell.name(), cell);
                         }
-                        return Pair.<ByteBuffer, SortedMap<CellName, Column>>create(currentRow.key, map);
+                        return Pair.<ByteBuffer, SortedMap<CellName, Cell>>create(currentRow.key, map);
                     }
 
                     if (!rows.hasNext())
@@ -560,7 +560,7 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
     // to the old. Thus, expect a small performance hit.
     // And obviously this wouldn't work for wide rows. But since ColumnFamilyInputFormat
     // and ColumnFamilyRecordReader don't support them, it should be fine for now.
-    public boolean next(ByteBuffer key, SortedMap<CellName, Column> value) throws IOException
+    public boolean next(ByteBuffer key, SortedMap<CellName, Cell> value) throws IOException
     {
         if (this.nextKeyValue())
         {
@@ -581,9 +581,9 @@ public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap
         return ByteBuffer.wrap(new byte[this.keyBufferSize]);
     }
 
-    public SortedMap<CellName, Column> createValue()
+    public SortedMap<CellName, Cell> createValue()
     {
-        return new TreeMap<CellName, Column>();
+        return new TreeMap<CellName, Cell>();
     }
 
     public long getPos() throws IOException

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java b/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
index e5b8bb1..25ffc29 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/AbstractCassandraStorage.java
@@ -26,13 +26,12 @@ import java.nio.charset.CharacterCodingException;
 import java.util.*;
 
 
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.auth.IAuthenticator;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.ColumnIdentifier;
-import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.marshal.AbstractCompositeType.CompositeComponent;
 import org.apache.cassandra.hadoop.*;
@@ -118,7 +117,7 @@ public abstract class AbstractCassandraStorage extends LoadFunc implements Store
     }
 
     /** convert a column to a tuple */
-    protected Tuple columnToTuple(Column col, CfInfo cfInfo, AbstractType comparator) throws IOException
+    protected Tuple columnToTuple(Cell col, CfInfo cfInfo, AbstractType comparator) throws IOException
     {
         CfDef cfDef = cfInfo.cfDef;
         Tuple pair = TupleFactory.getInstance().newTuple(2);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java b/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
index aeec4a6..ae18d20 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/CassandraStorage.java
@@ -22,10 +22,10 @@ import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.*;
 
+import org.apache.cassandra.db.Cell;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.db.Column;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
@@ -65,7 +65,7 @@ public class CassandraStorage extends AbstractCassandraStorage
     private boolean slice_reverse = false;
     private boolean allow_deletes = false;
 
-    private RecordReader<ByteBuffer, Map<ByteBuffer, Column>> reader;
+    private RecordReader<ByteBuffer, Map<ByteBuffer, Cell>> reader;
     private RecordWriter<ByteBuffer, List<Mutation>> writer;
 
     private boolean widerows = false;
@@ -73,7 +73,7 @@ public class CassandraStorage extends AbstractCassandraStorage
     
     // wide row hacks
     private ByteBuffer lastKey;
-    private Map<ByteBuffer, Column> lastRow;
+    private Map<ByteBuffer, Cell> lastRow;
     private boolean hasNext = true;
 
     public CassandraStorage()
@@ -125,7 +125,7 @@ public class CassandraStorage extends AbstractCassandraStorage
                             key = (ByteBuffer)reader.getCurrentKey();
                             tuple = keyToTuple(key, cfDef, parseType(cfDef.getKey_validation_class()));
                         }
-                        for (Map.Entry<ByteBuffer, Column> entry : lastRow.entrySet())
+                        for (Map.Entry<ByteBuffer, Cell> entry : lastRow.entrySet())
                         {
                             bag.add(columnToTuple(entry.getValue(), cfInfo, parseType(cfDef.getComparator_type())));
                         }
@@ -149,7 +149,7 @@ public class CassandraStorage extends AbstractCassandraStorage
                 {
                     // read too much, hold on to it for next time
                     lastKey = (ByteBuffer)reader.getCurrentKey();
-                    lastRow = (SortedMap<ByteBuffer, Column>)reader.getCurrentValue();
+                    lastRow = (SortedMap<ByteBuffer, Cell>)reader.getCurrentValue();
                     // but return what we have so far
                     tuple.append(bag);
                     return tuple;
@@ -163,13 +163,13 @@ public class CassandraStorage extends AbstractCassandraStorage
                             tuple = keyToTuple(lastKey, cfDef, parseType(cfDef.getKey_validation_class()));
                         else
                             addKeyToTuple(tuple, lastKey, cfDef, parseType(cfDef.getKey_validation_class()));
-                        for (Map.Entry<ByteBuffer, Column> entry : lastRow.entrySet())
+                        for (Map.Entry<ByteBuffer, Cell> entry : lastRow.entrySet())
                         {
                             bag.add(columnToTuple(entry.getValue(), cfInfo, parseType(cfDef.getComparator_type())));
                         }
                         tuple.append(bag);
                         lastKey = key;
-                        lastRow = (SortedMap<ByteBuffer, Column>)reader.getCurrentValue();
+                        lastRow = (SortedMap<ByteBuffer, Cell>)reader.getCurrentValue();
                         return tuple;
                     }
                     if (tuple == null)
@@ -177,17 +177,17 @@ public class CassandraStorage extends AbstractCassandraStorage
                     else
                         addKeyToTuple(tuple, lastKey, cfDef, parseType(cfDef.getKey_validation_class()));
                 }
-                SortedMap<ByteBuffer, Column> row = (SortedMap<ByteBuffer, Column>)reader.getCurrentValue();
+                SortedMap<ByteBuffer, Cell> row = (SortedMap<ByteBuffer, Cell>)reader.getCurrentValue();
                 if (lastRow != null) // prepend what was read last time
                 {
-                    for (Map.Entry<ByteBuffer, Column> entry : lastRow.entrySet())
+                    for (Map.Entry<ByteBuffer, Cell> entry : lastRow.entrySet())
                     {
                         bag.add(columnToTuple(entry.getValue(), cfInfo, parseType(cfDef.getComparator_type())));
                     }
                     lastKey = null;
                     lastRow = null;
                 }
-                for (Map.Entry<ByteBuffer, Column> entry : row.entrySet())
+                for (Map.Entry<ByteBuffer, Cell> entry : row.entrySet())
                 {
                     bag.add(columnToTuple(entry.getValue(), cfInfo, parseType(cfDef.getComparator_type())));
                 }
@@ -214,7 +214,7 @@ public class CassandraStorage extends AbstractCassandraStorage
             CfInfo cfInfo = getCfInfo(loadSignature);
             CfDef cfDef = cfInfo.cfDef;
             ByteBuffer key = reader.getCurrentKey();
-            Map<ByteBuffer, Column> cf = reader.getCurrentValue();
+            Map<ByteBuffer, Cell> cf = reader.getCurrentValue();
             assert key != null && cf != null;
 
             // output tuple, will hold the key, each indexed column in a tuple, then a bag of the rest
@@ -248,7 +248,7 @@ public class CassandraStorage extends AbstractCassandraStorage
                 added.put(cdef.name, true);
             }
             // now add all the other columns
-            for (Map.Entry<ByteBuffer, Column> entry : cf.entrySet())
+            for (Map.Entry<ByteBuffer, Cell> entry : cf.entrySet())
             {
                 if (!added.containsKey(entry.getKey()))
                     bag.add(columnToTuple(entry.getValue(), cfInfo, parseType(cfDef.getComparator_type())));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java b/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
index 7ce78de..3dda264 100644
--- a/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
+++ b/src/java/org/apache/cassandra/hadoop/pig/CqlStorage.java
@@ -25,7 +25,7 @@ import java.util.*;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.composites.CellNames;
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.hadoop.*;
@@ -112,9 +112,9 @@ public class CqlStorage extends AbstractCassandraStorage
                 ByteBuffer columnValue = columns.get(ByteBufferUtil.string(cdef.name.duplicate()));
                 if (columnValue != null)
                 {
-                    Column column = new Column(CellNames.simpleDense(cdef.name), columnValue);
+                    Cell cell = new Cell(CellNames.simpleDense(cdef.name), columnValue);
                     AbstractType<?> validator = getValidatorMap(cfDef).get(cdef.name);
-                    setTupleValue(tuple, i, cqlColumnToObj(column, cfDef), validator);
+                    setTupleValue(tuple, i, cqlColumnToObj(cell, cfDef), validator);
                 }
                 else
                     tuple.set(i, null);
@@ -183,7 +183,7 @@ public class CqlStorage extends AbstractCassandraStorage
     }
 
     /** convert a cql column to an object */
-    private Object cqlColumnToObj(Column col, CfDef cfDef) throws IOException
+    private Object cqlColumnToObj(Cell col, CfDef cfDef) throws IOException
     {
         // standard
         Map<ByteBuffer,AbstractType> validators = getValidatorMap(cfDef);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
index 27f1c12..83037ae 100644
--- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
@@ -109,16 +109,16 @@ public abstract class AbstractSSTableSimpleWriter
         currentSuperColumn = name;
     }
 
-    private void addColumn(Column column)
+    private void addColumn(Cell cell)
     {
         if (columnFamily.metadata().isSuper())
         {
             if (currentSuperColumn == null)
-                throw new IllegalStateException("Trying to add a column to a super column family, but no super column has been started.");
+                throw new IllegalStateException("Trying to add a cell to a super column family, but no super cell has been started.");
 
-            column = column.withUpdatedName(columnFamily.getComparator().makeCellName(currentSuperColumn, column.name().toByteBuffer()));
+            cell = cell.withUpdatedName(columnFamily.getComparator().makeCellName(currentSuperColumn, cell.name().toByteBuffer()));
         }
-        columnFamily.addColumn(column);
+        columnFamily.addColumn(cell);
     }
 
     /**
@@ -129,7 +129,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addColumn(ByteBuffer name, ByteBuffer value, long timestamp)
     {
-        addColumn(new Column(metadata.comparator.cellFromByteBuffer(name), value, timestamp));
+        addColumn(new Cell(metadata.comparator.cellFromByteBuffer(name), value, timestamp));
     }
 
     /**
@@ -144,7 +144,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addExpiringColumn(ByteBuffer name, ByteBuffer value, long timestamp, int ttl, long expirationTimestampMS)
     {
-        addColumn(new ExpiringColumn(metadata.comparator.cellFromByteBuffer(name), value, timestamp, ttl, (int)(expirationTimestampMS / 1000)));
+        addColumn(new ExpiringCell(metadata.comparator.cellFromByteBuffer(name), value, timestamp, ttl, (int)(expirationTimestampMS / 1000)));
     }
 
     /**
@@ -154,7 +154,7 @@ public abstract class AbstractSSTableSimpleWriter
      */
     public void addCounterColumn(ByteBuffer name, long value)
     {
-        addColumn(new CounterColumn(metadata.comparator.cellFromByteBuffer(name), CounterContext.instance().create(counterid, 1L, value, false), System.currentTimeMillis()));
+        addColumn(new CounterCell(metadata.comparator.cellFromByteBuffer(name), CounterContext.instance().create(counterid, 1L, value, false), System.currentTimeMillis()));
     }
 
     /**

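A note for readers following along: the writer's public entry points keep their column-flavoured names (addColumn, addExpiringColumn, addCounterColumn) while the objects they build are now Cell, ExpiringCell and CounterCell. The standalone sketch below mirrors that mapping for the regular and expiring cases, including the millisecond-to-seconds conversion of the expiration timestamp visible in the hunk; the Sketch* classes are simplified stand-ins, not the real org.apache.cassandra.db types.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    // Simplified stand-ins for the renamed cell hierarchy; illustrative only.
    class SketchCell
    {
        final ByteBuffer name;
        final ByteBuffer value;
        final long timestamp;

        SketchCell(ByteBuffer name, ByteBuffer value, long timestamp)
        {
            this.name = name;
            this.value = value;
            this.timestamp = timestamp;
        }
    }

    class SketchExpiringCell extends SketchCell
    {
        final int ttl;
        final int localExpirationTime; // seconds since the epoch, as in the hunk above

        SketchExpiringCell(ByteBuffer name, ByteBuffer value, long timestamp, int ttl, int localExpirationTime)
        {
            super(name, value, timestamp);
            this.ttl = ttl;
            this.localExpirationTime = localExpirationTime;
        }
    }

    public class SimpleWriterSketch
    {
        // Mirrors addColumn(name, value, timestamp): a plain, non-expiring cell.
        static SketchCell regular(ByteBuffer name, ByteBuffer value, long timestamp)
        {
            return new SketchCell(name, value, timestamp);
        }

        // Mirrors addExpiringColumn: the caller supplies an absolute expiration in
        // milliseconds, which is stored as whole seconds.
        static SketchExpiringCell expiring(ByteBuffer name, ByteBuffer value, long timestamp,
                                           int ttl, long expirationTimestampMS)
        {
            return new SketchExpiringCell(name, value, timestamp, ttl, (int) (expirationTimestampMS / 1000));
        }

        public static void main(String[] args)
        {
            ByteBuffer n = ByteBuffer.wrap("c1".getBytes(StandardCharsets.UTF_8));
            ByteBuffer v = ByteBuffer.wrap("v1".getBytes(StandardCharsets.UTF_8));
            long now = System.currentTimeMillis();
            SketchExpiringCell e = expiring(n, v, now * 1000, 3600, now + 3600 * 1000L);
            System.out.println("expires at (seconds since epoch): " + e.localExpirationTime);
        }
    }
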
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index 17d9b0e..ac8e2b2 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -246,8 +246,8 @@ public class SSTableWriter extends SSTable
                 OnDiskAtom atom = iter.next();
                 if (atom == null)
                     break;
-                if (atom instanceof CounterColumn)
-                    atom = ((CounterColumn) atom).markDeltaToBeCleared();
+                if (atom instanceof CounterCell)
+                    atom = ((CounterCell) atom).markDeltaToBeCleared();
 
                 int deletionTime = atom.getLocalDeletionTime();
                 if (deletionTime < Integer.MAX_VALUE)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/service/RowDataResolver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/RowDataResolver.java b/src/java/org/apache/cassandra/service/RowDataResolver.java
index 2cbda33..f1948cd 100644
--- a/src/java/org/apache/cassandra/service/RowDataResolver.java
+++ b/src/java/org/apache/cassandra/service/RowDataResolver.java
@@ -149,7 +149,7 @@ public class RowDataResolver extends AbstractRowResolver
         // this will handle removing columns and subcolumns that are suppressed by a row or
         // supercolumn tombstone.
         QueryFilter filter = new QueryFilter(null, resolved.metadata().cfName, new IdentityQueryFilter(), now);
-        List<CloseableIterator<Column>> iters = new ArrayList<CloseableIterator<Column>>();
+        List<CloseableIterator<Cell>> iters = new ArrayList<CloseableIterator<Cell>>();
         for (ColumnFamily version : versions)
         {
             if (version == null)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index cf53865..6f362db 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -297,9 +297,9 @@ public class StorageProxy implements StorageProxyMBean
         // that expected don't have. So we just check that for each column in expected:
         //   - if it is a tombstone, whether current has no column or a tombstone;
         //   - otherwise, that current has a live column with the same value.
-        for (Column e : expected)
+        for (Cell e : expected)
         {
-            Column c = current.getColumn(e.name());
+            Cell c = current.getColumn(e.name());
             if (e.isLive(now))
             {
                 if (!(c != null && c.isLive(now) && c.value().equals(e.value())))

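The loop above is the core of the CAS pre-condition check: every cell in expected must either be a tombstone matched by an absent or deleted cell in current, or a live cell whose value equals the corresponding live cell read from current. A self-contained restatement of that predicate follows; the maps and the SimpleCell type are illustrative stand-ins for the real ColumnFamily and Cell classes.

    import java.util.HashMap;
    import java.util.Map;

    public class CasPreconditionSketch
    {
        // Illustrative stand-in for a cell; 'live == false' models a tombstone.
        static final class SimpleCell
        {
            final String value;
            final boolean live;

            SimpleCell(String value, boolean live)
            {
                this.value = value;
                this.live = live;
            }
        }

        // True when 'current' satisfies every condition in 'expected', following
        // the same case split as the loop in the hunk above.
        static boolean conditionsHold(Map<String, SimpleCell> expected, Map<String, SimpleCell> current)
        {
            for (Map.Entry<String, SimpleCell> e : expected.entrySet())
            {
                SimpleCell c = current.get(e.getKey());
                if (e.getValue().live)
                {
                    // a live expected cell needs a live current cell with the same value
                    if (c == null || !c.live || !c.value.equals(e.getValue().value))
                        return false;
                }
                else
                {
                    // an expected tombstone is satisfied by no cell at all, or by a tombstone
                    if (c != null && c.live)
                        return false;
                }
            }
            return true;
        }

        public static void main(String[] args)
        {
            Map<String, SimpleCell> expected = new HashMap<>();
            expected.put("c1", new SimpleCell("v1", true));
            Map<String, SimpleCell> current = new HashMap<>();
            current.put("c1", new SimpleCell("v1", true));
            System.out.println(conditionsHold(expected, current)); // true
            current.put("c1", new SimpleCell("v2", true));
            System.out.println(conditionsHold(expected, current)); // false
        }
    }
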
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
index d4748b0..825f270 100644
--- a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
@@ -249,14 +249,14 @@ abstract class AbstractQueryPager implements QueryPager
              : discardTail(cf, toDiscard, newCf, cf.iterator(), tester);
     }
 
-    private int discardHead(ColumnFamily cf, int toDiscard, ColumnFamily copy, Iterator<Column> iter, DeletionInfo.InOrderTester tester)
+    private int discardHead(ColumnFamily cf, int toDiscard, ColumnFamily copy, Iterator<Cell> iter, DeletionInfo.InOrderTester tester)
     {
         ColumnCounter counter = columnCounter();
 
         // Discard the first 'toDiscard' live
         while (iter.hasNext())
         {
-            Column c = iter.next();
+            Cell c = iter.next();
             counter.count(c, tester);
             if (counter.live() > toDiscard)
             {
@@ -268,7 +268,7 @@ abstract class AbstractQueryPager implements QueryPager
         return Math.min(counter.live(), toDiscard);
     }
 
-    private int discardTail(ColumnFamily cf, int toDiscard, ColumnFamily copy, Iterator<Column> iter, DeletionInfo.InOrderTester tester)
+    private int discardTail(ColumnFamily cf, int toDiscard, ColumnFamily copy, Iterator<Cell> iter, DeletionInfo.InOrderTester tester)
     {
         // Redoing the counting like that is not extremely efficient.
         // This is called only for reversed slices or in the case of a race between
@@ -279,7 +279,7 @@ abstract class AbstractQueryPager implements QueryPager
         // Discard the last 'toDiscard' live (so stop adding as soon as we're past 'liveCount - toDiscard')
         while (iter.hasNext())
         {
-            Column c = iter.next();
+            Cell c = iter.next();
             counter.count(c, tester);
             if (counter.live() > liveCount - toDiscard)
                 break;

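discardHead and discardTail walk the cells in iteration order and count the live ones: the head variant skips cells until more than toDiscard live cells have been seen and copies everything from that point on, while the tail variant copies until it reaches liveCount - toDiscard live cells and then stops. A minimal, self-contained sketch of the head case is shown below; it uses plain strings and treats every entry as live, an assumption made only to keep the example short.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class DiscardHeadSketch
    {
        // Copies everything after the first 'toDiscard' live entries into 'copy'
        // and returns how many entries were actually discarded. Strings stand in
        // for cells, and the ColumnCounter / DeletionInfo.InOrderTester of the
        // real code are reduced to a plain counter.
        static int discardHead(Iterator<String> iter, int toDiscard, List<String> copy)
        {
            int live = 0;
            while (iter.hasNext())
            {
                String c = iter.next();
                live++;
                if (live > toDiscard)
                {
                    copy.add(c);               // first entry past the discarded prefix
                    while (iter.hasNext())
                        copy.add(iter.next()); // and everything after it
                }
            }
            return Math.min(live, toDiscard);
        }

        public static void main(String[] args)
        {
            List<String> copy = new ArrayList<>();
            int discarded = discardHead(Arrays.asList("a", "b", "c", "d").iterator(), 2, copy);
            System.out.println(discarded + " " + copy); // prints: 2 [c, d]
        }
    }
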
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/service/paxos/Commit.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/paxos/Commit.java b/src/java/org/apache/cassandra/service/paxos/Commit.java
index 9904045..1f95d04 100644
--- a/src/java/org/apache/cassandra/service/paxos/Commit.java
+++ b/src/java/org/apache/cassandra/service/paxos/Commit.java
@@ -30,7 +30,7 @@ import java.nio.ByteBuffer;
 import com.google.common.base.Objects;
 
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.db.Column;
+import org.apache.cassandra.db.Cell;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.ColumnSerializer;
 import org.apache.cassandra.db.EmptyColumns;
@@ -120,8 +120,8 @@ public class Commit
         // the collection and we want that to have a lower timestamp than our new values. Since tombstones win over normal inserts, using t-1
         // should not be a problem in general (see #6069).
         cf.deletionInfo().updateAllTimestamp(t-1);
-        for (Column column : updates)
-            cf.addAtom(column.withUpdatedTimestamp(t));
+        for (Cell cell : updates)
+            cf.addAtom(cell.withUpdatedTimestamp(t));
         return cf;
     }
 

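The reason for t-1 is the tie-breaking rule applied when cells are reconciled: with equal timestamps a tombstone beats a regular value, so a deletion stamped at the ballot time t would shadow the very values the commit writes at t; rewriting the deletion info to t-1 keeps it strictly older. The toy reconcile function below illustrates that tie-break only; it is not the real Cassandra reconciliation code.

    public class ReconcileSketch
    {
        // Illustrative cell: 'tombstone == true' models a deletion marker.
        static final class C
        {
            final long timestamp;
            final boolean tombstone;

            C(long timestamp, boolean tombstone)
            {
                this.timestamp = timestamp;
                this.tombstone = tombstone;
            }
        }

        // Higher timestamp wins; on a timestamp tie, the tombstone wins.
        static C reconcile(C a, C b)
        {
            if (a.timestamp != b.timestamp)
                return a.timestamp > b.timestamp ? a : b;
            if (a.tombstone != b.tombstone)
                return a.tombstone ? a : b;
            return a; // same timestamp, same kind: either one will do here
        }

        public static void main(String[] args)
        {
            long t = 1000;
            C update = new C(t, false);
            System.out.println(reconcile(update, new C(t, true)).tombstone);     // true: a deletion at t shadows the update
            System.out.println(reconcile(update, new C(t - 1, true)).tombstone); // false: at t-1 it no longer does
        }
    }
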
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index 4705513..8c0e594 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -128,65 +128,65 @@ public class CassandraServer implements Cassandra.Iface
         return columnFamilyKeyMap;
     }
 
-    public List<ColumnOrSuperColumn> thriftifyColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder, long now)
+    public List<ColumnOrSuperColumn> thriftifyColumns(Collection<Cell> cells, boolean reverseOrder, long now)
     {
-        ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
-        for (org.apache.cassandra.db.Column column : columns)
+        ArrayList<ColumnOrSuperColumn> thriftColumns = new ArrayList<ColumnOrSuperColumn>(cells.size());
+        for (Cell cell : cells)
         {
-            if (column.isMarkedForDelete(now))
+            if (cell.isMarkedForDelete(now))
                 continue;
 
-            thriftColumns.add(thriftifyColumnWithName(column, column.name().toByteBuffer()));
+            thriftColumns.add(thriftifyColumnWithName(cell, cell.name().toByteBuffer()));
         }
 
         // we have to do the reversing here, since internally we pass results around in ColumnFamily
-        // objects, which always sort their columns in the "natural" order
+        // objects, which always sort their cells in the "natural" order
         // TODO this is inconvenient for direct users of StorageProxy
         if (reverseOrder)
             Collections.reverse(thriftColumns);
         return thriftColumns;
     }
 
-    private ColumnOrSuperColumn thriftifyColumnWithName(org.apache.cassandra.db.Column column, ByteBuffer newName)
+    private ColumnOrSuperColumn thriftifyColumnWithName(Cell cell, ByteBuffer newName)
     {
-        if (column instanceof org.apache.cassandra.db.CounterColumn)
-            return new ColumnOrSuperColumn().setCounter_column(thriftifySubCounter(column).setName(newName));
+        if (cell instanceof CounterCell)
+            return new ColumnOrSuperColumn().setCounter_column(thriftifySubCounter(cell).setName(newName));
         else
-            return new ColumnOrSuperColumn().setColumn(thriftifySubColumn(column).setName(newName));
+            return new ColumnOrSuperColumn().setColumn(thriftifySubColumn(cell).setName(newName));
     }
 
-    private Column thriftifySubColumn(org.apache.cassandra.db.Column column)
+    private Column thriftifySubColumn(Cell cell)
     {
-        assert !(column instanceof org.apache.cassandra.db.CounterColumn);
+        assert !(cell instanceof CounterCell);
 
-        Column thrift_column = new Column(column.name().toByteBuffer()).setValue(column.value()).setTimestamp(column.timestamp());
-        if (column instanceof ExpiringColumn)
+        Column thrift_column = new Column(cell.name().toByteBuffer()).setValue(cell.value()).setTimestamp(cell.timestamp());
+        if (cell instanceof ExpiringCell)
         {
-            thrift_column.setTtl(((ExpiringColumn) column).getTimeToLive());
+            thrift_column.setTtl(((ExpiringCell) cell).getTimeToLive());
         }
         return thrift_column;
     }
 
-    private List<Column> thriftifyColumnsAsColumns(Collection<org.apache.cassandra.db.Column> columns, long now)
+    private List<Column> thriftifyColumnsAsColumns(Collection<Cell> cells, long now)
     {
-        List<Column> thriftColumns = new ArrayList<Column>(columns.size());
-        for (org.apache.cassandra.db.Column column : columns)
+        List<Column> thriftColumns = new ArrayList<Column>(cells.size());
+        for (Cell cell : cells)
         {
-            if (column.isMarkedForDelete(now))
+            if (cell.isMarkedForDelete(now))
                 continue;
 
-            thriftColumns.add(thriftifySubColumn(column));
+            thriftColumns.add(thriftifySubColumn(cell));
         }
         return thriftColumns;
     }
 
-    private CounterColumn thriftifySubCounter(org.apache.cassandra.db.Column column)
+    private CounterColumn thriftifySubCounter(Cell cell)
     {
-        assert column instanceof org.apache.cassandra.db.CounterColumn;
-        return new CounterColumn(column.name().toByteBuffer(), CounterContext.instance().total(column.value()));
+        assert cell instanceof CounterCell;
+        return new CounterColumn(cell.name().toByteBuffer(), CounterContext.instance().total(cell.value()));
     }
 
-    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns,
+    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<Cell> cells,
                                                             boolean reverseOrder,
                                                             long now,
                                                             boolean subcolumnsOnly,
@@ -194,13 +194,13 @@ public class CassandraServer implements Cassandra.Iface
     {
         if (subcolumnsOnly)
         {
-            ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
-            for (org.apache.cassandra.db.Column column : columns)
+            ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(cells.size());
+            for (Cell cell : cells)
             {
-                if (column.isMarkedForDelete(now))
+                if (cell.isMarkedForDelete(now))
                     continue;
 
-                thriftSuperColumns.add(thriftifyColumnWithName(column, SuperColumns.subName(column.name())));
+                thriftSuperColumns.add(thriftifyColumnWithName(cell, SuperColumns.subName(cell.name())));
             }
             if (reverseOrder)
                 Collections.reverse(thriftSuperColumns);
@@ -209,28 +209,28 @@ public class CassandraServer implements Cassandra.Iface
         else
         {
             if (isCounterCF)
-                return thriftifyCounterSuperColumns(columns, reverseOrder, now);
+                return thriftifyCounterSuperColumns(cells, reverseOrder, now);
             else
-                return thriftifySuperColumns(columns, reverseOrder, now);
+                return thriftifySuperColumns(cells, reverseOrder, now);
         }
     }
 
-    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder, long now)
+    private List<ColumnOrSuperColumn> thriftifySuperColumns(Collection<Cell> cells, boolean reverseOrder, long now)
     {
-        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
+        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(cells.size());
         SuperColumn current = null;
-        for (org.apache.cassandra.db.Column column : columns)
+        for (Cell cell : cells)
         {
-            if (column.isMarkedForDelete(now))
+            if (cell.isMarkedForDelete(now))
                 continue;
 
-            ByteBuffer scName = SuperColumns.scName(column.name());
+            ByteBuffer scName = SuperColumns.scName(cell.name());
             if (current == null || !scName.equals(current.bufferForName()))
             {
                 current = new SuperColumn(scName, new ArrayList<Column>());
                 thriftSuperColumns.add(new ColumnOrSuperColumn().setSuper_column(current));
             }
-            current.getColumns().add(thriftifySubColumn(column).setName(SuperColumns.subName(column.name())));
+            current.getColumns().add(thriftifySubColumn(cell).setName(SuperColumns.subName(cell.name())));
         }
 
         if (reverseOrder)
@@ -239,22 +239,22 @@ public class CassandraServer implements Cassandra.Iface
         return thriftSuperColumns;
     }
 
-    private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<org.apache.cassandra.db.Column> columns, boolean reverseOrder, long now)
+    private List<ColumnOrSuperColumn> thriftifyCounterSuperColumns(Collection<Cell> cells, boolean reverseOrder, long now)
     {
-        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(columns.size());
+        ArrayList<ColumnOrSuperColumn> thriftSuperColumns = new ArrayList<ColumnOrSuperColumn>(cells.size());
         CounterSuperColumn current = null;
-        for (org.apache.cassandra.db.Column column : columns)
+        for (Cell cell : cells)
         {
-            if (column.isMarkedForDelete(now))
+            if (cell.isMarkedForDelete(now))
                 continue;
 
-            ByteBuffer scName = SuperColumns.scName(column.name());
+            ByteBuffer scName = SuperColumns.scName(cell.name());
             if (current == null || !scName.equals(current.bufferForName()))
             {
                 current = new CounterSuperColumn(scName, new ArrayList<CounterColumn>());
                 thriftSuperColumns.add(new ColumnOrSuperColumn().setCounter_super_column(current));
             }
-            current.getColumns().add(thriftifySubCounter(column).setName(SuperColumns.subName(column.name())));
+            current.getColumns().add(thriftifySubCounter(cell).setName(SuperColumns.subName(cell.name())));
         }
 
         if (reverseOrder)

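thriftifySuperColumns and thriftifyCounterSuperColumns rely on the cells arriving in sorted order, so consecutive cells that share a super column name prefix can be folded into a single SuperColumn (or CounterSuperColumn), with cells already marked for delete skipped along the way. The standalone sketch below shows the same group-consecutive-by-prefix loop over plain strings; colon-separated names stand in for SuperColumns.scName/subName.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class GroupByPrefixSketch
    {
        // Groups already-sorted "super:sub" names by their "super" prefix,
        // preserving encounter order, the way consecutive cells sharing a super
        // column name are folded into one SuperColumn above.
        static Map<String, List<String>> group(List<String> sortedNames)
        {
            Map<String, List<String>> groups = new LinkedHashMap<>();
            String currentPrefix = null;
            List<String> current = null;
            for (String name : sortedNames)
            {
                int sep = name.indexOf(':');
                String prefix = name.substring(0, sep); // stands in for SuperColumns.scName
                String sub = name.substring(sep + 1);   // stands in for SuperColumns.subName
                if (currentPrefix == null || !prefix.equals(currentPrefix))
                {
                    currentPrefix = prefix;
                    current = new ArrayList<>();
                    groups.put(prefix, current);
                }
                current.add(sub);
            }
            return groups;
        }

        public static void main(String[] args)
        {
            System.out.println(group(Arrays.asList("s1:a", "s1:b", "s2:a")));
            // prints: {s1=[a, b], s2=[a]}
        }
    }
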
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/thrift/ThriftValidation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftValidation.java b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
index b1eb50b..c0c22f1 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftValidation.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftValidation.java
@@ -198,7 +198,7 @@ public class ThriftValidation
     private static void validateColumnNames(CFMetaData metadata, ByteBuffer superColumnName, Iterable<ByteBuffer> column_names)
     throws org.apache.cassandra.exceptions.InvalidRequestException
     {
-        int maxNameLength = org.apache.cassandra.db.Column.MAX_NAME_LENGTH;
+        int maxNameLength = Cell.MAX_NAME_LENGTH;
 
         if (superColumnName != null)
         {
@@ -349,8 +349,8 @@ public class ThriftValidation
             if (column.ttl <= 0)
                 throw new org.apache.cassandra.exceptions.InvalidRequestException("ttl must be positive");
 
-            if (column.ttl > ExpiringColumn.MAX_TTL)
-                throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("ttl is too large. requested (%d) maximum (%d)", column.ttl, ExpiringColumn.MAX_TTL));
+            if (column.ttl > ExpiringCell.MAX_TTL)
+                throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("ttl is too large. requested (%d) maximum (%d)", column.ttl, ExpiringCell.MAX_TTL));
         }
         else
         {
@@ -423,7 +423,7 @@ public class ThriftValidation
     }
 
     /**
-     * Validates the data part of the column (everything in the Column object but the name, which is assumed to be valid)
+     * Validates the data part of the column (everything in the column object but the name, which is assumed to be valid)
      */
     public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
     {
@@ -464,12 +464,12 @@ public class ThriftValidation
                                                                               metadata.ksName));
     }
 
-    private static org.apache.cassandra.db.Column asDBColumn(CellName name, Column column)
+    private static Cell asDBColumn(CellName name, Column column)
     {
         if (column.ttl <= 0)
-            return new org.apache.cassandra.db.Column(name, column.value, column.timestamp);
+            return new Cell(name, column.value, column.timestamp);
         else
-            return new org.apache.cassandra.db.ExpiringColumn(name, column.value, column.timestamp, column.ttl);
+            return new ExpiringCell(name, column.value, column.timestamp, column.ttl);
     }
 
     /**

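Taken together, the hunks in this file say two things: a client-supplied ttl, when set, must be positive and no larger than ExpiringCell.MAX_TTL, and asDBColumn maps a Thrift Column with no ttl to a plain Cell and one with a ttl to an ExpiringCell. A compact, self-contained restatement of that decision follows; the MAX_TTL constant here is a made-up placeholder for illustration, not the real value.

    public class TtlMappingSketch
    {
        // Placeholder bound for illustration only; the real limit is ExpiringCell.MAX_TTL.
        static final int MAX_TTL = 20 * 365 * 24 * 3600;

        enum Kind { REGULAR, EXPIRING }

        // A column with no ttl becomes a plain Cell; one with a ttl in range becomes
        // an ExpiringCell; an out-of-range ttl is rejected, mirroring the hunks above.
        static Kind kindFor(int ttl)
        {
            if (ttl <= 0)
                return Kind.REGULAR;
            if (ttl > MAX_TTL)
                throw new IllegalArgumentException("ttl is too large. requested (" + ttl + ") maximum (" + MAX_TTL + ")");
            return Kind.EXPIRING;
        }

        public static void main(String[] args)
        {
            System.out.println(kindFor(0));    // REGULAR
            System.out.println(kindFor(3600)); // EXPIRING
        }
    }
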
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/tools/SSTableExport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index c5fc970..6cce81d 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -147,9 +147,9 @@ public class SSTableExport
 
     private static List<Object> serializeAtom(OnDiskAtom atom, CFMetaData cfMetaData)
     {
-        if (atom instanceof Column)
+        if (atom instanceof Cell)
         {
-            return serializeColumn((Column) atom, cfMetaData);
+            return serializeColumn((Cell) atom, cfMetaData);
         }
         else
         {
@@ -166,46 +166,46 @@ public class SSTableExport
     }
 
     /**
-     * Serialize a given column to the JSON format
+     * Serialize a given cell to the JSON format
      *
-     * @param column     column presentation
+     * @param cell     cell presentation
      * @param comparator columns comparator
      * @param cfMetaData Column Family metadata (to get validator)
-     * @return column as serialized list
+     * @return cell as serialized list
      */
-    private static List<Object> serializeColumn(Column column, CFMetaData cfMetaData)
+    private static List<Object> serializeColumn(Cell cell, CFMetaData cfMetaData)
     {
         CellNameType comparator = cfMetaData.comparator;
         ArrayList<Object> serializedColumn = new ArrayList<Object>();
 
-        ByteBuffer value = ByteBufferUtil.clone(column.value());
+        ByteBuffer value = ByteBufferUtil.clone(cell.value());
 
-        serializedColumn.add(comparator.getString(column.name()));
-        if (column instanceof DeletedColumn)
+        serializedColumn.add(comparator.getString(cell.name()));
+        if (cell instanceof DeletedCell)
         {
             serializedColumn.add(ByteBufferUtil.bytesToHex(value));
         }
         else
         {
-            AbstractType<?> validator = cfMetaData.getValueValidator(column.name());
+            AbstractType<?> validator = cfMetaData.getValueValidator(cell.name());
             serializedColumn.add(validator.getString(value));
         }
-        serializedColumn.add(column.timestamp());
+        serializedColumn.add(cell.timestamp());
 
-        if (column instanceof DeletedColumn)
+        if (cell instanceof DeletedCell)
         {
             serializedColumn.add("d");
         }
-        else if (column instanceof ExpiringColumn)
+        else if (cell instanceof ExpiringCell)
         {
             serializedColumn.add("e");
-            serializedColumn.add(((ExpiringColumn) column).getTimeToLive());
-            serializedColumn.add(column.getLocalDeletionTime());
+            serializedColumn.add(((ExpiringCell) cell).getTimeToLive());
+            serializedColumn.add(cell.getLocalDeletionTime());
         }
-        else if (column instanceof CounterColumn)
+        else if (cell instanceof CounterCell)
         {
             serializedColumn.add("c");
-            serializedColumn.add(((CounterColumn) column).timestampOfLastDelete());
+            serializedColumn.add(((CounterCell) cell).timestampOfLastDelete());
         }
 
         return serializedColumn;

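For orientation, serializeColumn emits each cell as a JSON list of name, value and timestamp, followed by an optional marker and extra fields depending on the cell's flavour: "d" for a deleted cell, "e" plus the ttl and local deletion time for an expiring cell, and "c" plus the timestamp of the last delete for a counter. Purely illustrative output (made-up names, values and timestamps) might look like:

    a live cell:      ["name1", "value1", 1387300000000000]
    a deleted cell:   ["name2", "76616c756532", 1387300000000000, "d"]
    an expiring cell: ["name3", "value3", 1387300000000000, "e", 86400, 1387386400]
    a counter cell:   ["name4", "42", 1387300000000000, "c", 1387200000000000]
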
http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/tools/SSTableImport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableImport.java b/src/java/org/apache/cassandra/tools/SSTableImport.java
index 0a6ef6b..71b687b 100644
--- a/src/java/org/apache/cassandra/tools/SSTableImport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableImport.java
@@ -110,7 +110,7 @@ public class SSTableImport
                 CellNameType comparator = oldSCFormat ? new SimpleDenseCellNameType(SuperColumns.getComparatorFor(meta, isSubColumn)) : meta.comparator;
                 List fields = (List<?>) json;
 
-                assert fields.size() >= 3 : "Column definition should have at least 3";
+                assert fields.size() >= 3 : "Cell definition should have at least 3";
 
                 name  = stringAsType((String) fields.get(0), comparator.asAbstractType());
                 timestamp = (Long) fields.get(2);
@@ -257,11 +257,11 @@ public class SSTableImport
 
             if (col.isExpiring())
             {
-                cfamily.addColumn(new ExpiringColumn(cname, col.getValue(), col.timestamp, col.ttl, col.localExpirationTime));
+                cfamily.addColumn(new ExpiringCell(cname, col.getValue(), col.timestamp, col.ttl, col.localExpirationTime));
             }
             else if (col.isCounter())
             {
-                cfamily.addColumn(new CounterColumn(cname, col.getValue(), col.timestamp, col.timestampOfLastDelete));
+                cfamily.addColumn(new CounterCell(cname, col.getValue(), col.timestamp, col.timestampOfLastDelete));
             }
             else if (col.isDeleted())
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
index aeec284..d4b0b77 100644
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@ -76,7 +76,7 @@ public class SSTableMetadataViewer
         long[] ecch = metadata.estimatedColumnCount.getBuckets(false);
 
         out.println(String.format("%-10s%18s%18s",
-                                  "Count", "Row Size", "Column Count"));
+                                  "Count", "Row Size", "Cell Count"));
 
         for (int i = 0; i < offsets.length; i++)
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/tracing/Tracing.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tracing/Tracing.java b/src/java/org/apache/cassandra/tracing/Tracing.java
index b1f618d..963ee0c 100644
--- a/src/java/org/apache/cassandra/tracing/Tracing.java
+++ b/src/java/org/apache/cassandra/tracing/Tracing.java
@@ -93,14 +93,14 @@ public class Tracing
 
     private static void addColumn(ColumnFamily cf, CellName name, ByteBuffer value)
     {
-        cf.addColumn(new ExpiringColumn(name, value, System.currentTimeMillis(), TTL));
+        cf.addColumn(new ExpiringCell(name, value, System.currentTimeMillis(), TTL));
     }
 
     public void addParameterColumns(ColumnFamily cf, Map<String, String> rawPayload)
     {
         for (Map.Entry<String, String> entry : rawPayload.entrySet())
         {
-            cf.addColumn(new ExpiringColumn(buildName(cf.metadata(), "parameters", entry.getKey()),
+            cf.addColumn(new ExpiringCell(buildName(cf.metadata(), "parameters", entry.getKey()),
                                             bytes(entry.getValue()), System.currentTimeMillis(), TTL));
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/utils/BloomCalculations.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/BloomCalculations.java b/src/java/org/apache/cassandra/utils/BloomCalculations.java
index 17966e8..b73f531 100644
--- a/src/java/org/apache/cassandra/utils/BloomCalculations.java
+++ b/src/java/org/apache/cassandra/utils/BloomCalculations.java
@@ -35,7 +35,7 @@ class BloomCalculations {
 
     /**
      * In the following keyspaceName, the row 'i' shows false positive rates if i buckets
-     * per element are used.  Column 'j' shows false positive rates if j hash
+     * per element are used.  Cell 'j' shows false positive rates if j hash
      * functions are used.  The first row is 'i=0', the first column is 'j=0'.
      * Each cell (i,j) shows the false positive rate determined by using i buckets per
      * element and j hash functions.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/src/java/org/apache/cassandra/utils/CounterId.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/CounterId.java b/src/java/org/apache/cassandra/utils/CounterId.java
index 4b6fd46..a8545c0 100644
--- a/src/java/org/apache/cassandra/utils/CounterId.java
+++ b/src/java/org/apache/cassandra/utils/CounterId.java
@@ -26,7 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import com.google.common.base.Objects;
 
-import org.apache.cassandra.db.CounterColumn;
+import org.apache.cassandra.db.CounterCell;
 import org.apache.cassandra.db.SystemKeyspace;
 
 public class CounterId implements Comparable<CounterId>
@@ -174,7 +174,7 @@ public class CounterId implements Comparable<CounterId>
             initialId = getLocalId();
         }
 
-        public void maybeRenew(CounterColumn column)
+        public void maybeRenew(CounterCell column)
         {
             if (!renewed && column.hasCounterId(initialId))
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/MeteredFlusherTest.java b/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
index 99ae65a..07ddbd6 100644
--- a/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
+++ b/test/long/org/apache/cassandra/db/MeteredFlusherTest.java
@@ -54,7 +54,7 @@ public class MeteredFlusherTest extends SchemaLoader
                 ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "_CF" + i);
                 // don't cheat by allocating this outside of the loop; that defeats the purpose of deliberately using lots of memory
                 ByteBuffer value = ByteBuffer.allocate(100000);
-                cf.addColumn(new Column(Util.cellname("c"), value));
+                cf.addColumn(new Cell(Util.cellname("c"), value));
                 rm.add(cf);
                 rm.applyUnsafe();
             }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e50d6af1/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index 68a2489..5d68a3b 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -81,7 +81,7 @@ public class LongCompactionsTest extends SchemaLoader
             for (int j = 0; j < rowsPerSSTable; j++)
             {
                 String key = String.valueOf(j);
-                Column[] cols = new Column[colsPerRow];
+                Cell[] cols = new Cell[colsPerRow];
                 for (int i = 0; i < colsPerRow; i++)
                 {
                     // last sstable has highest timestamps