Posted to commits@cassandra.apache.org by jb...@apache.org on 2009/07/30 17:30:27 UTC

svn commit: r799331 [9/29] - in /incubator/cassandra/trunk: ./ src/java/org/apache/cassandra/concurrent/ src/java/org/apache/cassandra/config/ src/java/org/apache/cassandra/db/ src/java/org/apache/cassandra/dht/ src/java/org/apache/cassandra/gms/ src/j...

Modified: incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java?rev=799331&r1=799330&r2=799331&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java (original)
+++ incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceByNamesReadCommand.java Thu Jul 30 15:30:21 2009
@@ -1,122 +1,122 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.commons.lang.StringUtils;
-
-import org.apache.cassandra.service.ColumnParent;
-import org.apache.cassandra.db.filter.QueryPath;
-import org.apache.cassandra.db.filter.NamesQueryFilter;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.config.DatabaseDescriptor;
-
-public class SliceByNamesReadCommand extends ReadCommand
-{
-    public final QueryPath columnParent;
-    public final SortedSet<byte[]> columnNames;
-
-    public SliceByNamesReadCommand(String table, String key, ColumnParent column_parent, Collection<byte[]> columnNames)
-    {
-        this(table, key, new QueryPath(column_parent), columnNames);
-    }
-
-    public SliceByNamesReadCommand(String table, String key, QueryPath path, Collection<byte[]> columnNames)
-    {
-        super(table, key, CMD_TYPE_GET_SLICE_BY_NAMES);
-        this.columnParent = path;
-        this.columnNames = new TreeSet<byte[]>(getComparator());
-        this.columnNames.addAll(columnNames);
-    }
-
-    @Override
-    public String getColumnFamilyName()
-    {
-        return columnParent.columnFamilyName;
-    }
-
-    @Override
-    public ReadCommand copy()
-    {
-    ReadCommand readCommand = new SliceByNamesReadCommand(table, key, columnParent, columnNames);
-        readCommand.setDigestQuery(isDigestQuery());
-        return readCommand;
-    }
-    
-    @Override
-    public Row getRow(Table table) throws IOException
-    {        
-        return table.getRow(new NamesQueryFilter(key, columnParent, columnNames));
-    }
-
-    @Override
-    public String toString()
-    {
-        return "SliceByNamesReadCommand(" +
-               "table='" + table + '\'' +
-               ", key='" + key + '\'' +
-               ", columnParent='" + columnParent + '\'' +
-               ", columns=[" + getComparator().getString(columnNames) + "]" +
-               ')';
-    }
-
-}
-
-class SliceByNamesReadCommandSerializer extends ReadCommandSerializer
-{
-    @Override
-    public void serialize(ReadCommand rm, DataOutputStream dos) throws IOException
-    {
-        SliceByNamesReadCommand realRM = (SliceByNamesReadCommand)rm;
-        dos.writeBoolean(realRM.isDigestQuery());
-        dos.writeUTF(realRM.table);
-        dos.writeUTF(realRM.key);
-        realRM.columnParent.serialize(dos);
-        dos.writeInt(realRM.columnNames.size());
-        if (realRM.columnNames.size() > 0)
-        {
-            for (byte[] cName : realRM.columnNames)
-            {
-                ColumnSerializer.writeName(cName, dos);
-            }
-        }
-    }
-
-    @Override
-    public ReadCommand deserialize(DataInputStream dis) throws IOException
-    {
-        boolean isDigest = dis.readBoolean();
-        String table = dis.readUTF();
-        String key = dis.readUTF();
-        QueryPath columnParent = QueryPath.deserialize(dis);
-
-        int size = dis.readInt();
-        List<byte[]> columns = new ArrayList<byte[]>();
-        for (int i = 0; i < size; ++i)
-        {
-            columns.add(ColumnSerializer.readName(dis));
-        }
-        SliceByNamesReadCommand rm = new SliceByNamesReadCommand(table, key, columnParent, columns);
-        rm.setDigestQuery(isDigest);
-        return rm;
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.*;
+
+import org.apache.commons.lang.StringUtils;
+
+import org.apache.cassandra.service.ColumnParent;
+import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.NamesQueryFilter;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.config.DatabaseDescriptor;
+
+public class SliceByNamesReadCommand extends ReadCommand
+{
+    public final QueryPath columnParent;
+    public final SortedSet<byte[]> columnNames;
+
+    public SliceByNamesReadCommand(String table, String key, ColumnParent column_parent, Collection<byte[]> columnNames)
+    {
+        this(table, key, new QueryPath(column_parent), columnNames);
+    }
+
+    public SliceByNamesReadCommand(String table, String key, QueryPath path, Collection<byte[]> columnNames)
+    {
+        super(table, key, CMD_TYPE_GET_SLICE_BY_NAMES);
+        this.columnParent = path;
+        this.columnNames = new TreeSet<byte[]>(getComparator());
+        this.columnNames.addAll(columnNames);
+    }
+
+    @Override
+    public String getColumnFamilyName()
+    {
+        return columnParent.columnFamilyName;
+    }
+
+    @Override
+    public ReadCommand copy()
+    {
+    ReadCommand readCommand = new SliceByNamesReadCommand(table, key, columnParent, columnNames);
+        readCommand.setDigestQuery(isDigestQuery());
+        return readCommand;
+    }
+    
+    @Override
+    public Row getRow(Table table) throws IOException
+    {        
+        return table.getRow(new NamesQueryFilter(key, columnParent, columnNames));
+    }
+
+    @Override
+    public String toString()
+    {
+        return "SliceByNamesReadCommand(" +
+               "table='" + table + '\'' +
+               ", key='" + key + '\'' +
+               ", columnParent='" + columnParent + '\'' +
+               ", columns=[" + getComparator().getString(columnNames) + "]" +
+               ')';
+    }
+
+}
+
+class SliceByNamesReadCommandSerializer extends ReadCommandSerializer
+{
+    @Override
+    public void serialize(ReadCommand rm, DataOutputStream dos) throws IOException
+    {
+        SliceByNamesReadCommand realRM = (SliceByNamesReadCommand)rm;
+        dos.writeBoolean(realRM.isDigestQuery());
+        dos.writeUTF(realRM.table);
+        dos.writeUTF(realRM.key);
+        realRM.columnParent.serialize(dos);
+        dos.writeInt(realRM.columnNames.size());
+        if (realRM.columnNames.size() > 0)
+        {
+            for (byte[] cName : realRM.columnNames)
+            {
+                ColumnSerializer.writeName(cName, dos);
+            }
+        }
+    }
+
+    @Override
+    public ReadCommand deserialize(DataInputStream dis) throws IOException
+    {
+        boolean isDigest = dis.readBoolean();
+        String table = dis.readUTF();
+        String key = dis.readUTF();
+        QueryPath columnParent = QueryPath.deserialize(dis);
+
+        int size = dis.readInt();
+        List<byte[]> columns = new ArrayList<byte[]>();
+        for (int i = 0; i < size; ++i)
+        {
+            columns.add(ColumnSerializer.readName(dis));
+        }
+        SliceByNamesReadCommand rm = new SliceByNamesReadCommand(table, key, columnParent, columns);
+        rm.setDigestQuery(isDigest);
+        return rm;
+    }
+}
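
The serializer above writes a fixed field order: digest flag, table and key as modified UTF-8, the serialized QueryPath, a column count, then each column name via ColumnSerializer.writeName. A minimal, self-contained sketch of the same count-plus-length-prefixed round-trip pattern, using plain java.io (the writeName/readName helpers below are hypothetical stand-ins; the prefix width used by the real ColumnSerializer is not shown in this diff, so an int prefix is assumed):

    import java.io.*;
    import java.util.*;

    public class NameCodecSketch
    {
        // Hypothetical stand-in for ColumnSerializer.writeName: length-prefixed bytes.
        static void writeName(byte[] name, DataOutputStream dos) throws IOException
        {
            dos.writeInt(name.length);
            dos.write(name);
        }

        // Hypothetical stand-in for ColumnSerializer.readName.
        static byte[] readName(DataInputStream dis) throws IOException
        {
            byte[] name = new byte[dis.readInt()];
            dis.readFully(name);
            return name;
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream dos = new DataOutputStream(bos);
            List<byte[]> names = Arrays.asList("col1".getBytes("UTF-8"), "col2".getBytes("UTF-8"));
            // write the count first, then each name, mirroring serialize()
            dos.writeInt(names.size());
            for (byte[] n : names)
                writeName(n, dos);

            // read back in the same order
            DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
            int count = dis.readInt();
            for (int i = 0; i < count; i++)
                System.out.println(new String(readName(dis), "UTF-8"));
        }
    }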

Modified: incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceFromReadCommand.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceFromReadCommand.java?rev=799331&r1=799330&r2=799331&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceFromReadCommand.java (original)
+++ incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SliceFromReadCommand.java Thu Jul 30 15:30:21 2009
@@ -1,115 +1,115 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-import org.apache.cassandra.db.filter.QueryPath;
-import org.apache.cassandra.db.filter.SliceQueryFilter;
-import org.apache.cassandra.service.ColumnParent;
-
-public class SliceFromReadCommand extends ReadCommand
-{
-    public final QueryPath column_parent;
-    public final byte[] start, finish;
-    public final boolean isAscending;
-    public final int count;
-
-    public SliceFromReadCommand(String table, String key, ColumnParent column_parent, byte[] start, byte[] finish, boolean isAscending, int count)
-    {
-        this(table, key, new QueryPath(column_parent), start, finish, isAscending, count);
-    }
-
-    public SliceFromReadCommand(String table, String key, QueryPath columnParent, byte[] start, byte[] finish, boolean isAscending, int count)
-    {
-        super(table, key, CMD_TYPE_GET_SLICE);
-        this.column_parent = columnParent;
-        this.start = start;
-        this.finish = finish;
-        this.isAscending = isAscending;
-        this.count = count;
-    }
-
-    @Override
-    public String getColumnFamilyName()
-    {
-        return column_parent.columnFamilyName;
-    }
-
-    @Override
-    public ReadCommand copy()
-    {
-        ReadCommand readCommand = new SliceFromReadCommand(table, key, column_parent, start, finish, isAscending, count);
-        readCommand.setDigestQuery(isDigestQuery());
-        return readCommand;
-    }
-
-    @Override
-    public Row getRow(Table table) throws IOException
-    {
-        return table.getRow(new SliceQueryFilter(key, column_parent, start, finish, isAscending, count));
-    }
-
-    @Override
-    public String toString()
-    {
-        return "SliceFromReadCommand(" +
-               "table='" + table + '\'' +
-               ", key='" + key + '\'' +
-               ", column_parent='" + column_parent + '\'' +
-               ", start='" + getComparator().getString(start) + '\'' +
-               ", finish='" + getComparator().getString(finish) + '\'' +
-               ", isAscending=" + isAscending +
-               ", count=" + count +
-               ')';
-    }
-}
-
-class SliceFromReadCommandSerializer extends ReadCommandSerializer
-{
-    @Override
-    public void serialize(ReadCommand rm, DataOutputStream dos) throws IOException
-    {
-        SliceFromReadCommand realRM = (SliceFromReadCommand)rm;
-        dos.writeBoolean(realRM.isDigestQuery());
-        dos.writeUTF(realRM.table);
-        dos.writeUTF(realRM.key);
-        realRM.column_parent.serialize(dos);
-        ColumnSerializer.writeName(realRM.start, dos);
-        ColumnSerializer.writeName(realRM.finish, dos);
-        dos.writeBoolean(realRM.isAscending);
-        dos.writeInt(realRM.count);
-    }
-
-    @Override
-    public ReadCommand deserialize(DataInputStream dis) throws IOException
-    {
-        boolean isDigest = dis.readBoolean();
-        SliceFromReadCommand rm = new SliceFromReadCommand(dis.readUTF(),
-                                                           dis.readUTF(),
-                                                           QueryPath.deserialize(dis),
-                                                           ColumnSerializer.readName(dis),
-                                                           ColumnSerializer.readName(dis),
-                                                           dis.readBoolean(), 
-                                                           dis.readInt());
-        rm.setDigestQuery(isDigest);
-        return rm;
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.SliceQueryFilter;
+import org.apache.cassandra.service.ColumnParent;
+
+public class SliceFromReadCommand extends ReadCommand
+{
+    public final QueryPath column_parent;
+    public final byte[] start, finish;
+    public final boolean isAscending;
+    public final int count;
+
+    public SliceFromReadCommand(String table, String key, ColumnParent column_parent, byte[] start, byte[] finish, boolean isAscending, int count)
+    {
+        this(table, key, new QueryPath(column_parent), start, finish, isAscending, count);
+    }
+
+    public SliceFromReadCommand(String table, String key, QueryPath columnParent, byte[] start, byte[] finish, boolean isAscending, int count)
+    {
+        super(table, key, CMD_TYPE_GET_SLICE);
+        this.column_parent = columnParent;
+        this.start = start;
+        this.finish = finish;
+        this.isAscending = isAscending;
+        this.count = count;
+    }
+
+    @Override
+    public String getColumnFamilyName()
+    {
+        return column_parent.columnFamilyName;
+    }
+
+    @Override
+    public ReadCommand copy()
+    {
+        ReadCommand readCommand = new SliceFromReadCommand(table, key, column_parent, start, finish, isAscending, count);
+        readCommand.setDigestQuery(isDigestQuery());
+        return readCommand;
+    }
+
+    @Override
+    public Row getRow(Table table) throws IOException
+    {
+        return table.getRow(new SliceQueryFilter(key, column_parent, start, finish, isAscending, count));
+    }
+
+    @Override
+    public String toString()
+    {
+        return "SliceFromReadCommand(" +
+               "table='" + table + '\'' +
+               ", key='" + key + '\'' +
+               ", column_parent='" + column_parent + '\'' +
+               ", start='" + getComparator().getString(start) + '\'' +
+               ", finish='" + getComparator().getString(finish) + '\'' +
+               ", isAscending=" + isAscending +
+               ", count=" + count +
+               ')';
+    }
+}
+
+class SliceFromReadCommandSerializer extends ReadCommandSerializer
+{
+    @Override
+    public void serialize(ReadCommand rm, DataOutputStream dos) throws IOException
+    {
+        SliceFromReadCommand realRM = (SliceFromReadCommand)rm;
+        dos.writeBoolean(realRM.isDigestQuery());
+        dos.writeUTF(realRM.table);
+        dos.writeUTF(realRM.key);
+        realRM.column_parent.serialize(dos);
+        ColumnSerializer.writeName(realRM.start, dos);
+        ColumnSerializer.writeName(realRM.finish, dos);
+        dos.writeBoolean(realRM.isAscending);
+        dos.writeInt(realRM.count);
+    }
+
+    @Override
+    public ReadCommand deserialize(DataInputStream dis) throws IOException
+    {
+        boolean isDigest = dis.readBoolean();
+        SliceFromReadCommand rm = new SliceFromReadCommand(dis.readUTF(),
+                                                           dis.readUTF(),
+                                                           QueryPath.deserialize(dis),
+                                                           ColumnSerializer.readName(dis),
+                                                           ColumnSerializer.readName(dis),
+                                                           dis.readBoolean(), 
+                                                           dis.readInt());
+        rm.setDigestQuery(isDigest);
+        return rm;
+    }
+}
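
One detail worth noting in deserialize above: the stream reads are passed directly as constructor arguments. This is safe because Java evaluates argument lists left to right, so the reads happen in exactly the order serialize wrote the fields. A small self-contained illustration of the pattern (the field values are invented, not from the Cassandra source):

    import java.io.*;

    public class ArgOrderSketch
    {
        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream dos = new DataOutputStream(bos);
            // write fields in a fixed order, mirroring serialize()
            dos.writeUTF("Keyspace1");
            dos.writeUTF("row-key");
            dos.writeBoolean(true);
            dos.writeInt(100);

            DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
            // argument lists are evaluated left to right, so these reads
            // occur in exactly the order the fields were written
            describe(dis.readUTF(), dis.readUTF(), dis.readBoolean(), dis.readInt());
        }

        static void describe(String table, String key, boolean asc, int count)
        {
            System.out.println(table + "/" + key + " asc=" + asc + " count=" + count);
        }
    }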

Modified: incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SuperColumn.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SuperColumn.java?rev=799331&r1=799330&r2=799331&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SuperColumn.java (original)
+++ incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SuperColumn.java Thu Jul 30 15:30:21 2009
@@ -1,379 +1,379 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.Arrays;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.ConcurrentSkipListMap;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.log4j.Logger;
-
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.io.ICompactSerializer;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.MarshalException;
-
-/**
- * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( pmalik@facebook.com )
- */
-
-public final class SuperColumn implements IColumn
-{
-	private static Logger logger_ = Logger.getLogger(SuperColumn.class);
-
-    static SuperColumnSerializer serializer(AbstractType comparator)
-    {
-        return new SuperColumnSerializer(comparator);
-    }
-
-    private byte[] name_;
-    // TODO make subcolumn comparator configurable
-    private ConcurrentSkipListMap<byte[], IColumn> columns_;
-    private int localDeletionTime = Integer.MIN_VALUE;
-	private long markedForDeleteAt = Long.MIN_VALUE;
-    private AtomicInteger size_ = new AtomicInteger(0);
-
-    SuperColumn(byte[] name, AbstractType comparator)
-    {
-    	name_ = name;
-        columns_ = new ConcurrentSkipListMap<byte[], IColumn>(comparator);
-    }
-
-    public AbstractType getComparator()
-    {
-        return (AbstractType)columns_.comparator();
-    }
-
-    public SuperColumn cloneMeShallow()
-    {
-        SuperColumn sc = new SuperColumn(name_, getComparator());
-        sc.markForDeleteAt(localDeletionTime, markedForDeleteAt);
-        return sc;
-    }
-
-	public boolean isMarkedForDelete()
-	{
-		return markedForDeleteAt > Long.MIN_VALUE;
-	}
-
-    public byte[] name()
-    {
-    	return name_;
-    }
-
-    public Collection<IColumn> getSubColumns()
-    {
-    	return columns_.values();
-    }
-
-    public IColumn getSubColumn(byte[] columnName)
-    {
-        IColumn column = columns_.get(columnName);
-        assert column == null || column instanceof Column;
-        return column;
-    }
-
-    public int size()
-    {
-        /*
-         * return the size of the individual columns
-         * that make up the super column. This is an
-         * APPROXIMATION of the size, used only by the
-         * Memtable.
-        */
-        return size_.get();
-    }
-
-    /**
-     * This returns the size of the super-column when serialized.
-     * @see org.apache.cassandra.db.IColumn#serializedSize()
-    */
-    public int serializedSize()
-    {
-        /*
-         * Size of a super-column is =
-         *   size of a name (UtfPrefix + length of the string)
-         * + 1 byte to indicate if the super-column has been deleted
-         * + 4 bytes for size of the sub-columns
-         * + 4 bytes for the number of sub-columns
-         * + size of all the sub-columns.
-        */
-
-    	/*
-    	 * We store the string as UTF-8 encoded, so when we calculate the length, it
-    	 * should be converted to UTF-8.
-    	 */
-    	/*
-    	 * We need to keep the way we are calculating the column size in sync with the
-    	 * way we are calculating the size for the column family serializer.
-    	 */
-    	return IColumn.UtfPrefix_ + name_.length + DBConstants.boolSize_ + DBConstants.intSize_ + DBConstants.intSize_ + getSizeOfAllColumns();
-    }
-
-    /**
-     * This calculates the exact size of the sub columns on the fly
-     */
-    int getSizeOfAllColumns()
-    {
-        int size = 0;
-        Collection<IColumn> subColumns = getSubColumns();
-        for ( IColumn subColumn : subColumns )
-        {
-            size += subColumn.serializedSize();
-        }
-        return size;
-    }
-
-    public void remove(byte[] columnName)
-    {
-    	columns_.remove(columnName);
-    }
-
-    public long timestamp()
-    {
-    	throw new UnsupportedOperationException("This operation is not supported for Super Columns.");
-    }
-
-    public long timestamp(byte[] columnName)
-    {
-    	IColumn column = columns_.get(columnName);
-    	if ( column instanceof SuperColumn )
-    		throw new UnsupportedOperationException("A super column cannot hold other super columns.");
-    	if ( column != null )
-    		return column.timestamp();
-    	throw new IllegalArgumentException("Timestamp was requested for a column that does not exist.");
-    }
-
-    public byte[] value()
-    {
-    	throw new UnsupportedOperationException("This operation is not supported for Super Columns.");
-    }
-
-    public byte[] value(byte[] columnName)
-    {
-    	IColumn column = columns_.get(columnName);
-    	if ( column != null )
-    		return column.value();
-    	throw new IllegalArgumentException("Value was requested for a column that does not exist.");
-    }
-
-    public void addColumn(IColumn column)
-    {
-    	if (!(column instanceof Column))
-    		throw new UnsupportedOperationException("A super column can only contain simple columns.");
-        try
-        {
-            getComparator().validate(column.name());
-        }
-        catch (Exception e)
-        {
-            throw new MarshalException("Invalid column name in supercolumn for " + getComparator().getClass().getName());
-        }
-    	IColumn oldColumn = columns_.get(column.name());
-    	if ( oldColumn == null )
-        {
-    		columns_.put(column.name(), column);
-            size_.addAndGet(column.size());
-        }
-    	else
-    	{
-    		if (((Column)oldColumn).comparePriority((Column)column) <= 0)
-            {
-    			columns_.put(column.name(), column);
-                int delta = (-1)*oldColumn.size();
-                /* subtract the size of the oldColumn */
-                size_.addAndGet(delta);
-                /* add the size of the new column */
-                size_.addAndGet(column.size());
-            }
-    	}
-    }
-
-    /*
-     * Go through each sub-column: if it already exists, ask it to resolve
-     * itself; if it does not exist, create it.
-     */
-    public void putColumn(IColumn column)
-    {
-        if (!(column instanceof SuperColumn))
-        {
-            throw new UnsupportedOperationException("Only Super column objects should be put here");
-        }
-        if (!Arrays.equals(name_, column.name()))
-        {
-            throw new IllegalArgumentException("The name should match the name of the current column or super column");
-        }
-
-        for (IColumn subColumn : column.getSubColumns())
-        {
-        	addColumn(subColumn);
-        }
-        if (column.getMarkedForDeleteAt() > markedForDeleteAt)
-        {
-            markForDeleteAt(column.getLocalDeletionTime(),  column.getMarkedForDeleteAt());
-        }
-    }
-
-    public int getObjectCount()
-    {
-    	return 1 + columns_.size();
-    }
-
-    public long getMarkedForDeleteAt() {
-        return markedForDeleteAt;
-    }
-
-    int getColumnCount()
-    {
-    	return columns_.size();
-    }
-
-    public IColumn diff(IColumn columnNew)
-    {
-    	IColumn columnDiff = new SuperColumn(columnNew.name(), ((SuperColumn)columnNew).getComparator());
-        if (columnNew.getMarkedForDeleteAt() > getMarkedForDeleteAt())
-        {
-            ((SuperColumn)columnDiff).markForDeleteAt(columnNew.getLocalDeletionTime(), columnNew.getMarkedForDeleteAt());
-        }
-
-        // (don't need to worry about columnNew containing subColumns that are shadowed by
-        // the delete tombstone, since columnNew was generated by CF.resolve, which
-        // takes care of those for us.)
-        for (IColumn subColumn : columnNew.getSubColumns())
-        {
-        	IColumn columnInternal = columns_.get(subColumn.name());
-        	if(columnInternal == null )
-        	{
-        		columnDiff.addColumn(subColumn);
-        	}
-        	else
-        	{
-            	IColumn subColumnDiff = columnInternal.diff(subColumn);
-        		if(subColumnDiff != null)
-        		{
-            		columnDiff.addColumn(subColumnDiff);
-        		}
-        	}
-        }
-
-        if (!columnDiff.getSubColumns().isEmpty() || columnNew.isMarkedForDelete())
-        	return columnDiff;
-        else
-        	return null;
-    }
-
-    public byte[] digest()
-    {
-    	byte[] xorHash = ArrayUtils.EMPTY_BYTE_ARRAY;
-    	if(name_ == null)
-    		return xorHash;
-    	xorHash = name_.clone();
-    	for(IColumn column : columns_.values())
-    	{
-			xorHash = FBUtilities.xor(xorHash, column.digest());
-    	}
-    	return xorHash;
-    }
-
-    public String getString(AbstractType comparator)
-    {
-    	StringBuilder sb = new StringBuilder();
-        sb.append("SuperColumn(");
-    	sb.append(comparator.getString(name_));
-
-        if (isMarkedForDelete()) {
-            sb.append(" -delete at ").append(getMarkedForDeleteAt()).append("-");
-        }
-
-        sb.append(" [");
-        sb.append(getComparator().getColumnsString(columns_.values()));
-        sb.append("])");
-
-        return sb.toString();
-    }
-
-    public int getLocalDeletionTime()
-    {
-        return localDeletionTime;
-    }
-
-    public void markForDeleteAt(int localDeleteTime, long timestamp)
-    {
-        this.localDeletionTime = localDeleteTime;
-        this.markedForDeleteAt = timestamp;
-    }
-}
-
-class SuperColumnSerializer implements ICompactSerializer<IColumn>
-{
-    private AbstractType comparator;
-
-    public SuperColumnSerializer(AbstractType comparator)
-    {
-        this.comparator = comparator;
-    }
-
-    public AbstractType getComparator()
-    {
-        return comparator;
-    }
-
-    public void serialize(IColumn column, DataOutputStream dos) throws IOException
-    {
-    	SuperColumn superColumn = (SuperColumn)column;
-        ColumnSerializer.writeName(column.name(), dos);
-        dos.writeInt(superColumn.getLocalDeletionTime());
-        dos.writeLong(superColumn.getMarkedForDeleteAt());
-
-        Collection<IColumn> columns  = column.getSubColumns();
-        int size = columns.size();
-        dos.writeInt(size);
-
-        dos.writeInt(superColumn.getSizeOfAllColumns());
-        for ( IColumn subColumn : columns )
-        {
-            Column.serializer().serialize(subColumn, dos);
-        }
-    }
-
-    public IColumn deserialize(DataInputStream dis) throws IOException
-    {
-        byte[] name = ColumnSerializer.readName(dis);
-        SuperColumn superColumn = new SuperColumn(name, comparator);
-        superColumn.markForDeleteAt(dis.readInt(), dis.readLong());
-        assert dis.available() > 0;
-
-        /* read the number of columns */
-        int size = dis.readInt();
-        /* read the size of all columns */
-        dis.readInt();
-        for ( int i = 0; i < size; ++i )
-        {
-            IColumn subColumn = Column.serializer().deserialize(dis);
-            superColumn.addColumn(subColumn);
-        }
-        return superColumn;
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.log4j.Logger;
+
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.io.ICompactSerializer;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.MarshalException;
+
+/**
+ * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( pmalik@facebook.com )
+ */
+
+public final class SuperColumn implements IColumn
+{
+	private static Logger logger_ = Logger.getLogger(SuperColumn.class);
+
+    static SuperColumnSerializer serializer(AbstractType comparator)
+    {
+        return new SuperColumnSerializer(comparator);
+    }
+
+    private byte[] name_;
+    // TODO make subcolumn comparator configurable
+    private ConcurrentSkipListMap<byte[], IColumn> columns_;
+    private int localDeletionTime = Integer.MIN_VALUE;
+	private long markedForDeleteAt = Long.MIN_VALUE;
+    private AtomicInteger size_ = new AtomicInteger(0);
+
+    SuperColumn(byte[] name, AbstractType comparator)
+    {
+    	name_ = name;
+        columns_ = new ConcurrentSkipListMap<byte[], IColumn>(comparator);
+    }
+
+    public AbstractType getComparator()
+    {
+        return (AbstractType)columns_.comparator();
+    }
+
+    public SuperColumn cloneMeShallow()
+    {
+        SuperColumn sc = new SuperColumn(name_, getComparator());
+        sc.markForDeleteAt(localDeletionTime, markedForDeleteAt);
+        return sc;
+    }
+
+	public boolean isMarkedForDelete()
+	{
+		return markedForDeleteAt > Long.MIN_VALUE;
+	}
+
+    public byte[] name()
+    {
+    	return name_;
+    }
+
+    public Collection<IColumn> getSubColumns()
+    {
+    	return columns_.values();
+    }
+
+    public IColumn getSubColumn(byte[] columnName)
+    {
+        IColumn column = columns_.get(columnName);
+        assert column == null || column instanceof Column;
+        return column;
+    }
+
+    public int size()
+    {
+        /*
+         * return the size of the individual columns
+         * that make up the super column. This is an
+         * APPROXIMATION of the size, used only by the
+         * Memtable.
+        */
+        return size_.get();
+    }
+
+    /**
+     * This returns the size of the super-column when serialized.
+     * @see org.apache.cassandra.db.IColumn#serializedSize()
+    */
+    public int serializedSize()
+    {
+        /*
+         * Size of a super-column is =
+         *   size of a name (UtfPrefix + length of the string)
+         * + 1 byte to indicate if the super-column has been deleted
+         * + 4 bytes for size of the sub-columns
+         * + 4 bytes for the number of sub-columns
+         * + size of all the sub-columns.
+        */
+
+    	/*
+    	 * We store the string as UTF-8 encoded, so when we calculate the length, it
+    	 * should be converted to UTF-8.
+    	 */
+    	/*
+    	 * We need to keep the way we are calculating the column size in sync with the
+    	 * way we are calculating the size for the column family serializer.
+    	 */
+    	return IColumn.UtfPrefix_ + name_.length + DBConstants.boolSize_ + DBConstants.intSize_ + DBConstants.intSize_ + getSizeOfAllColumns();
+    }
+
+    /**
+     * This calculates the exact size of the sub columns on the fly
+     */
+    int getSizeOfAllColumns()
+    {
+        int size = 0;
+        Collection<IColumn> subColumns = getSubColumns();
+        for ( IColumn subColumn : subColumns )
+        {
+            size += subColumn.serializedSize();
+        }
+        return size;
+    }
+
+    public void remove(byte[] columnName)
+    {
+    	columns_.remove(columnName);
+    }
+
+    public long timestamp()
+    {
+    	throw new UnsupportedOperationException("This operation is not supported for Super Columns.");
+    }
+
+    public long timestamp(byte[] columnName)
+    {
+    	IColumn column = columns_.get(columnName);
+    	if ( column instanceof SuperColumn )
+    		throw new UnsupportedOperationException("A super column cannot hold other super columns.");
+    	if ( column != null )
+    		return column.timestamp();
+    	throw new IllegalArgumentException("Timestamp was requested for a column that does not exist.");
+    }
+
+    public byte[] value()
+    {
+    	throw new UnsupportedOperationException("This operation is not supported for Super Columns.");
+    }
+
+    public byte[] value(byte[] columnName)
+    {
+    	IColumn column = columns_.get(columnName);
+    	if ( column != null )
+    		return column.value();
+    	throw new IllegalArgumentException("Value was requested for a column that does not exist.");
+    }
+
+    public void addColumn(IColumn column)
+    {
+    	if (!(column instanceof Column))
+    		throw new UnsupportedOperationException("A super column can only contain simple columns.");
+        try
+        {
+            getComparator().validate(column.name());
+        }
+        catch (Exception e)
+        {
+            throw new MarshalException("Invalid column name in supercolumn for " + getComparator().getClass().getName());
+        }
+    	IColumn oldColumn = columns_.get(column.name());
+    	if ( oldColumn == null )
+        {
+    		columns_.put(column.name(), column);
+            size_.addAndGet(column.size());
+        }
+    	else
+    	{
+    		if (((Column)oldColumn).comparePriority((Column)column) <= 0)
+            {
+    			columns_.put(column.name(), column);
+                int delta = (-1)*oldColumn.size();
+                /* subtract the size of the oldColumn */
+                size_.addAndGet(delta);
+                /* add the size of the new column */
+                size_.addAndGet(column.size());
+            }
+    	}
+    }
+
+    /*
+     * Go through each sub-column: if it already exists, ask it to resolve
+     * itself; if it does not exist, create it.
+     */
+    public void putColumn(IColumn column)
+    {
+        if (!(column instanceof SuperColumn))
+        {
+            throw new UnsupportedOperationException("Only Super column objects should be put here");
+        }
+        if (!Arrays.equals(name_, column.name()))
+        {
+            throw new IllegalArgumentException("The name should match the name of the current column or super column");
+        }
+
+        for (IColumn subColumn : column.getSubColumns())
+        {
+        	addColumn(subColumn);
+        }
+        if (column.getMarkedForDeleteAt() > markedForDeleteAt)
+        {
+            markForDeleteAt(column.getLocalDeletionTime(),  column.getMarkedForDeleteAt());
+        }
+    }
+
+    public int getObjectCount()
+    {
+    	return 1 + columns_.size();
+    }
+
+    public long getMarkedForDeleteAt() {
+        return markedForDeleteAt;
+    }
+
+    int getColumnCount()
+    {
+    	return columns_.size();
+    }
+
+    public IColumn diff(IColumn columnNew)
+    {
+    	IColumn columnDiff = new SuperColumn(columnNew.name(), ((SuperColumn)columnNew).getComparator());
+        if (columnNew.getMarkedForDeleteAt() > getMarkedForDeleteAt())
+        {
+            ((SuperColumn)columnDiff).markForDeleteAt(columnNew.getLocalDeletionTime(), columnNew.getMarkedForDeleteAt());
+        }
+
+        // (don't need to worry about columnNew containing subColumns that are shadowed by
+        // the delete tombstone, since columnNew was generated by CF.resolve, which
+        // takes care of those for us.)
+        for (IColumn subColumn : columnNew.getSubColumns())
+        {
+        	IColumn columnInternal = columns_.get(subColumn.name());
+        	if(columnInternal == null )
+        	{
+        		columnDiff.addColumn(subColumn);
+        	}
+        	else
+        	{
+            	IColumn subColumnDiff = columnInternal.diff(subColumn);
+        		if(subColumnDiff != null)
+        		{
+            		columnDiff.addColumn(subColumnDiff);
+        		}
+        	}
+        }
+
+        if (!columnDiff.getSubColumns().isEmpty() || columnNew.isMarkedForDelete())
+        	return columnDiff;
+        else
+        	return null;
+    }
+
+    public byte[] digest()
+    {
+    	byte[] xorHash = ArrayUtils.EMPTY_BYTE_ARRAY;
+    	if(name_ == null)
+    		return xorHash;
+    	xorHash = name_.clone();
+    	for(IColumn column : columns_.values())
+    	{
+			xorHash = FBUtilities.xor(xorHash, column.digest());
+    	}
+    	return xorHash;
+    }
+
+    public String getString(AbstractType comparator)
+    {
+    	StringBuilder sb = new StringBuilder();
+        sb.append("SuperColumn(");
+    	sb.append(comparator.getString(name_));
+
+        if (isMarkedForDelete()) {
+            sb.append(" -delete at ").append(getMarkedForDeleteAt()).append("-");
+        }
+
+        sb.append(" [");
+        sb.append(getComparator().getColumnsString(columns_.values()));
+        sb.append("])");
+
+        return sb.toString();
+    }
+
+    public int getLocalDeletionTime()
+    {
+        return localDeletionTime;
+    }
+
+    public void markForDeleteAt(int localDeleteTime, long timestamp)
+    {
+        this.localDeletionTime = localDeleteTime;
+        this.markedForDeleteAt = timestamp;
+    }
+}
+
+class SuperColumnSerializer implements ICompactSerializer<IColumn>
+{
+    private AbstractType comparator;
+
+    public SuperColumnSerializer(AbstractType comparator)
+    {
+        this.comparator = comparator;
+    }
+
+    public AbstractType getComparator()
+    {
+        return comparator;
+    }
+
+    public void serialize(IColumn column, DataOutputStream dos) throws IOException
+    {
+    	SuperColumn superColumn = (SuperColumn)column;
+        ColumnSerializer.writeName(column.name(), dos);
+        dos.writeInt(superColumn.getLocalDeletionTime());
+        dos.writeLong(superColumn.getMarkedForDeleteAt());
+
+        Collection<IColumn> columns  = column.getSubColumns();
+        int size = columns.size();
+        dos.writeInt(size);
+
+        dos.writeInt(superColumn.getSizeOfAllColumns());
+        for ( IColumn subColumn : columns )
+        {
+            Column.serializer().serialize(subColumn, dos);
+        }
+    }
+
+    public IColumn deserialize(DataInputStream dis) throws IOException
+    {
+        byte[] name = ColumnSerializer.readName(dis);
+        SuperColumn superColumn = new SuperColumn(name, comparator);
+        superColumn.markForDeleteAt(dis.readInt(), dis.readLong());
+        assert dis.available() > 0;
+
+        /* read the number of columns */
+        int size = dis.readInt();
+        /* read the size of all columns */
+        dis.readInt();
+        for ( int i = 0; i < size; ++i )
+        {
+            IColumn subColumn = Column.serializer().deserialize(dis);
+            superColumn.addColumn(subColumn);
+        }
+        return superColumn;
+    }
+}
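
SuperColumn stores its sub-columns in a ConcurrentSkipListMap<byte[], IColumn> constructed with the column comparator. The explicit comparator matters: byte[] implements neither Comparable nor a value-based equals, so an ordered map keyed on raw arrays only behaves sensibly when every lookup goes through the comparator. A minimal sketch, assuming an unsigned lexicographic ordering in place of the real AbstractType comparators:

    import java.util.Comparator;
    import java.util.concurrent.ConcurrentSkipListMap;

    public class ByteKeyMapSketch
    {
        // Unsigned lexicographic byte[] comparator; a stand-in for the
        // AbstractType comparator the real SuperColumn is constructed with.
        static final Comparator<byte[]> LEXICAL = new Comparator<byte[]>()
        {
            public int compare(byte[] a, byte[] b)
            {
                int len = Math.min(a.length, b.length);
                for (int i = 0; i < len; i++)
                {
                    int d = (a[i] & 0xFF) - (b[i] & 0xFF);
                    if (d != 0)
                        return d;
                }
                return a.length - b.length;
            }
        };

        public static void main(String[] args)
        {
            ConcurrentSkipListMap<byte[], String> columns =
                    new ConcurrentSkipListMap<byte[], String>(LEXICAL);
            columns.put(new byte[] { 2 }, "second");
            columns.put(new byte[] { 1 }, "first");
            // lookups succeed even with a distinct array instance, because the
            // skip list locates keys via the comparator, not byte[].equals
            System.out.println(columns.get(new byte[] { 1 }));      // first
            System.out.println(columns.firstEntry().getValue());    // first
        }
    }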

Modified: incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SystemTable.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SystemTable.java?rev=799331&r1=799330&r2=799331&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SystemTable.java (original)
+++ incubator/cassandra/trunk/src/java/org/apache/cassandra/db/SystemTable.java Thu Jul 30 15:30:21 2009
@@ -1,151 +1,151 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-
-import org.apache.log4j.Logger;
-
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.dht.Token;
-import org.apache.cassandra.dht.IPartitioner;
-import org.apache.cassandra.utils.BasicUtilities;
-import org.apache.cassandra.db.filter.NamesQueryFilter;
-import org.apache.cassandra.db.filter.QueryPath;
-import org.apache.cassandra.db.filter.QueryFilter;
-
-/**
- * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( pmalik@facebook.com )
- */
-
-public class SystemTable
-{
-    private static Logger logger_ = Logger.getLogger(SystemTable.class);
-    public static final String LOCATION_CF = "LocationInfo";
-    private static final String LOCATION_KEY = "L"; // only one row in Location CF
-    private static final byte[] TOKEN = utf8("Token");
-    private static final byte[] GENERATION = utf8("Generation");
-
-    private static byte[] utf8(String str)
-    {
-        try
-        {
-            return str.getBytes("UTF-8");
-        }
-        catch (UnsupportedEncodingException e)
-        {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /*
-     * This method is used to update the SystemTable with the new token.
-    */
-    public static void updateToken(Token token) throws IOException
-    {
-        IPartitioner p = StorageService.getPartitioner();
-        Table table = Table.open(Table.SYSTEM_TABLE);
-        /* Retrieve the "LocationInfo" column family */
-        QueryFilter filter = new NamesQueryFilter(LOCATION_KEY, new QueryPath(LOCATION_CF), TOKEN);
-        ColumnFamily cf = table.getColumnFamilyStore(LOCATION_CF).getColumnFamily(filter);
-        long oldTokenColumnTimestamp = cf.getColumn(SystemTable.TOKEN).timestamp();
-        /* create the "Token" whose value is the new token. */
-        IColumn tokenColumn = new Column(SystemTable.TOKEN, p.getTokenFactory().toByteArray(token), oldTokenColumnTimestamp + 1);
-        /* replace the old "Token" column with this new one. */
-        if (logger_.isDebugEnabled())
-          logger_.debug("Replacing old token " + p.getTokenFactory().fromByteArray(cf.getColumn(SystemTable.TOKEN).value()) + " with " + token);
-        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
-        cf.addColumn(tokenColumn);
-        rm.add(cf);
-        rm.apply();
-    }
-
-    /*
-     * This method reads the system table and retrieves the metadata
-     * associated with this storage instance. Currently we store the
-     * metadata in a Column Family called LocationInfo, which has two
-     * columns, "Token" and "Generation". The token is what gets gossiped
-     * around, and the generation info is used by the failure detector (FD).
-    */
-    public static StorageMetadata initMetadata() throws IOException
-    {
-        /* Read the system table to retrieve the storage ID and the generation */
-        Table table = Table.open(Table.SYSTEM_TABLE);
-        QueryFilter filter = new NamesQueryFilter(LOCATION_KEY, new QueryPath(LOCATION_CF), GENERATION);
-        ColumnFamily cf = table.getColumnFamilyStore(LOCATION_CF).getColumnFamily(filter);
-
-        IPartitioner p = StorageService.getPartitioner();
-        if (cf == null)
-        {
-            Token token = p.getDefaultToken();
-            int generation = 1;
-
-            RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
-            cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.LOCATION_CF);
-            cf.addColumn(new Column(TOKEN, p.getTokenFactory().toByteArray(token)));
-            cf.addColumn(new Column(GENERATION, BasicUtilities.intToByteArray(generation)) );
-            rm.add(cf);
-            rm.apply();
-            return new StorageMetadata(token, generation);
-        }
-
-        /* we crashed and came back up; we need to bump the generation # */
-        IColumn tokenColumn = cf.getColumn(TOKEN);
-        Token token = p.getTokenFactory().fromByteArray(tokenColumn.value());
-
-        IColumn generation = cf.getColumn(GENERATION);
-        int gen = BasicUtilities.byteArrayToInt(generation.value()) + 1;
-
-        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
-        cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.LOCATION_CF);
-        Column generation2 = new Column(GENERATION, BasicUtilities.intToByteArray(gen), generation.timestamp() + 1);
-        cf.addColumn(generation2);
-        rm.add(cf);
-        rm.apply();
-        return new StorageMetadata(token, gen);
-    }
-
-    public static class StorageMetadata
-    {
-        private Token myToken;
-        private int generation_;
-
-        StorageMetadata(Token storageId, int generation)
-        {
-            myToken = storageId;
-            generation_ = generation;
-        }
-
-        public Token getStorageId()
-        {
-            return myToken;
-        }
-
-        public void setStorageId(Token storageId)
-        {
-            myToken = storageId;
-        }
-
-        public int getGeneration()
-        {
-            return generation_;
-        }
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+
+import org.apache.log4j.Logger;
+
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.utils.BasicUtilities;
+import org.apache.cassandra.db.filter.NamesQueryFilter;
+import org.apache.cassandra.db.filter.QueryPath;
+import org.apache.cassandra.db.filter.QueryFilter;
+
+/**
+ * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( pmalik@facebook.com )
+ */
+
+public class SystemTable
+{
+    private static Logger logger_ = Logger.getLogger(SystemTable.class);
+    public static final String LOCATION_CF = "LocationInfo";
+    private static final String LOCATION_KEY = "L"; // only one row in Location CF
+    private static final byte[] TOKEN = utf8("Token");
+    private static final byte[] GENERATION = utf8("Generation");
+
+    private static byte[] utf8(String str)
+    {
+        try
+        {
+            return str.getBytes("UTF-8");
+        }
+        catch (UnsupportedEncodingException e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /*
+     * This method is used to update the SystemTable with the new token.
+    */
+    public static void updateToken(Token token) throws IOException
+    {
+        IPartitioner p = StorageService.getPartitioner();
+        Table table = Table.open(Table.SYSTEM_TABLE);
+        /* Retrieve the "LocationInfo" column family */
+        QueryFilter filter = new NamesQueryFilter(LOCATION_KEY, new QueryPath(LOCATION_CF), TOKEN);
+        ColumnFamily cf = table.getColumnFamilyStore(LOCATION_CF).getColumnFamily(filter);
+        long oldTokenColumnTimestamp = cf.getColumn(SystemTable.TOKEN).timestamp();
+        /* create the "Token" whose value is the new token. */
+        IColumn tokenColumn = new Column(SystemTable.TOKEN, p.getTokenFactory().toByteArray(token), oldTokenColumnTimestamp + 1);
+        /* replace the old "Token" column with this new one. */
+        if (logger_.isDebugEnabled())
+          logger_.debug("Replacing old token " + p.getTokenFactory().fromByteArray(cf.getColumn(SystemTable.TOKEN).value()) + " with " + token);
+        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
+        cf.addColumn(tokenColumn);
+        rm.add(cf);
+        rm.apply();
+    }
+
+    /*
+     * This method reads the system table and retrieves the metadata
+     * associated with this storage instance. Currently we store the
+     * metadata in a Column Family called LocationInfo, which has two
+     * columns, "Token" and "Generation". The token is what gets gossiped
+     * around, and the generation info is used by the failure detector (FD).
+    */
+    public static StorageMetadata initMetadata() throws IOException
+    {
+        /* Read the system table to retrieve the storage ID and the generation */
+        Table table = Table.open(Table.SYSTEM_TABLE);
+        QueryFilter filter = new NamesQueryFilter(LOCATION_KEY, new QueryPath(LOCATION_CF), GENERATION);
+        ColumnFamily cf = table.getColumnFamilyStore(LOCATION_CF).getColumnFamily(filter);
+
+        IPartitioner p = StorageService.getPartitioner();
+        if (cf == null)
+        {
+            Token token = p.getDefaultToken();
+            int generation = 1;
+
+            RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
+            cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.LOCATION_CF);
+            cf.addColumn(new Column(TOKEN, p.getTokenFactory().toByteArray(token)));
+            cf.addColumn(new Column(GENERATION, BasicUtilities.intToByteArray(generation)) );
+            rm.add(cf);
+            rm.apply();
+            return new StorageMetadata(token, generation);
+        }
+
+        /* we crashed and came back up; we need to bump the generation # */
+        IColumn tokenColumn = cf.getColumn(TOKEN);
+        Token token = p.getTokenFactory().fromByteArray(tokenColumn.value());
+
+        IColumn generation = cf.getColumn(GENERATION);
+        int gen = BasicUtilities.byteArrayToInt(generation.value()) + 1;
+
+        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
+        cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.LOCATION_CF);
+        Column generation2 = new Column(GENERATION, BasicUtilities.intToByteArray(gen), generation.timestamp() + 1);
+        cf.addColumn(generation2);
+        rm.add(cf);
+        rm.apply();
+        return new StorageMetadata(token, gen);
+    }
+
+    public static class StorageMetadata
+    {
+        private Token myToken;
+        private int generation_;
+
+        StorageMetadata(Token storageId, int generation)
+        {
+            myToken = storageId;
+            generation_ = generation;
+        }
+
+        public Token getStorageId()
+        {
+            return myToken;
+        }
+
+        public void setStorageId(Token storageId)
+        {
+            myToken = storageId;
+        }
+
+        public int getGeneration()
+        {
+            return generation_;
+        }
+    }
+}
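
initMetadata above is a simple crash-recovery counter: the first boot persists generation 1, and every subsequent startup reads the stored value, increments it, and writes it back, letting gossip distinguish a restarted node from its previous incarnation. A self-contained sketch of the same bump-on-restart pattern against a plain file (the file name is hypothetical; the real value lives in the system table):

    import java.io.*;

    public class GenerationSketch
    {
        // Hypothetical location; the real metadata lives in the system table.
        static final File STATE = new File("generation.bin");

        static int bumpGeneration() throws IOException
        {
            int generation = 1; // first boot
            if (STATE.exists())
            {
                DataInputStream in = new DataInputStream(new FileInputStream(STATE));
                try
                {
                    generation = in.readInt() + 1; // we came back up: bump it
                }
                finally
                {
                    in.close();
                }
            }
            DataOutputStream out = new DataOutputStream(new FileOutputStream(STATE));
            try
            {
                out.writeInt(generation); // persist for the next restart
            }
            finally
            {
                out.close();
            }
            return generation;
        }

        public static void main(String[] args) throws IOException
        {
            System.out.println("generation = " + bumpGeneration());
        }
    }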