You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by sl...@apache.org on 2013/10/03 11:10:57 UTC
[1/3] git commit: Fix skipping columns with multiple slices
Updated Branches:
refs/heads/trunk c7af3040c -> 12c4734b5
Fix skipping columns with multiple slices
patch by frousseau & slebresne; reviewed by frousseau & slebresne for CASSANDRA-6119
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/20a80502
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/20a80502
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/20a80502
Branch: refs/heads/trunk
Commit: 20a805023fa26ab1b2f70b574b35357df9652cd3
Parents: c8f0e3a
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Thu Oct 3 11:06:12 2013 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Thu Oct 3 11:06:12 2013 +0200
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../db/columniterator/IndexedSliceReader.java | 17 +-
.../cassandra/db/ColumnFamilyStoreTest.java | 198 +++++++++++++++++++
3 files changed, 213 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/20a80502/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index cc04eca..c1d1991 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -12,6 +12,7 @@
* Add tombstone debug threshold and histogram (CASSANDRA-6042, 6057)
* Fix fat client schema pull NPE (CASSANDRA-6089)
* Fix memtable flushing for indexed tables (CASSANDRA-6112)
+ * Fix skipping columns with multiple slices (CASSANDRA-6119)
1.2.10
http://git-wip-us.apache.org/repos/asf/cassandra/blob/20a80502/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
index 472ecfc..df916b2 100644
--- a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
+++ b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
@@ -391,7 +391,7 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
// scan from index start
OnDiskAtom column = null;
- while (file.bytesPastMark(mark) < currentIndex.width)
+ while (file.bytesPastMark(mark) < currentIndex.width || column != null)
{
// Only fetch a new column if we haven't dealt with the previous one.
if (column == null)
@@ -467,20 +467,31 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA
OnDiskAtom.Serializer atomSerializer = emptyColumnFamily.getOnDiskSerializer();
int columns = file.readInt();
- for (int i = 0; i < columns; i++)
+ OnDiskAtom column = null;
+ int i = 0;
+ while (i < columns || column != null)
{
- OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
+ // Only fetch a new column if we haven't dealt with the previous one.
+ if (column == null)
+ {
+ column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
+ i++;
+ }
// col is before slice
// (If in slice, don't bother checking that until we change slice)
if (!inSlice && isColumnBeforeSliceStart(column))
+ {
+ column = null;
continue;
+ }
// col is within slice
if (isColumnBeforeSliceFinish(column))
{
inSlice = true;
addColumn(column);
+ column = null;
}
// col is after slice. more slices?
else
http://git-wip-us.apache.org/repos/asf/cassandra/blob/20a80502/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index abe3f05..cd30297 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -1160,6 +1160,204 @@ public class ColumnFamilyStoreTest extends SchemaLoader
@SuppressWarnings("unchecked")
@Test
+ public void testMultiRangeSomeEmptyNoIndex() throws Throwable
+ {
+ // in order not to change thrift interfaces at this stage we build SliceQueryFilter
+ // directly instead of using QueryFilter to build it for us
+ ColumnSlice[] ranges = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
+ new ColumnSlice(bytes("colC"), bytes("colE")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ ColumnSlice[] rangesReversed = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colE"), bytes("colC")),
+ new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ String tableName = "Keyspace1";
+ String cfName = "Standard1";
+ Table table = Table.open(tableName);
+ ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
+ cfs.clearUnsafe();
+
+ String[] letters = new String[] { "a", "b", "c", "d", "i" };
+ Column[] cols = new Column[letters.length];
+ for (int i = 0; i < cols.length; i++)
+ {
+ cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+ ByteBuffer.wrap(new byte[1]), 1);
+ }
+
+ putColsStandard(cfs, dk("a"), cols);
+
+ cfs.forceBlockingFlush();
+
+ SliceQueryFilter multiRangeForward = new SliceQueryFilter(ranges, false, 100);
+ SliceQueryFilter multiRangeForwardWithCounting = new SliceQueryFilter(ranges, false, 3);
+ SliceQueryFilter multiRangeReverse = new SliceQueryFilter(rangesReversed, true, 100);
+ SliceQueryFilter multiRangeReverseWithCounting = new SliceQueryFilter(rangesReversed, true, 3);
+
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForward, "a", "colA", "colC", "colD", "colI");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForwardWithCounting, "a", "colA", "colC", "colD");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverse, "a", "colI", "colD", "colC", "colA");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverseWithCounting, "a", "colI", "colD", "colC");
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testMultiRangeSomeEmptyIndexed() throws Throwable
+ {
+ // in order not to change thrift interfaces at this stage we build SliceQueryFilter
+ // directly instead of using QueryFilter to build it for us
+ ColumnSlice[] ranges = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
+ new ColumnSlice(bytes("colC"), bytes("colE")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ ColumnSlice[] rangesReversed = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colE"), bytes("colC")),
+ new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ String tableName = "Keyspace1";
+ String cfName = "Standard1";
+ Table table = Table.open(tableName);
+ ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
+ cfs.clearUnsafe();
+
+ String[] letters = new String[] { "a", "b", "c", "d", "i" };
+ Column[] cols = new Column[letters.length];
+ for (int i = 0; i < cols.length; i++)
+ {
+ cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+ ByteBuffer.wrap(new byte[1366]), 1);
+ }
+
+ putColsStandard(cfs, dk("a"), cols);
+
+ cfs.forceBlockingFlush();
+
+ SliceQueryFilter multiRangeForward = new SliceQueryFilter(ranges, false, 100);
+ SliceQueryFilter multiRangeForwardWithCounting = new SliceQueryFilter(ranges, false, 3);
+ SliceQueryFilter multiRangeReverse = new SliceQueryFilter(rangesReversed, true, 100);
+ SliceQueryFilter multiRangeReverseWithCounting = new SliceQueryFilter(rangesReversed, true, 3);
+
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForward, "a", "colA", "colC", "colD", "colI");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForwardWithCounting, "a", "colA", "colC", "colD");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverse, "a", "colI", "colD", "colC", "colA");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverseWithCounting, "a", "colI", "colD", "colC");
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testMultiRangeContiguousNoIndex() throws Throwable
+ {
+ // in order not to change thrift interfaces at this stage we build SliceQueryFilter
+ // directly instead of using QueryFilter to build it for us
+ ColumnSlice[] ranges = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
+ new ColumnSlice(bytes("colC"), bytes("colE")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ ColumnSlice[] rangesReversed = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colE"), bytes("colC")),
+ new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ String tableName = "Keyspace1";
+ String cfName = "Standard1";
+ Table table = Table.open(tableName);
+ ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
+ cfs.clearUnsafe();
+
+ String[] letters = new String[] { "a", "b", "c", "d", "e", "f", "g", "h", "i" };
+ Column[] cols = new Column[letters.length];
+ for (int i = 0; i < cols.length; i++)
+ {
+ cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+ ByteBuffer.wrap(new byte[1]), 1);
+ }
+
+ putColsStandard(cfs, dk("a"), cols);
+
+ cfs.forceBlockingFlush();
+
+ SliceQueryFilter multiRangeForward = new SliceQueryFilter(ranges, false, 100);
+ SliceQueryFilter multiRangeForwardWithCounting = new SliceQueryFilter(ranges, false, 3);
+ SliceQueryFilter multiRangeReverse = new SliceQueryFilter(rangesReversed, true, 100);
+ SliceQueryFilter multiRangeReverseWithCounting = new SliceQueryFilter(rangesReversed, true, 3);
+
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForward, "a", "colA", "colC", "colD", "colE", "colF", "colG", "colI");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForwardWithCounting, "a", "colA", "colC", "colD");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverse, "a", "colI", "colG", "colF", "colE", "colD", "colC", "colA");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverseWithCounting, "a", "colI", "colG", "colF");
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testMultiRangeContiguousIndexed() throws Throwable
+ {
+ // in order not to change thrift interfaces at this stage we build SliceQueryFilter
+ // directly instead of using QueryFilter to build it for us
+ ColumnSlice[] ranges = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colA")),
+ new ColumnSlice(bytes("colC"), bytes("colE")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colI"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ ColumnSlice[] rangesReversed = new ColumnSlice[] {
+ new ColumnSlice(ByteBuffer.wrap(EMPTY_BYTE_ARRAY), bytes("colI")),
+ new ColumnSlice(bytes("colG"), bytes("colG")),
+ new ColumnSlice(bytes("colF"), bytes("colF")),
+ new ColumnSlice(bytes("colE"), bytes("colC")),
+ new ColumnSlice(bytes("colA"), ByteBuffer.wrap(EMPTY_BYTE_ARRAY)) };
+
+ String tableName = "Keyspace1";
+ String cfName = "Standard1";
+ Table table = Table.open(tableName);
+ ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
+ cfs.clearUnsafe();
+
+ String[] letters = new String[] { "a", "b", "c", "d", "e", "f", "g", "h", "i" };
+ Column[] cols = new Column[letters.length];
+ for (int i = 0; i < cols.length; i++)
+ {
+ cols[i] = new Column(ByteBufferUtil.bytes("col" + letters[i].toUpperCase()),
+ ByteBuffer.wrap(new byte[1366]), 1);
+ }
+
+ putColsStandard(cfs, dk("a"), cols);
+
+ cfs.forceBlockingFlush();
+
+ SliceQueryFilter multiRangeForward = new SliceQueryFilter(ranges, false, 100);
+ SliceQueryFilter multiRangeForwardWithCounting = new SliceQueryFilter(ranges, false, 3);
+ SliceQueryFilter multiRangeReverse = new SliceQueryFilter(rangesReversed, true, 100);
+ SliceQueryFilter multiRangeReverseWithCounting = new SliceQueryFilter(rangesReversed, true, 3);
+
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForward, "a", "colA", "colC", "colD", "colE", "colF", "colG", "colI");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeForwardWithCounting, "a", "colA", "colC", "colD");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverse, "a", "colI", "colG", "colF", "colE", "colD", "colC", "colA");
+ findRowGetSlicesAndAssertColsFound(cfs, multiRangeReverseWithCounting, "a", "colI", "colG", "colF");
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
public void testMultiRangeIndexed() throws Throwable
{
// in order not to change thrift interfaces at this stage we build SliceQueryFilter
[2/3] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Posted by sl...@apache.org.
Merge branch 'cassandra-1.2' into cassandra-2.0
Conflicts:
src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4106f569
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4106f569
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4106f569
Branch: refs/heads/trunk
Commit: 4106f56945a8bc82762338ff1737d387abe0060a
Parents: 4bd8626 20a8050
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Thu Oct 3 11:10:35 2013 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Thu Oct 3 11:10:35 2013 +0200
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../db/columniterator/IndexedSliceReader.java | 13 +-
.../cassandra/db/ColumnFamilyStoreTest.java | 198 +++++++++++++++++++
3 files changed, 209 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4106f569/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index c1023f6,c1d1991..994e8c3
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -27,46 -10,12 +27,47 @@@ Merged from 1.2
* Do not open non-ssl storage port if encryption option is all (CASSANDRA-3916)
* Move batchlog replay to its own executor (CASSANDRA-6079)
* Add tombstone debug threshold and histogram (CASSANDRA-6042, 6057)
+ * Enable tcp keepalive on incoming connections (CASSANDRA-4053)
* Fix fat client schema pull NPE (CASSANDRA-6089)
* Fix memtable flushing for indexed tables (CASSANDRA-6112)
+ * Fix skipping columns with multiple slices (CASSANDRA-6119)
-1.2.10
+2.0.1
+ * Fix bug that could allow reading deleted data temporarily (CASSANDRA-6025)
+ * Improve memory use defaults (CASSANDRA-5069)
+ * Make ThriftServer more easily extensible (CASSANDRA-6058)
+ * Remove Hadoop dependency from ITransportFactory (CASSANDRA-6062)
+ * add file_cache_size_in_mb setting (CASSANDRA-5661)
+ * Improve error message when yaml contains invalid properties (CASSANDRA-5958)
+ * Improve leveled compaction's ability to find non-overlapping L0 compactions
+ to work on concurrently (CASSANDRA-5921)
+ * Notify indexer of columns shadowed by range tombstones (CASSANDRA-5614)
+ * Log Merkle tree stats (CASSANDRA-2698)
+ * Switch from crc32 to adler32 for compressed sstable checksums (CASSANDRA-5862)
+ * Improve offheap memcpy performance (CASSANDRA-5884)
+ * Use a range aware scanner for cleanup (CASSANDRA-2524)
+ * Cleanup doesn't need to inspect sstables that contain only local data
+ (CASSANDRA-5722)
+ * Add ability for CQL3 to list partition keys (CASSANDRA-4536)
+ * Improve native protocol serialization (CASSANDRA-5664)
+ * Upgrade Thrift to 0.9.1 (CASSANDRA-5923)
+ * Require superuser status for adding triggers (CASSANDRA-5963)
+ * Make standalone scrubber handle old and new style leveled manifest
+ (CASSANDRA-6005)
+ * Fix paxos bugs (CASSANDRA-6012, 6013, 6023)
+ * Fix paged ranges with multiple replicas (CASSANDRA-6004)
+ * Fix potential AssertionError during tracing (CASSANDRA-6041)
+ * Fix NPE in sstablesplit (CASSANDRA-6027)
+ * Migrate pre-2.0 key/value/column aliases to system.schema_columns
+ (CASSANDRA-6009)
+ * Paging filter empty rows too aggressively (CASSANDRA-6040)
+ * Support variadic parameters for IN clauses (CASSANDRA-4210)
+ * cqlsh: return the result of CAS writes (CASSANDRA-5796)
+ * Fix validation of IN clauses with 2ndary indexes (CASSANDRA-6050)
+ * Support named bind variables in CQL (CASSANDRA-6033)
+Merged from 1.2:
+ * Allow cache-keys-to-save to be set at runtime (CASSANDRA-5980)
* Avoid second-guessing out-of-space state (CASSANDRA-5605)
* Tuning knobs for dealing with large blobs and many CFs (CASSANDRA-5982)
* (Hadoop) Fix CQLRW for thrift tables (CASSANDRA-6002)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4106f569/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
index 27d307a,df916b2..036d0cf
--- a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
+++ b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java
@@@ -445,11 -464,19 +445,14 @@@ class IndexedSliceReader extends Abstra
// We remember when we are within a slice to avoid some comparison
boolean inSlice = false;
- OnDiskAtom.Serializer atomSerializer = emptyColumnFamily.getOnDiskSerializer();
- int columns = file.readInt();
-
+ int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? file.readInt() : Integer.MAX_VALUE;
+ Iterator<OnDiskAtom> atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version);
- while (atomIterator.hasNext())
+ OnDiskAtom column = null;
- int i = 0;
- while (i < columns || column != null)
++ while (atomIterator.hasNext() || column != null)
{
- OnDiskAtom column = atomIterator.next();
+ // Only fetch a new column if we haven't dealt with the previous one.
+ if (column == null)
- {
- column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
- i++;
- }
++ column = atomIterator.next();
// col is before slice
// (If in slice, don't bother checking that until we change slice)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4106f569/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Posted by sl...@apache.org.
Merge branch 'cassandra-2.0' into trunk
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/12c4734b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/12c4734b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/12c4734b
Branch: refs/heads/trunk
Commit: 12c4734b5d657fec128db6cb284d03c92b2f1882
Parents: c7af304 4106f56
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Thu Oct 3 11:10:49 2013 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Thu Oct 3 11:10:49 2013 +0200
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../db/columniterator/IndexedSliceReader.java | 13 +-
.../cassandra/db/ColumnFamilyStoreTest.java | 198 +++++++++++++++++++
3 files changed, 209 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/12c4734b/CHANGES.txt
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/12c4734b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
----------------------------------------------------------------------