Posted to commits@cassandra.apache.org by be...@apache.org on 2018/11/29 16:00:52 UTC

[1/6] cassandra git commit: Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 7bdea449c -> 8404260f1
  refs/heads/cassandra-3.11 1084ad9ee -> e635317cf
  refs/heads/trunk 2ee9d6854 -> d8c549b68


Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

patch by Benedict; reviewed by Blake Eggleston for CASSANDRA-14588


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8404260f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8404260f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8404260f

Branch: refs/heads/cassandra-3.0
Commit: 8404260f1640efd14613c4591e5e918786fcde10
Parents: 7bdea44
Author: Benedict Elliott Smith <be...@apple.com>
Authored: Fri Jul 27 16:04:13 2018 +0100
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:50:25 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  8 +++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 7 files changed, 50 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
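
In short: Row previously inherited its Collection behaviour from
AbstractCollection<ColumnData>, whose isEmpty() contract is "size() == 0".
Unfiltered's notion of emptiness is stronger: a row counts as empty only if
it carries no information at all, deletions and primary-key liveness
included. A row holding nothing but a tombstone therefore satisfies one
contract while violating the other. A minimal sketch of the clash, using
invented names rather than the project's types:

    import java.util.AbstractCollection;

    abstract class SketchRow extends AbstractCollection<Object>
    {
        abstract boolean hasDeletion();

        // The inherited AbstractCollection.isEmpty() answers size() == 0,
        // but a row with zero ColumnData may still carry a deletion, so
        // the Unfiltered contract disagrees with the Collection one:
        boolean isEmptyAsUnfilteredIntends()
        {
            return size() == 0 && !hasDeletion();
        }
    }

The patch removes the ambiguity by dropping the Collection supertype: Row
stays Iterable<ColumnData> and exposes the old Collection operations through
the new columnData() and columnCount() accessors shown in the diffs below.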


http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 9ba36fa..40016a1 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 3.0.18
+ * Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty (CASSANDRA-14588)
  * RangeTombstoneList doesn't properly clean up mergeable or superseded rts in some cases (CASSANDRA-14894)
  * Fix handling of collection tombstones for dropped columns from legacy sstables (CASSANDRA-14912)
  * Throw exception if Columns serialized subset encode more columns than possible (CASSANDRA-14591)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/AbstractRow.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/AbstractRow.java b/src/java/org/apache/cassandra/db/rows/AbstractRow.java
index 67ed219..f91126b 100644
--- a/src/java/org/apache/cassandra/db/rows/AbstractRow.java
+++ b/src/java/org/apache/cassandra/db/rows/AbstractRow.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db.rows;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.util.AbstractCollection;
+import java.util.Collection;
 import java.util.Objects;
 
 import com.google.common.collect.Iterables;
@@ -35,7 +36,7 @@ import org.apache.cassandra.utils.FBUtilities;
  * Unless you have a very good reason not to, every row implementation
  * should probably extend this class.
  */
-public abstract class AbstractRow extends AbstractCollection<ColumnData> implements Row
+public abstract class AbstractRow implements Row
 {
     public Unfiltered.Kind kind()
     {
@@ -87,6 +88,11 @@ public abstract class AbstractRow extends AbstractCollection<ColumnData> impleme
             cd.validate();
     }
 
+    public String toString()
+    {
+        return columnData().toString();
+    }
+
     public String toString(CFMetaData metadata)
     {
         return toString(metadata, false);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/BTreeRow.java b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
index b07960c..e46d0cc 100644
--- a/src/java/org/apache/cassandra/db/rows/BTreeRow.java
+++ b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
@@ -167,7 +167,12 @@ public class BTreeRow extends AbstractRow
 
     public Collection<ColumnDefinition> columns()
     {
-        return Collections2.transform(this, ColumnData::column);
+        return Collections2.transform(columnData(), ColumnData::column);
+    }
+
+    public int columnCount()
+    {
+        return BTree.size(btree);
     }
 
     public LivenessInfo primaryKeyLivenessInfo()
@@ -208,9 +213,14 @@ public class BTreeRow extends AbstractRow
         return (ComplexColumnData) BTree.<Object>find(btree, ColumnDefinition.asymmetricColumnDataComparator, c);
     }
 
-    public int size()
+    @Override
+    public Collection<ColumnData> columnData()
     {
-        return BTree.size(btree);
+        return new AbstractCollection<ColumnData>()
+        {
+            @Override public Iterator<ColumnData> iterator() { return BTreeRow.this.iterator(); }
+            @Override public int size() { return BTree.size(btree); }
+        };
     }
 
     public Iterator<ColumnData> iterator()
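
Worth noting about the BTreeRow change above: columnData() returns a lazy
view, not a copy. The anonymous AbstractCollection delegates to the row's
existing iterator and to BTree.size(btree), so no intermediate list is ever
materialised. The same pattern in stand-alone form (a sketch with invented
names, assuming the element count is known up front):

    import java.util.AbstractCollection;
    import java.util.Iterator;

    final class IterableView<T> extends AbstractCollection<T>
    {
        private final Iterable<T> source; // backing data, iterated lazily
        private final int size;           // known in advance, so size() is O(1)

        IterableView(Iterable<T> source, int size)
        {
            this.source = source;
            this.size = size;
        }

        @Override public Iterator<T> iterator() { return source.iterator(); }
        @Override public int size()             { return size; }
    }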

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/Row.java b/src/java/org/apache/cassandra/db/rows/Row.java
index 3c97e09..dcb78f3 100644
--- a/src/java/org/apache/cassandra/db/rows/Row.java
+++ b/src/java/org/apache/cassandra/db/rows/Row.java
@@ -45,7 +45,7 @@ import org.apache.cassandra.utils.btree.UpdateFunction;
  * its own data. For instance, a {@code Row} cannot contain a cell that is deleted by its own
  * row deletion.
  */
-public interface Row extends Unfiltered, Collection<ColumnData>
+public interface Row extends Unfiltered, Iterable<ColumnData>
 {
     /**
      * The clustering values for this row.
@@ -59,6 +59,12 @@ public interface Row extends Unfiltered, Collection<ColumnData>
      */
     public Collection<ColumnDefinition> columns();
 
+
+    /**
+     * The number of columns for which data (incl. simple tombstones) is present in this row.
+     */
+    public int columnCount();
+
     /**
      * The row deletion.
      *
@@ -148,6 +154,15 @@ public interface Row extends Unfiltered, Collection<ColumnData>
     public Iterable<Cell> cells();
 
     /**
+     * A collection of the ColumnData representation of this row, for columns with some data (possibly not live) present
+     * <p>
+     * The data is returned in column order.
+     *
+     * @return a Collection of the non-empty ColumnData for this row.
+     */
+    public Collection<ColumnData> columnData();
+
+    /**
      * An iterable over the cells of this row that return cells in "legacy order".
      * <p>
      * In 3.0+, columns are sorted so that all simple columns are before all complex columns. Previously
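
For callers, the interface change is mechanical: Collection operations that
Row used to inherit now go through the explicit accessors, while plain
iteration is untouched. A hypothetical before/after at a call site
(illustrative only, not taken from the patch):

    // before (Row extends Collection<ColumnData>):
    //     int n = row.size();
    //     Stream<ColumnData> s = row.stream();
    // after (Row extends Iterable<ColumnData>):
    int n = row.columnCount();
    java.util.stream.Stream<ColumnData> s = row.columnData().stream();
    for (ColumnData cd : row)       // enhanced-for still works via Iterable
        System.out.println(cd);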

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
index c4684e1..0342e39 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
@@ -133,7 +133,7 @@ public class UnfilteredSerializer
         LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
         Row.Deletion deletion = row.deletion();
         boolean hasComplexDeletion = row.hasComplexDeletion();
-        boolean hasAllColumns = (row.size() == headerColumns.size());
+        boolean hasAllColumns = (row.columnCount() == headerColumns.size());
         boolean hasExtendedFlags = hasExtendedFlags(row);
 
         if (isStatic)
@@ -181,7 +181,7 @@ public class UnfilteredSerializer
             header.writeDeletionTime(deletion.time(), out);
 
         if (!hasAllColumns)
-            Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);
+            Columns.serializer.serializeSubset(row.columns(), headerColumns, out);
 
         SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
         for (ColumnData data : row)
@@ -274,7 +274,7 @@ public class UnfilteredSerializer
         LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
         Row.Deletion deletion = row.deletion();
         boolean hasComplexDeletion = row.hasComplexDeletion();
-        boolean hasAllColumns = (row.size() == headerColumns.size());
+        boolean hasAllColumns = (row.columnCount() == headerColumns.size());
 
         if (!pkLiveness.isEmpty())
             size += header.timestampSerializedSize(pkLiveness.timestamp());
@@ -287,7 +287,7 @@ public class UnfilteredSerializer
             size += header.deletionTimeSerializedSize(deletion.time());
 
         if (!hasAllColumns)
-            size += Columns.serializer.serializedSubsetSize(Collections2.transform(row, ColumnData::column), header.columns(isStatic));
+            size += Columns.serializer.serializedSubsetSize(row.columns(), header.columns(isStatic));
 
         SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
         for (ColumnData data : row)
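
The substitution of row.columns() for the inline transform above is
behaviour-preserving: this same patch defines BTreeRow.columns() as
Collections2.transform(columnData(), ColumnData::column), and for BTreeRow
both row.size() (before) and row.columnCount() (after) reduce to
BTree.size(btree). Side by side, as a sketch:

    // Both yield the same lazily transformed view of the row's columns:
    Collection<ColumnDefinition> inline = Collections2.transform(row.columnData(), ColumnData::column);
    Collection<ColumnDefinition> viaApi = row.columns();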

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
index 0f6cba7..c2640a0 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
@@ -918,10 +918,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("UPDATE %s USING TIMESTAMP 1 SET v1=1 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         Row oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(1, oldRow.size());
+        assertEquals(1, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(0), 0);
         Row newRow = index.rowsUpdated.get(0).right;
-        assertEquals(1, newRow.size());
+        assertEquals(1, newRow.columnCount());
         validateCell(newRow.getCell(v1), v1, ByteBufferUtil.bytes(1), 1);
         index.reset();
 
@@ -929,11 +929,11 @@ public class SecondaryIndexTest extends CQLTester
         execute("UPDATE %s USING TIMESTAMP 2 SET v1=2, v2=2 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(2, oldRow.size());
+        assertEquals(2, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(1), 1);
         validateCell(oldRow.getCell(v2), v2, ByteBufferUtil.bytes(0), 0);
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(2, newRow.size());
+        assertEquals(2, newRow.columnCount());
         validateCell(newRow.getCell(v1), v1, ByteBufferUtil.bytes(2), 2);
         validateCell(newRow.getCell(v2), v2, ByteBufferUtil.bytes(2), 2);
         index.reset();
@@ -942,10 +942,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("DELETE v1 FROM %s USING TIMESTAMP 3 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(1, oldRow.size());
+        assertEquals(1, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(2), 2);
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(1, newRow.size());
+        assertEquals(1, newRow.columnCount());
         Cell newCell = newRow.getCell(v1);
         assertTrue(newCell.isTombstone());
         assertEquals(3, newCell.timestamp());
@@ -957,10 +957,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("INSERT INTO %s(k, c) VALUES (0, 0) USING TIMESTAMP 4");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(0, oldRow.size());
+        assertEquals(0, oldRow.columnCount());
         assertEquals(0, oldRow.primaryKeyLivenessInfo().timestamp());
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(0, newRow.size());
+        assertEquals(0, newRow.columnCount());
         assertEquals(4, newRow.primaryKeyLivenessInfo().timestamp());
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
index ca0dfa5..940b4f9 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
@@ -282,7 +282,7 @@ public class SinglePartitionSliceCommandTest
                 assertTrue(unfiltered.isRow());
                 Row row = (Row) unfiltered;
                 assertEquals(deletionTime, row.deletion().time().markedForDeleteAt());
-                assertEquals(0, row.size()); // no btree
+                assertEquals(0, row.columnCount()); // no btree
             }
             count++;
         }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[6/6] cassandra git commit: Merge branch 'cassandra-3.11' into trunk

Posted by be...@apache.org.
Merge branch 'cassandra-3.11' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d8c549b6
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d8c549b6
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d8c549b6

Branch: refs/heads/trunk
Commit: d8c549b689488e50ab398d52416e387860108037
Parents: 2ee9d68 e635317
Author: Benedict Elliott Smith <be...@apache.org>
Authored: Thu Nov 29 15:58:13 2018 +0000
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:58:13 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  7 ++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../apache/cassandra/cql3/GcCompactionTest.java    |  2 +-
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 8 files changed, 50 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/src/java/org/apache/cassandra/db/rows/AbstractRow.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/AbstractRow.java
index 24b088f,8558466..f719db5
--- a/src/java/org/apache/cassandra/db/rows/AbstractRow.java
+++ b/src/java/org/apache/cassandra/db/rows/AbstractRow.java
@@@ -91,19 -99,12 +91,24 @@@ public abstract class AbstractRow imple
              cd.validate();
      }
  
 +    public boolean hasInvalidDeletions()
 +    {
 +        if (primaryKeyLivenessInfo().isExpiring() && (primaryKeyLivenessInfo().ttl() < 0 || primaryKeyLivenessInfo().localExpirationTime() < 0))
 +            return true;
 +        if (!deletion().time().validate())
 +            return true;
 +        for (ColumnData cd : this)
 +            if (cd.hasInvalidDeletions())
 +                return true;
 +        return false;
 +    }
 +
+     public String toString()
+     {
+         return columnData().toString();
+     }
+ 
 -    public String toString(CFMetaData metadata)
 +    public String toString(TableMetadata metadata)
      {
          return toString(metadata, false);
      }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/BTreeRow.java
index fff7750,ba81a4e..dc3219a
--- a/src/java/org/apache/cassandra/db/rows/BTreeRow.java
+++ b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
@@@ -192,9 -191,14 +192,14 @@@ public class BTreeRow extends AbstractR
          return clustering;
      }
  
 -    public Collection<ColumnDefinition> columns()
 +    public Collection<ColumnMetadata> columns()
      {
-         return Collections2.transform(this, ColumnData::column);
+         return Collections2.transform(columnData(), ColumnData::column);
+     }
+ 
+     public int columnCount()
+     {
+         return BTree.size(btree);
      }
  
      public LivenessInfo primaryKeyLivenessInfo()
@@@ -229,15 -233,20 +234,20 @@@
          return cd.getCell(path);
      }
  
 -    public ComplexColumnData getComplexColumnData(ColumnDefinition c)
 +    public ComplexColumnData getComplexColumnData(ColumnMetadata c)
      {
          assert c.isComplex();
 -        return (ComplexColumnData) BTree.<Object>find(btree, ColumnDefinition.asymmetricColumnDataComparator, c);
 +        return (ComplexColumnData) BTree.<Object>find(btree, ColumnMetadata.asymmetricColumnDataComparator, c);
      }
  
-     public int size()
+     @Override
+     public Collection<ColumnData> columnData()
      {
-         return BTree.size(btree);
+         return new AbstractCollection<ColumnData>()
+         {
+             @Override public Iterator<ColumnData> iterator() { return BTreeRow.this.iterator(); }
+             @Override public int size() { return BTree.size(btree); }
+         };
      }
  
      public Iterator<ColumnData> iterator()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/Row.java
index 74c0040,9bff19d..0174adc
--- a/src/java/org/apache/cassandra/db/rows/Row.java
+++ b/src/java/org/apache/cassandra/db/rows/Row.java
@@@ -60,8 -60,14 +60,14 @@@ public interface Row extends Unfiltered
       * An in-natural-order collection of the columns for which data (incl. simple tombstones)
       * is present in this row.
       */
 -    public Collection<ColumnDefinition> columns();
 +    public Collection<ColumnMetadata> columns();
  
+ 
+     /**
+      * The number of columns for which data (incl. simple tombstones) is present in this row.
+      */
+     public int columnCount();
+ 
      /**
       * The row deletion.
       *

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
index 89bc3d5,926f3ef..7b48652
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
@@@ -227,9 -227,9 +227,9 @@@ public class UnfilteredSerialize
              header.writeDeletionTime(deletion.time(), out);
  
          if ((flags & HAS_ALL_COLUMNS) == 0)
-             Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);
+             Columns.serializer.serializeSubset(row.columns(), headerColumns, out);
  
 -        SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
 +        SearchIterator<ColumnMetadata, ColumnMetadata> si = headerColumns.iterator();
  
          try
          {
@@@ -350,12 -350,12 +350,12 @@@
              size += header.deletionTimeSerializedSize(deletion.time());
  
          if (!hasAllColumns)
-             size += Columns.serializer.serializedSubsetSize(Collections2.transform(row, ColumnData::column), header.columns(isStatic));
+             size += Columns.serializer.serializedSubsetSize(row.columns(), header.columns(isStatic));
  
 -        SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
 +        SearchIterator<ColumnMetadata, ColumnMetadata> si = headerColumns.iterator();
          for (ColumnData data : row)
          {
 -            ColumnDefinition column = si.next(data.column());
 +            ColumnMetadata column = si.next(data.column());
              assert column != null;
  
              if (data.column.isSimple())

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d8c549b6/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------




[4/6] cassandra git commit: Merge branch 'cassandra-3.0' into cassandra-3.11

Posted by be...@apache.org.
Merge branch 'cassandra-3.0' into cassandra-3.11


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e635317c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e635317c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e635317c

Branch: refs/heads/trunk
Commit: e635317cf8816fc8f130afc47cccc9bfd57a6cd7
Parents: 1084ad9 8404260
Author: Benedict Elliott Smith <be...@apache.org>
Authored: Thu Nov 29 15:56:40 2018 +0000
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:56:40 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  7 ++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../apache/cassandra/cql3/GcCompactionTest.java    |  2 +-
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 8 files changed, 50 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index c228e8f,40016a1..965d945
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,6 -1,5 +1,7 @@@
 -3.0.18
 +3.11.4
 + * Correct sstable sorting for garbagecollect and levelled compaction (CASSANDRA-14870)
 +Merged from 3.0:
+  * Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty (CASSANDRA-14588)
   * RangeTombstoneList doesn't properly clean up mergeable or superseded rts in some cases (CASSANDRA-14894)
   * Fix handling of collection tombstones for dropped columns from legacy sstables (CASSANDRA-14912)
   * Throw exception if Columns serialized subset encode more columns than possible (CASSANDRA-14591)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/AbstractRow.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
index c74f756,0342e39..926f3ef
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
@@@ -226,41 -180,24 +226,41 @@@ public class UnfilteredSerialize
          if ((flags & HAS_DELETION) != 0)
              header.writeDeletionTime(deletion.time(), out);
  
 -        if (!hasAllColumns)
 +        if ((flags & HAS_ALL_COLUMNS) == 0)
-             Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);
+             Columns.serializer.serializeSubset(row.columns(), headerColumns, out);
  
          SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
 -        for (ColumnData data : row)
 +
 +        try
          {
 -            // We can obtain the column for data directly from data.column(). However, if the cell/complex data
 -            // originates from a sstable, the column we'll get will have the type used when the sstable was serialized,
 -            // and if that type has been recently altered, that may not be the type we want to serialize the column
 -            // with. So we use the ColumnDefinition from the "header" which is "current". Also see #11810 for what
 -            // happens if we don't do that.
 -            ColumnDefinition column = si.next(data.column());
 -            assert column != null;
 +            row.apply(cd -> {
 +                // We can obtain the column for data directly from data.column(). However, if the cell/complex data
 +                // originates from a sstable, the column we'll get will have the type used when the sstable was serialized,
 +                // and if that type has been recently altered, that may not be the type we want to serialize the column
 +                // with. So we use the ColumnDefinition from the "header" which is "current". Also see #11810 for what
 +                // happens if we don't do that.
 +                ColumnDefinition column = si.next(cd.column());
 +                assert column != null : cd.column.toString();
 +
 +                try
 +                {
 +                    if (cd.column.isSimple())
 +                        Cell.serializer.serialize((Cell) cd, column, out, pkLiveness, header);
 +                    else
 +                        writeComplexColumn((ComplexColumnData) cd, column, (flags & HAS_COMPLEX_DELETION) != 0, pkLiveness, header, out);
 +                }
 +                catch (IOException e)
 +                {
 +                    throw new WrappedException(e);
 +                }
 +            }, false);
 +        }
 +        catch (WrappedException e)
 +        {
 +            if (e.getCause() instanceof IOException)
 +                throw (IOException) e.getCause();
  
 -            if (data.column.isSimple())
 -                Cell.serializer.serialize((Cell) data, column, out, pkLiveness, header);
 -            else
 -                writeComplexColumn((ComplexColumnData) data, column, hasComplexDeletion, pkLiveness, header, out);
 +            throw e;
          }
      }
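
The 3.11 side preserved by this merge pushes serialization through
row.apply(...), smuggling the checked IOException out of the lambda via
WrappedException and rethrowing it at the boundary. The idiom in a
self-contained sketch (invented names; the JDK's UncheckedIOException stands
in for Cassandra's WrappedException):

    import java.io.IOException;
    import java.io.UncheckedIOException;

    final class IoTunnel
    {
        interface IoConsumer<T> { void accept(T t) throws IOException; }

        // Run a throwing body inside an API that only accepts non-throwing
        // callbacks, then unwrap the checked exception at the boundary.
        static <T> void forEach(Iterable<T> items, IoConsumer<T> body) throws IOException
        {
            try
            {
                items.forEach(t -> {
                    try { body.accept(t); }
                    catch (IOException e) { throw new UncheckedIOException(e); }
                });
            }
            catch (UncheckedIOException e)
            {
                throw e.getCause(); // the original IOException
            }
        }
    }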
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
index 548cdc1,0000000..3af5dee
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
@@@ -1,461 -1,0 +1,461 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.cql3;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.Set;
 +import java.util.function.Function;
 +
 +import com.google.common.collect.Iterables;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.db.*;
 +import org.apache.cassandra.db.compaction.CompactionManager;
 +import org.apache.cassandra.db.rows.*;
 +import org.apache.cassandra.io.sstable.ISSTableScanner;
 +import org.apache.cassandra.io.sstable.format.SSTableReader;
 +import org.apache.cassandra.schema.CompactionParams;
 +import org.apache.cassandra.utils.FBUtilities;
 +
 +public class GcCompactionTest extends CQLTester
 +{
 +    static final int KEY_COUNT = 10;
 +    static final int CLUSTERING_COUNT = 20;
 +
 +    // Test needs synchronous table drop to avoid flushes causing flaky failures
 +
 +    @Override
 +    protected String createTable(String query)
 +    {
 +        return super.createTable(KEYSPACE_PER_TEST, query);
 +    }
 +
 +    @Override
 +    protected UntypedResultSet execute(String query, Object... values) throws Throwable
 +    {
 +        return executeFormattedQuery(formatQuery(KEYSPACE_PER_TEST, query), values);
 +    }
 +
 +    @Override
 +    public ColumnFamilyStore getCurrentColumnFamilyStore()
 +    {
 +        return super.getCurrentColumnFamilyStore(KEYSPACE_PER_TEST);
 +    }
 +
 +    public void flush()
 +    {
 +        flush(KEYSPACE_PER_TEST);
 +    }
 +
 +    @Test
 +    public void testGcCompactionPartitions() throws Throwable
 +    {
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY((key, column), data)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +
 +    }
 +
 +    @Test
 +    public void testGcCompactionRows() throws Throwable
 +    {
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +
 +    }
 +
 +    @Test
 +    public void testGcCompactionRanges() throws Throwable
 +    {
 +
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  col2 int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column, data)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +    }
 +
 +    private void runCompactionTest(String tableDef) throws Throwable
 +    {
 +        createTable(tableDef);
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int rowCount = countRows(table0);
 +
 +        deleteWithSomeInserts(3, 5, 10);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countRows(table1) > 0);
 +        assertTrue(countTombstoneMarkers(table1) > 0);
 +
 +        deleteWithSomeInserts(5, 6, 0);
 +        flush();
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table2 = getNewTable(readers);
 +        assertEquals(0, countRows(table2));
 +        assertTrue(countTombstoneMarkers(table2) > 0);
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertTrue(rowCount > countRows(table3));
 +    }
 +
 +    @Test
 +    public void testGarbageCollectOrder() throws Throwable
 +    {
 +        // partition-level deletions, 0 gc_grace
 +        createTable("CREATE TABLE %s(" +
 +                    "  key int," +
 +                    "  column int," +
 +                    "  col2 int," +
 +                    "  data int," +
 +                    "  extra text," +
 +                    "  PRIMARY KEY((key, column))" +
 +                    ") WITH gc_grace_seconds = 0;"
 +        );
 +
 +        assertEquals(1, getCurrentColumnFamilyStore().gcBefore(1)); // make sure gc_grace is 0
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int rowCount0 = countRows(table0);
 +
 +        deleteWithSomeInserts(3, 5, 10);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        final int rowCount1 = countRows(table1);
 +        assertTrue(rowCount1 > 0);
 +        assertTrue(countTombstoneMarkers(table1) > 0);
 +
 +        deleteWithSomeInserts(2, 4, 0);
 +        flush();
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table2 = getNewTable(readers);
 +        assertEquals(0, countRows(table2));
 +        assertTrue(countTombstoneMarkers(table2) > 0);
 +
 +        // Wait a little to make sure nowInSeconds is greater than gcBefore
 +        Thread.sleep(1000);
 +
 +        CompactionManager.AllSSTableOpStatus status =
 +                CompactionManager.instance.performGarbageCollection(getCurrentColumnFamilyStore(), CompactionParams.TombstoneOption.ROW, 1);
 +        assertEquals(CompactionManager.AllSSTableOpStatus.SUCCESSFUL, status);
 +
 +        SSTableReader[] tables = cfs.getLiveSSTables().toArray(new SSTableReader[0]);
 +        Arrays.sort(tables, (o1, o2) -> Integer.compare(o1.descriptor.generation, o2.descriptor.generation));  // by order of compaction
 +
 +        // Make sure deleted data was removed
 +        assertTrue(rowCount0 > countRows(tables[0]));
 +        assertTrue(rowCount1 > countRows(tables[1]));
 +
 +        // Make sure all tombstones got purged
 +        for (SSTableReader t : tables)
 +        {
 +            assertEquals("Table " + t + " has tombstones", 0, countTombstoneMarkers(t));
 +        }
 +
 +        // The last table should have become empty and be removed
 +        assertEquals(2, tables.length);
 +    }
 +
 +    @Test
 +    public void testGcCompactionCells() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countCells(table0);
 +
 +        deleteWithSomeInserts(3, 0, 2);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertTrue(cellCount > countCells(table3));
 +    }
 +
 +    @Test
 +    public void testGcCompactionStatic() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int static," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countStaticCells(table0);
 +        assertEquals(KEY_COUNT, cellCount);
 +
 +        execute("DELETE data FROM %s WHERE key = 0");   // delete static cell
 +        execute("INSERT INTO %s (key, data) VALUES (1, 0)");  // overwrite static cell
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countStaticCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertEquals(cellCount - 2, countStaticCells(table3));
 +    }
 +
 +    @Test
 +    public void testGcCompactionComplexColumn() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  data map<int, int>," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("UPDATE %s SET data[?] = ? WHERE key = ?", j, i+j, i);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countComplexCells(table0);
 +
 +        deleteWithSomeInsertsComplexColumn(3, 5, 8);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countComplexCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertEquals(cellCount - 23, countComplexCells(table3));
 +    }
 +
 +    @Test
 +    public void testLocalDeletionTime() throws Throwable
 +    {
 +        createTable("create table %s (k int, c1 int, primary key (k, c1)) with compaction = {'class': 'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones':'row'}");
 +        execute("delete from %s where k = 1");
 +        Set<SSTableReader> readers = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
 +        getCurrentColumnFamilyStore().forceBlockingFlush();
 +        SSTableReader oldSSTable = getNewTable(readers);
 +        Thread.sleep(2000);
 +        execute("delete from %s where k = 1");
 +        getCurrentColumnFamilyStore().forceBlockingFlush();
 +        SSTableReader newTable = getNewTable(readers);
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(oldSSTable.getFilename());
 +
 +        // Old table now doesn't contain any data and should disappear.
 +        assertEquals(Collections.singleton(newTable), getCurrentColumnFamilyStore().getLiveSSTables());
 +    }
 +
 +    private SSTableReader getNewTable(Set<SSTableReader> readers)
 +    {
 +        Set<SSTableReader> newOnes = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
 +        newOnes.removeAll(readers);
 +        assertEquals(1, newOnes.size());
 +        readers.addAll(newOnes);
 +        return Iterables.get(newOnes, 0);
 +    }
 +
 +    void deleteWithSomeInserts(int key_step, int delete_step, int readd_step) throws Throwable
 +    {
 +        for (int i = 0; i < KEY_COUNT; i += key_step)
 +        {
 +            if (delete_step > 0)
 +                for (int j = i % delete_step; j < CLUSTERING_COUNT; j += delete_step)
 +                {
 +                    execute("DELETE FROM %s WHERE key = ? AND column = ?", i, j);
 +                }
 +            if (readd_step > 0)
 +                for (int j = i % readd_step; j < CLUSTERING_COUNT; j += readd_step)
 +                {
 +                    execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i-j, "readded " + i + ":" + j);
 +                }
 +        }
 +    }
 +
 +    void deleteWithSomeInsertsComplexColumn(int key_step, int delete_step, int readd_step) throws Throwable
 +    {
 +        for (int i = 0; i < KEY_COUNT; i += key_step)
 +        {
 +            if (delete_step > 0)
 +                for (int j = i % delete_step; j < CLUSTERING_COUNT; j += delete_step)
 +                {
 +                    execute("DELETE data[?] FROM %s WHERE key = ?", j, i);
 +                }
 +            if (readd_step > 0)
 +                for (int j = i % readd_step; j < CLUSTERING_COUNT; j += readd_step)
 +                {
 +                    execute("UPDATE %s SET data[?] = ? WHERE key = ?", j, -(i+j), i);
 +                }
 +        }
 +    }
 +
 +    int countTombstoneMarkers(SSTableReader reader)
 +    {
 +        int nowInSec = FBUtilities.nowInSeconds();
 +        return count(reader, x -> x.isRangeTombstoneMarker() || x.isRow() && ((Row) x).hasDeletion(nowInSec) ? 1 : 0, x -> x.partitionLevelDeletion().isLive() ? 0 : 1);
 +    }
 +
 +    int countRows(SSTableReader reader)
 +    {
 +        boolean enforceStrictLiveness = reader.metadata.enforceStrictLiveness();
 +        int nowInSec = FBUtilities.nowInSeconds();
 +        return count(reader, x -> x.isRow() && ((Row) x).hasLiveData(nowInSec, enforceStrictLiveness) ? 1 : 0, x -> 0);
 +    }
 +
 +    int countCells(SSTableReader reader)
 +    {
 +        return count(reader, x -> x.isRow() ? Iterables.size((Row) x) : 0, x -> 0);
 +    }
 +
 +    int countStaticCells(SSTableReader reader)
 +    {
 +        return count(reader, x -> 0, x -> Iterables.size(x.staticRow()));
 +    }
 +
 +    int countComplexCells(SSTableReader reader)
 +    {
-         return count(reader, x -> x.isRow() ? ((Row) x).stream().mapToInt(this::countComplex).sum() : 0, x -> 0);
++        return count(reader, x -> x.isRow() ? ((Row) x).columnData().stream().mapToInt(this::countComplex).sum() : 0, x -> 0);
 +    }
 +
 +    int countComplex(ColumnData c)
 +    {
 +        if (!(c instanceof ComplexColumnData))
 +            return 0;
 +        ComplexColumnData ccd = (ComplexColumnData) c;
 +        return ccd.cellsCount();
 +    }
 +
 +    int count(SSTableReader reader, Function<Unfiltered, Integer> predicate, Function<UnfilteredRowIterator, Integer> partitionPredicate)
 +    {
 +        int instances = 0;
 +        try (ISSTableScanner partitions = reader.getScanner())
 +        {
 +            while (partitions.hasNext())
 +            {
 +                try (UnfilteredRowIterator iter = partitions.next())
 +                {
 +                    instances += partitionPredicate.apply(iter);
 +                    while (iter.hasNext())
 +                    {
 +                        Unfiltered atom = iter.next();
 +                        instances += predicate.apply(atom);
 +                    }
 +                }
 +            }
 +        }
 +        return instances;
 +    }
 +}
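
The one functional change in this otherwise carried-over test file is in
countComplexCells(): Iterable does not expose stream(), so once Row stopped
extending Collection, the old ((Row) x).stream() no longer compiles and the
call now goes through the columnData() view. An equivalent formulation
(a sketch) that streams the Iterable directly:

    // java.util.stream.StreamSupport turns any Iterable into a Stream:
    int complexCells = java.util.stream.StreamSupport
                           .stream(((Row) x).spliterator(), false)
                           .mapToInt(this::countComplex)
                           .sum();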

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------




[2/6] cassandra git commit: Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

Posted by be...@apache.org.
Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

patch by Benedict; reviewed by Blake Eggleston for CASSANDRA-14588


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8404260f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8404260f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8404260f

Branch: refs/heads/cassandra-3.11
Commit: 8404260f1640efd14613c4591e5e918786fcde10
Parents: 7bdea44
Author: Benedict Elliott Smith <be...@apple.com>
Authored: Fri Jul 27 16:04:13 2018 +0100
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:50:25 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  8 +++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 7 files changed, 50 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 9ba36fa..40016a1 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 3.0.18
+ * Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty (CASSANDRA-14588)
  * RangeTombstoneList doesn't properly clean up mergeable or superseded rts in some cases (CASSANDRA-14894)
  * Fix handling of collection tombstones for dropped columns from legacy sstables (CASSANDRA-14912)
  * Throw exception if Columns serialized subset encode more columns than possible (CASSANDRA-14591)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/AbstractRow.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/AbstractRow.java b/src/java/org/apache/cassandra/db/rows/AbstractRow.java
index 67ed219..f91126b 100644
--- a/src/java/org/apache/cassandra/db/rows/AbstractRow.java
+++ b/src/java/org/apache/cassandra/db/rows/AbstractRow.java
@@ -19,6 +19,7 @@ package org.apache.cassandra.db.rows;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.util.AbstractCollection;
+import java.util.Collection;
 import java.util.Objects;
 
 import com.google.common.collect.Iterables;
@@ -35,7 +36,7 @@ import org.apache.cassandra.utils.FBUtilities;
  * Unless you have a very good reason not to, every row implementation
  * should probably extend this class.
  */
-public abstract class AbstractRow extends AbstractCollection<ColumnData> implements Row
+public abstract class AbstractRow implements Row
 {
     public Unfiltered.Kind kind()
     {
@@ -87,6 +88,11 @@ public abstract class AbstractRow extends AbstractCollection<ColumnData> impleme
             cd.validate();
     }
 
+    public String toString()
+    {
+        return columnData().toString();
+    }
+
     public String toString(CFMetaData metadata)
     {
         return toString(metadata, false);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/BTreeRow.java b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
index b07960c..e46d0cc 100644
--- a/src/java/org/apache/cassandra/db/rows/BTreeRow.java
+++ b/src/java/org/apache/cassandra/db/rows/BTreeRow.java
@@ -167,7 +167,12 @@ public class BTreeRow extends AbstractRow
 
     public Collection<ColumnDefinition> columns()
     {
-        return Collections2.transform(this, ColumnData::column);
+        return Collections2.transform(columnData(), ColumnData::column);
+    }
+
+    public int columnCount()
+    {
+        return BTree.size(btree);
     }
 
     public LivenessInfo primaryKeyLivenessInfo()
@@ -208,9 +213,14 @@ public class BTreeRow extends AbstractRow
         return (ComplexColumnData) BTree.<Object>find(btree, ColumnDefinition.asymmetricColumnDataComparator, c);
     }
 
-    public int size()
+    @Override
+    public Collection<ColumnData> columnData()
     {
-        return BTree.size(btree);
+        return new AbstractCollection<ColumnData>()
+        {
+            @Override public Iterator<ColumnData> iterator() { return BTreeRow.this.iterator(); }
+            @Override public int size() { return BTree.size(btree); }
+        };
     }
 
     public Iterator<ColumnData> iterator()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/Row.java b/src/java/org/apache/cassandra/db/rows/Row.java
index 3c97e09..dcb78f3 100644
--- a/src/java/org/apache/cassandra/db/rows/Row.java
+++ b/src/java/org/apache/cassandra/db/rows/Row.java
@@ -45,7 +45,7 @@ import org.apache.cassandra.utils.btree.UpdateFunction;
  * it's own data. For instance, a {@code Row} cannot contains a cell that is deleted by its own
  * row deletion.
  */
-public interface Row extends Unfiltered, Collection<ColumnData>
+public interface Row extends Unfiltered, Iterable<ColumnData>
 {
     /**
      * The clustering values for this row.
@@ -59,6 +59,12 @@ public interface Row extends Unfiltered, Collection<ColumnData>
      */
     public Collection<ColumnDefinition> columns();
 
+
+    /**
+     * The number of columns for which data (incl. simple tombstones) is present in this row.
+     */
+    public int columnCount();
+
     /**
      * The row deletion.
      *
@@ -148,6 +154,15 @@ public interface Row extends Unfiltered, Collection<ColumnData>
     public Iterable<Cell> cells();
 
     /**
+     * A collection of the ColumnData representation of this row, for columns with some data (possibly not live) present
+     * <p>
+     * The data is returned in column order.
+     *
+     * @return a Collection of the non-empty ColumnData for this row.
+     */
+    public Collection<ColumnData> columnData();
+
+    /**
      * An iterable over the cells of this row that return cells in "legacy order".
      * <p>
      * In 3.0+, columns are sorted so that all simple columns are before all complex columns. Previously

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
index c4684e1..0342e39 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
@@ -133,7 +133,7 @@ public class UnfilteredSerializer
         LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
         Row.Deletion deletion = row.deletion();
         boolean hasComplexDeletion = row.hasComplexDeletion();
-        boolean hasAllColumns = (row.size() == headerColumns.size());
+        boolean hasAllColumns = (row.columnCount() == headerColumns.size());
         boolean hasExtendedFlags = hasExtendedFlags(row);
 
         if (isStatic)
@@ -181,7 +181,7 @@ public class UnfilteredSerializer
             header.writeDeletionTime(deletion.time(), out);
 
         if (!hasAllColumns)
-            Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);
+            Columns.serializer.serializeSubset(row.columns(), headerColumns, out);
 
         SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
         for (ColumnData data : row)
@@ -274,7 +274,7 @@ public class UnfilteredSerializer
         LivenessInfo pkLiveness = row.primaryKeyLivenessInfo();
         Row.Deletion deletion = row.deletion();
         boolean hasComplexDeletion = row.hasComplexDeletion();
-        boolean hasAllColumns = (row.size() == headerColumns.size());
+        boolean hasAllColumns = (row.columnCount() == headerColumns.size());
 
         if (!pkLiveness.isEmpty())
             size += header.timestampSerializedSize(pkLiveness.timestamp());
@@ -287,7 +287,7 @@ public class UnfilteredSerializer
             size += header.deletionTimeSerializedSize(deletion.time());
 
         if (!hasAllColumns)
-            size += Columns.serializer.serializedSubsetSize(Collections2.transform(row, ColumnData::column), header.columns(isStatic));
+            size += Columns.serializer.serializedSubsetSize(row.columns(), header.columns(isStatic));
 
         SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
         for (ColumnData data : row)
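Two things change at these call sites: the all-columns test now compares columnCount() rather than Collection.size(), and the subset is fed row.columns() directly. The latter is forced by the interface change, since Guava's Collections2.transform accepts a Collection and Row is now only Iterable. A toy sketch of the subset decision, with lists of names standing in for the real column types:

    import java.util.Arrays;
    import java.util.List;

    class SubsetSketch
    {
        // Mirrors: hasAllColumns = (row.columnCount() == headerColumns.size())
        static boolean hasAllColumns(List<String> rowColumns, List<String> headerColumns)
        {
            return rowColumns.size() == headerColumns.size();
        }

        public static void main(String[] args)
        {
            List<String> header = Arrays.asList("v1", "v2");
            List<String> present = Arrays.asList("v1");
            // The subset encoding is only written when columns are missing.
            if (!hasAllColumns(present, header))
                System.out.println("serialize subset: " + present);
        }
    }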

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
index 0f6cba7..c2640a0 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
@@ -918,10 +918,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("UPDATE %s USING TIMESTAMP 1 SET v1=1 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         Row oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(1, oldRow.size());
+        assertEquals(1, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(0), 0);
         Row newRow = index.rowsUpdated.get(0).right;
-        assertEquals(1, newRow.size());
+        assertEquals(1, newRow.columnCount());
         validateCell(newRow.getCell(v1), v1, ByteBufferUtil.bytes(1), 1);
         index.reset();
 
@@ -929,11 +929,11 @@ public class SecondaryIndexTest extends CQLTester
         execute("UPDATE %s USING TIMESTAMP 2 SET v1=2, v2=2 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(2, oldRow.size());
+        assertEquals(2, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(1), 1);
         validateCell(oldRow.getCell(v2), v2, ByteBufferUtil.bytes(0), 0);
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(2, newRow.size());
+        assertEquals(2, newRow.columnCount());
         validateCell(newRow.getCell(v1), v1, ByteBufferUtil.bytes(2), 2);
         validateCell(newRow.getCell(v2), v2, ByteBufferUtil.bytes(2), 2);
         index.reset();
@@ -942,10 +942,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("DELETE v1 FROM %s USING TIMESTAMP 3 WHERE k=0 AND c=0");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(1, oldRow.size());
+        assertEquals(1, oldRow.columnCount());
         validateCell(oldRow.getCell(v1), v1, ByteBufferUtil.bytes(2), 2);
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(1, newRow.size());
+        assertEquals(1, newRow.columnCount());
         Cell newCell = newRow.getCell(v1);
         assertTrue(newCell.isTombstone());
         assertEquals(3, newCell.timestamp());
@@ -957,10 +957,10 @@ public class SecondaryIndexTest extends CQLTester
         execute("INSERT INTO %s(k, c) VALUES (0, 0) USING TIMESTAMP 4");
         assertEquals(1, index.rowsUpdated.size());
         oldRow = index.rowsUpdated.get(0).left;
-        assertEquals(0, oldRow.size());
+        assertEquals(0, oldRow.columnCount());
         assertEquals(0, oldRow.primaryKeyLivenessInfo().timestamp());
         newRow = index.rowsUpdated.get(0).right;
-        assertEquals(0, newRow.size());
+        assertEquals(0, newRow.columnCount());
         assertEquals(4, newRow.primaryKeyLivenessInfo().timestamp());
     }
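The DELETE case above is worth pausing on: columnCount() counts columns for which any data is present, including simple tombstones, which is why the deleted v1 still contributes 1. A toy model of that bookkeeping, with a map as a hypothetical stand-in for the row:

    import java.util.LinkedHashMap;
    import java.util.Map;

    class TombstoneCountSketch
    {
        public static void main(String[] args)
        {
            Map<String, String> row = new LinkedHashMap<>();
            row.put("v1", "2");            // live cell with value 2
            row.put("v1", "<tombstone>");  // DELETE v1 replaces the cell rather than removing it
            System.out.println(row.size()); // prints 1: the tombstone still occupies the column
        }
    }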
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/8404260f/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
index ca0dfa5..940b4f9 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
@@ -282,7 +282,7 @@ public class SinglePartitionSliceCommandTest
                 assertTrue(unfiltered.isRow());
                 Row row = (Row) unfiltered;
                 assertEquals(deletionTime, row.deletion().time().markedForDeleteAt());
-                assertEquals(0, row.size()); // no btree
+                assertEquals(0, row.columnCount()); // no btree
             }
             count++;
         }
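This assertion captures the original bug most directly: a row carrying only a row deletion has columnCount() == 0 ("no btree"), yet it is not empty in the Unfiltered sense, because the deletion itself is information that must survive serialization. A toy model of the distinction, using a hypothetical class rather than the real Row:

    class DeletionRowSketch
    {
        final boolean hasDeletion;
        final int columnCount;

        DeletionRowSketch(boolean hasDeletion, int columnCount)
        {
            this.hasDeletion = hasDeletion;
            this.columnCount = columnCount;
        }

        // Unfiltered.isEmpty(): no information of any kind.
        boolean isEmpty() { return !hasDeletion && columnCount == 0; }

        public static void main(String[] args)
        {
            DeletionRowSketch deleted = new DeletionRowSketch(true, 0);
            System.out.println(deleted.columnCount); // prints 0: no column data
            System.out.println(deleted.isEmpty());   // prints false: still carries a deletion
        }
    }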




[5/6] cassandra git commit: Merge branch 'cassandra-3.0' into cassandra-3.11

Posted by be...@apache.org.
Merge branch 'cassandra-3.0' into cassandra-3.11


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e635317c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e635317c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e635317c

Branch: refs/heads/cassandra-3.11
Commit: e635317cf8816fc8f130afc47cccc9bfd57a6cd7
Parents: 1084ad9 8404260
Author: Benedict Elliott Smith <be...@apache.org>
Authored: Thu Nov 29 15:56:40 2018 +0000
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:56:40 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  7 ++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../apache/cassandra/cql3/GcCompactionTest.java    |  2 +-
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 8 files changed, 50 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index c228e8f,40016a1..965d945
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,6 -1,5 +1,7 @@@
 -3.0.18
 +3.11.4
 + * Correct sstable sorting for garbagecollect and levelled compaction (CASSANDRA-14870)
 +Merged from 3.0:
+  * Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty (CASSANDRA-14588)
   * RangeTombstoneList doesn't properly clean up mergeable or superseded rts in some cases (CASSANDRA-14894)
   * Fix handling of collection tombstones for dropped columns from legacy sstables (CASSANDRA-14912)
   * Throw exception if Columns serialized subset encode more columns than possible (CASSANDRA-14591)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/AbstractRow.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
index c74f756,0342e39..926f3ef
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
@@@ -226,41 -180,24 +226,41 @@@ public class UnfilteredSerializer
          if ((flags & HAS_DELETION) != 0)
              header.writeDeletionTime(deletion.time(), out);
  
 -        if (!hasAllColumns)
 +        if ((flags & HAS_ALL_COLUMNS) == 0)
-             Columns.serializer.serializeSubset(Collections2.transform(row, ColumnData::column), headerColumns, out);
+             Columns.serializer.serializeSubset(row.columns(), headerColumns, out);
  
          SearchIterator<ColumnDefinition, ColumnDefinition> si = headerColumns.iterator();
 -        for (ColumnData data : row)
 +
 +        try
          {
 -            // We can obtain the column for data directly from data.column(). However, if the cell/complex data
 -            // originates from a sstable, the column we'll get will have the type used when the sstable was serialized,
 -            // and if that type have been recently altered, that may not be the type we want to serialize the column
 -            // with. So we use the ColumnDefinition from the "header" which is "current". Also see #11810 for what
 -            // happens if we don't do that.
 -            ColumnDefinition column = si.next(data.column());
 -            assert column != null;
 +            row.apply(cd -> {
 +                // We can obtain the column for data directly from data.column(). However, if the cell/complex data
 +                // originates from a sstable, the column we'll get will have the type used when the sstable was serialized,
 +                // and if that type has been recently altered, that may not be the type we want to serialize the column
 +                // with. So we use the ColumnDefinition from the "header" which is "current". Also see #11810 for what
 +                // happens if we don't do that.
 +                ColumnDefinition column = si.next(cd.column());
 +                assert column != null : cd.column.toString();
 +
 +                try
 +                {
 +                    if (cd.column.isSimple())
 +                        Cell.serializer.serialize((Cell) cd, column, out, pkLiveness, header);
 +                    else
 +                        writeComplexColumn((ComplexColumnData) cd, column, (flags & HAS_COMPLEX_DELETION) != 0, pkLiveness, header, out);
 +                }
 +                catch (IOException e)
 +                {
 +                    throw new WrappedException(e);
 +                }
 +            }, false);
 +        }
 +        catch (WrappedException e)
 +        {
 +            if (e.getCause() instanceof IOException)
 +                throw (IOException) e.getCause();
  
 -            if (data.column.isSimple())
 -                Cell.serializer.serialize((Cell) data, column, out, pkLiveness, header);
 -            else
 -                writeComplexColumn((ComplexColumnData) data, column, hasComplexDeletion, pkLiveness, header, out);
 +            throw e;
          }
      }
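The merged 3.11 body also shows the checked-exception plumbing that comes with row.apply(...): the visiting lambda cannot throw IOException, so it is wrapped inside the lambda and unwrapped again at the boundary. A generic, self-contained sketch of that pattern (the WrappedException below is a local stand-in, not Cassandra's class):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.function.Consumer;

    class ApplyPatternSketch
    {
        static class WrappedException extends RuntimeException
        {
            WrappedException(Throwable cause) { super(cause); }
        }

        // Stand-in for Row.apply(Consumer): visits elements, declares no checked throws.
        static void apply(Iterable<String> items, Consumer<String> fn)
        {
            for (String s : items)
                fn.accept(s);
        }

        static void serialize(Iterable<String> row) throws IOException
        {
            try
            {
                apply(row, item -> {
                    try
                    {
                        write(item);
                    }
                    catch (IOException e)
                    {
                        throw new WrappedException(e); // smuggle the checked exception past Consumer
                    }
                });
            }
            catch (WrappedException e)
            {
                if (e.getCause() instanceof IOException)
                    throw (IOException) e.getCause(); // restore the original checked type
                throw e;
            }
        }

        static void write(String s) throws IOException
        {
            System.out.println("wrote " + s); // pretend I/O that may fail
        }

        public static void main(String[] args) throws IOException
        {
            serialize(Arrays.asList("a", "b"));
        }
    }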
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
index 548cdc1,0000000..3af5dee
mode 100644,000000..100644
--- a/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
+++ b/test/unit/org/apache/cassandra/cql3/GcCompactionTest.java
@@@ -1,461 -1,0 +1,461 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.cql3;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.Set;
 +import java.util.function.Function;
 +
 +import com.google.common.collect.Iterables;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.db.*;
 +import org.apache.cassandra.db.compaction.CompactionManager;
 +import org.apache.cassandra.db.rows.*;
 +import org.apache.cassandra.io.sstable.ISSTableScanner;
 +import org.apache.cassandra.io.sstable.format.SSTableReader;
 +import org.apache.cassandra.schema.CompactionParams;
 +import org.apache.cassandra.utils.FBUtilities;
 +
 +public class GcCompactionTest extends CQLTester
 +{
 +    static final int KEY_COUNT = 10;
 +    static final int CLUSTERING_COUNT = 20;
 +
 +    // Test needs synchronous table drop to avoid flushes causing flaky failures
 +
 +    @Override
 +    protected String createTable(String query)
 +    {
 +        return super.createTable(KEYSPACE_PER_TEST, query);
 +    }
 +
 +    @Override
 +    protected UntypedResultSet execute(String query, Object... values) throws Throwable
 +    {
 +        return executeFormattedQuery(formatQuery(KEYSPACE_PER_TEST, query), values);
 +    }
 +
 +    @Override
 +    public ColumnFamilyStore getCurrentColumnFamilyStore()
 +    {
 +        return super.getCurrentColumnFamilyStore(KEYSPACE_PER_TEST);
 +    }
 +
 +    public void flush()
 +    {
 +        flush(KEYSPACE_PER_TEST);
 +    }
 +
 +    @Test
 +    public void testGcCompactionPartitions() throws Throwable
 +    {
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY((key, column), data)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +
 +    }
 +
 +    @Test
 +    public void testGcCompactionRows() throws Throwable
 +    {
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +
 +    }
 +
 +    @Test
 +    public void testGcCompactionRanges() throws Throwable
 +    {
 +
 +        runCompactionTest("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  col2 int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column, data)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'row'  };"
 +                          );
 +    }
 +
 +    private void runCompactionTest(String tableDef) throws Throwable
 +    {
 +        createTable(tableDef);
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int rowCount = countRows(table0);
 +
 +        deleteWithSomeInserts(3, 5, 10);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countRows(table1) > 0);
 +        assertTrue(countTombstoneMarkers(table1) > 0);
 +
 +        deleteWithSomeInserts(5, 6, 0);
 +        flush();
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table2 = getNewTable(readers);
 +        assertEquals(0, countRows(table2));
 +        assertTrue(countTombstoneMarkers(table2) > 0);
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertTrue(rowCount > countRows(table3));
 +    }
 +
 +    @Test
 +    public void testGarbageCollectOrder() throws Throwable
 +    {
 +        // partition-level deletions, 0 gc_grace
 +        createTable("CREATE TABLE %s(" +
 +                    "  key int," +
 +                    "  column int," +
 +                    "  col2 int," +
 +                    "  data int," +
 +                    "  extra text," +
 +                    "  PRIMARY KEY((key, column))" +
 +                    ") WITH gc_grace_seconds = 0;"
 +        );
 +
 +        assertEquals(1, getCurrentColumnFamilyStore().gcBefore(1)); // make sure gc_grace is 0
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int rowCount0 = countRows(table0);
 +
 +        deleteWithSomeInserts(3, 5, 10);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        final int rowCount1 = countRows(table1);
 +        assertTrue(rowCount1 > 0);
 +        assertTrue(countTombstoneMarkers(table1) > 0);
 +
 +        deleteWithSomeInserts(2, 4, 0);
 +        flush();
 +        assertEquals(3, cfs.getLiveSSTables().size());
 +        SSTableReader table2 = getNewTable(readers);
 +        assertEquals(0, countRows(table2));
 +        assertTrue(countTombstoneMarkers(table2) > 0);
 +
 +        // Wait a little to make sure nowInSeconds is greater than gcBefore
 +        Thread.sleep(1000);
 +
 +        CompactionManager.AllSSTableOpStatus status =
 +                CompactionManager.instance.performGarbageCollection(getCurrentColumnFamilyStore(), CompactionParams.TombstoneOption.ROW, 1);
 +        assertEquals(CompactionManager.AllSSTableOpStatus.SUCCESSFUL, status);
 +
 +        SSTableReader[] tables = cfs.getLiveSSTables().toArray(new SSTableReader[0]);
 +        Arrays.sort(tables, (o1, o2) -> Integer.compare(o1.descriptor.generation, o2.descriptor.generation));  // by order of compaction
 +
 +        // Make sure deleted data was removed
 +        assertTrue(rowCount0 > countRows(tables[0]));
 +        assertTrue(rowCount1 > countRows(tables[1]));
 +
 +        // Make sure all tombstones got purged
 +        for (SSTableReader t : tables)
 +        {
 +            assertEquals("Table " + t + " has tombstones", 0, countTombstoneMarkers(t));
 +        }
 +
 +        // The last table should have become empty and be removed
 +        assertEquals(2, tables.length);
 +    }
 +
 +    @Test
 +    public void testGcCompactionCells() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countCells(table0);
 +
 +        deleteWithSomeInserts(3, 0, 2);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertTrue(cellCount > countCells(table3));
 +    }
 +
 +    @Test
 +    public void testGcCompactionStatic() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  column int," +
 +                          "  data int static," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key, column)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i+j, "" + i + ":" + j);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countStaticCells(table0);
 +        assertEquals(KEY_COUNT, cellCount);
 +
 +        execute("DELETE data FROM %s WHERE key = 0");   // delete static cell
 +        execute("INSERT INTO %s (key, data) VALUES (1, 0)");  // overwrite static cell
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countStaticCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertEquals(cellCount - 2, countStaticCells(table3));
 +    }
 +
 +    @Test
 +    public void testGcCompactionComplexColumn() throws Throwable
 +    {
 +        createTable("CREATE TABLE %s(" +
 +                          "  key int," +
 +                          "  data map<int, int>," +
 +                          "  extra text," +
 +                          "  PRIMARY KEY(key)" +
 +                          ") WITH compaction = { 'class' :  'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones' : 'cell'  };"
 +                          );
 +
 +        for (int i = 0; i < KEY_COUNT; ++i)
 +            for (int j = 0; j < CLUSTERING_COUNT; ++j)
 +                execute("UPDATE %s SET data[?] = ? WHERE key = ?", j, i+j, i);
 +
 +        Set<SSTableReader> readers = new HashSet<>();
 +        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
 +
 +        flush();
 +        assertEquals(1, cfs.getLiveSSTables().size());
 +        SSTableReader table0 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +        int cellCount = countComplexCells(table0);
 +
 +        deleteWithSomeInsertsComplexColumn(3, 5, 8);
 +        flush();
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table1 = getNewTable(readers);
 +        assertTrue(countComplexCells(table1) > 0);
 +        assertEquals(0, countTombstoneMarkers(table0));
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(table0.getFilename());
 +
 +        assertEquals(2, cfs.getLiveSSTables().size());
 +        SSTableReader table3 = getNewTable(readers);
 +        assertEquals(0, countTombstoneMarkers(table3));
 +        assertEquals(cellCount - 23, countComplexCells(table3));
 +    }
 +
 +    @Test
 +    public void testLocalDeletionTime() throws Throwable
 +    {
 +        createTable("create table %s (k int, c1 int, primary key (k, c1)) with compaction = {'class': 'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones':'row'}");
 +        execute("delete from %s where k = 1");
 +        Set<SSTableReader> readers = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
 +        getCurrentColumnFamilyStore().forceBlockingFlush();
 +        SSTableReader oldSSTable = getNewTable(readers);
 +        Thread.sleep(2000);
 +        execute("delete from %s where k = 1");
 +        getCurrentColumnFamilyStore().forceBlockingFlush();
 +        SSTableReader newTable = getNewTable(readers);
 +
 +        CompactionManager.instance.forceUserDefinedCompaction(oldSSTable.getFilename());
 +
 +        // Old table now doesn't contain any data and should disappear.
 +        assertEquals(Collections.singleton(newTable), getCurrentColumnFamilyStore().getLiveSSTables());
 +    }
 +
 +    private SSTableReader getNewTable(Set<SSTableReader> readers)
 +    {
 +        Set<SSTableReader> newOnes = new HashSet<>(getCurrentColumnFamilyStore().getLiveSSTables());
 +        newOnes.removeAll(readers);
 +        assertEquals(1, newOnes.size());
 +        readers.addAll(newOnes);
 +        return Iterables.get(newOnes, 0);
 +    }
 +
 +    void deleteWithSomeInserts(int key_step, int delete_step, int readd_step) throws Throwable
 +    {
 +        for (int i = 0; i < KEY_COUNT; i += key_step)
 +        {
 +            if (delete_step > 0)
 +                for (int j = i % delete_step; j < CLUSTERING_COUNT; j += delete_step)
 +                {
 +                    execute("DELETE FROM %s WHERE key = ? AND column = ?", i, j);
 +                }
 +            if (readd_step > 0)
 +                for (int j = i % readd_step; j < CLUSTERING_COUNT; j += readd_step)
 +                {
 +                    execute("INSERT INTO %s (key, column, data, extra) VALUES (?, ?, ?, ?)", i, j, i-j, "readded " + i + ":" + j);
 +                }
 +        }
 +    }
 +
 +    void deleteWithSomeInsertsComplexColumn(int key_step, int delete_step, int readd_step) throws Throwable
 +    {
 +        for (int i = 0; i < KEY_COUNT; i += key_step)
 +        {
 +            if (delete_step > 0)
 +                for (int j = i % delete_step; j < CLUSTERING_COUNT; j += delete_step)
 +                {
 +                    execute("DELETE data[?] FROM %s WHERE key = ?", j, i);
 +                }
 +            if (readd_step > 0)
 +                for (int j = i % readd_step; j < CLUSTERING_COUNT; j += readd_step)
 +                {
 +                    execute("UPDATE %s SET data[?] = ? WHERE key = ?", j, -(i+j), i);
 +                }
 +        }
 +    }
 +
 +    int countTombstoneMarkers(SSTableReader reader)
 +    {
 +        int nowInSec = FBUtilities.nowInSeconds();
 +        return count(reader, x -> x.isRangeTombstoneMarker() || x.isRow() && ((Row) x).hasDeletion(nowInSec) ? 1 : 0, x -> x.partitionLevelDeletion().isLive() ? 0 : 1);
 +    }
 +
 +    int countRows(SSTableReader reader)
 +    {
 +        boolean enforceStrictLiveness = reader.metadata.enforceStrictLiveness();
 +        int nowInSec = FBUtilities.nowInSeconds();
 +        return count(reader, x -> x.isRow() && ((Row) x).hasLiveData(nowInSec, enforceStrictLiveness) ? 1 : 0, x -> 0);
 +    }
 +
 +    int countCells(SSTableReader reader)
 +    {
 +        return count(reader, x -> x.isRow() ? Iterables.size((Row) x) : 0, x -> 0);
 +    }
 +
 +    int countStaticCells(SSTableReader reader)
 +    {
 +        return count(reader, x -> 0, x -> Iterables.size(x.staticRow()));
 +    }
 +
 +    int countComplexCells(SSTableReader reader)
 +    {
-         return count(reader, x -> x.isRow() ? ((Row) x).stream().mapToInt(this::countComplex).sum() : 0, x -> 0);
++        return count(reader, x -> x.isRow() ? ((Row) x).columnData().stream().mapToInt(this::countComplex).sum() : 0, x -> 0);
 +    }
 +
 +    int countComplex(ColumnData c)
 +    {
 +        if (!(c instanceof ComplexColumnData))
 +            return 0;
 +        ComplexColumnData ccd = (ComplexColumnData) c;
 +        return ccd.cellsCount();
 +    }
 +
 +    int count(SSTableReader reader, Function<Unfiltered, Integer> predicate, Function<UnfilteredRowIterator, Integer> partitionPredicate)
 +    {
 +        int instances = 0;
 +        try (ISSTableScanner partitions = reader.getScanner())
 +        {
 +            while (partitions.hasNext())
 +            {
 +                try (UnfilteredRowIterator iter = partitions.next())
 +                {
 +                    instances += partitionPredicate.apply(iter);
 +                    while (iter.hasNext())
 +                    {
 +                        Unfiltered atom = iter.next();
 +                        instances += predicate.apply(atom);
 +                    }
 +                }
 +            }
 +        }
 +        return instances;
 +    }
 +}
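One conflict resolution in this file is instructive: Iterable exposes no stream(), so once Row stopped being a Collection, countComplexCells has to reach stream() through the columnData() view. A toy equivalent:

    import java.util.Arrays;
    import java.util.Collection;

    class StreamViaViewSketch
    {
        public static void main(String[] args)
        {
            // Stand-in for row.columnData(): a Collection view over the row's data.
            Collection<Integer> columnData = Arrays.asList(1, 2, 3);
            // Collection, unlike Iterable, provides stream().
            int total = columnData.stream().mapToInt(Integer::intValue).sum();
            System.out.println(total); // prints 6
        }
    }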

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e635317c/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------




[3/6] cassandra git commit: Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

Posted by be...@apache.org.
Unfiltered.isEmpty conflicts with Row extends AbstractCollection.isEmpty

patch by Benedict; reviewed by Blake Eggleston for CASSANDRA-14588


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8404260f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8404260f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8404260f

Branch: refs/heads/trunk
Commit: 8404260f1640efd14613c4591e5e918786fcde10
Parents: 7bdea44
Author: Benedict Elliott Smith <be...@apple.com>
Authored: Fri Jul 27 16:04:13 2018 +0100
Committer: Benedict Elliott Smith <be...@apache.org>
Committed: Thu Nov 29 15:50:25 2018 +0000

----------------------------------------------------------------------
 CHANGES.txt                                        |  1 +
 .../org/apache/cassandra/db/rows/AbstractRow.java  |  8 +++++++-
 .../org/apache/cassandra/db/rows/BTreeRow.java     | 16 +++++++++++++---
 src/java/org/apache/cassandra/db/rows/Row.java     | 17 ++++++++++++++++-
 .../cassandra/db/rows/UnfilteredSerializer.java    |  8 ++++----
 .../validation/entities/SecondaryIndexTest.java    | 16 ++++++++--------
 .../db/SinglePartitionSliceCommandTest.java        |  2 +-
 7 files changed, 50 insertions(+), 18 deletions(-)
----------------------------------------------------------------------

