Posted to commits@cassandra.apache.org by al...@apache.org on 2019/06/26 14:31:08 UTC

[cassandra] branch trunk updated (225fa86 -> 3261b23)

This is an automated email from the ASF dual-hosted git repository.

aleksey pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git.


    from 225fa86  Fix flaky unit test AuditLoggerTest
     add dad82fb  Handle paging states serialized with a different version than the session's
     add 5514696  Merge branch 'cassandra-3.0' into cassandra-3.11
     new 3261b23  Merge branch 'cassandra-3.11' into trunk

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CHANGES.txt                                        |   3 +
 .../apache/cassandra/io/util/DataOutputBuffer.java |  13 +
 .../cassandra/service/pager/PagingState.java       | 284 ++++++++++++++++-----
 test/unit/org/apache/cassandra/Util.java           |   7 +-
 .../apache/cassandra/service/QueryPagerTest.java   |  35 +--
 .../cassandra/service/pager/PagingStateTest.java   |  42 ++-
 6 files changed, 295 insertions(+), 89 deletions(-)
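
The substantive change here is CASSANDRA-15176: a client can resume paging with a
state that was serialized under a different serialization version than the one the
current session negotiated, so deserialization has to cope with that rather than
blindly assuming the session's version. The sketch below is illustrative only -- it
is not Cassandra's PagingState code, and the two wire layouts and all class/field
names are invented for the example -- but it shows the general shape of the idea:
validate which layout the buffer actually uses before reading it.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

/**
 * Illustrative sketch only -- not Cassandra's PagingState. A client may resume
 * paging with a state serialized under a different format version than the one
 * the current session speaks, so deserialization recognizes both (hypothetical)
 * layouts instead of trusting the session's protocol version alone.
 */
public final class PagingCursorSketch
{
    public final byte[] partitionKey;
    public final int remaining;

    private PagingCursorSketch(byte[] partitionKey, int remaining)
    {
        this.partitionKey = partitionKey;
        this.remaining = remaining;
    }

    /** "Modern" layout (hypothetical): unsigned-short key length, key bytes, int remaining. */
    public ByteBuffer serializeModern()
    {
        ByteBuffer out = ByteBuffer.allocate(2 + partitionKey.length + 4);
        out.putShort((short) partitionKey.length).put(partitionKey).putInt(remaining);
        out.flip();
        return out;
    }

    /** "Legacy" layout (hypothetical): int key length, key bytes, int remaining. */
    public ByteBuffer serializeLegacy()
    {
        ByteBuffer out = ByteBuffer.allocate(4 + partitionKey.length + 4);
        out.putInt(partitionKey.length).put(partitionKey).putInt(remaining);
        out.flip();
        return out;
    }

    /**
     * Deserialize without assuming the session's version matches the state's version:
     * check which layout is internally consistent with the buffer size and use that one.
     */
    public static PagingCursorSketch deserialize(ByteBuffer in)
    {
        ByteBuffer buf = in.duplicate();
        if (buf.remaining() >= 6)
        {
            int keyLen = buf.getShort(buf.position()) & 0xFFFF;
            if (buf.remaining() == 2 + keyLen + 4)
                return readAfterLength(buf, buf.getShort() & 0xFFFF);
        }
        if (buf.remaining() >= 8)
        {
            int keyLen = buf.getInt(buf.position());
            if (buf.remaining() == 4 + keyLen + 4)
                return readAfterLength(buf, buf.getInt());
        }
        throw new IllegalArgumentException("Unrecognized paging state layout");
    }

    private static PagingCursorSketch readAfterLength(ByteBuffer buf, int keyLen)
    {
        byte[] key = new byte[keyLen];
        buf.get(key);
        return new PagingCursorSketch(key, buf.getInt());
    }

    public static void main(String[] args)
    {
        PagingCursorSketch state = new PagingCursorSketch("someKey".getBytes(StandardCharsets.US_ASCII), 10);
        // Both layouts round-trip through the same deserialize(), regardless of session version.
        System.out.println(deserialize(state.serializeModern()).remaining);
        System.out.println(deserialize(state.serializeLegacy()).remaining);
    }
}

The actual implementation in PagingState.java below is far more involved (row marks,
vint encodings, per-protocol-version formats), but the same principle applies.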


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 01/01: Merge branch 'cassandra-3.11' into trunk

Posted by al...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aleksey pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 3261b23a789f595414f5e2d11545c87b97c44e32
Merge: 225fa86 5514696
Author: Aleksey Yeshchenko <al...@apache.org>
AuthorDate: Wed Jun 26 15:30:41 2019 +0100

    Merge branch 'cassandra-3.11' into trunk

 CHANGES.txt                                        |   3 +
 .../apache/cassandra/io/util/DataOutputBuffer.java |  13 +
 .../cassandra/service/pager/PagingState.java       | 284 ++++++++++++++++-----
 test/unit/org/apache/cassandra/Util.java           |   7 +-
 .../apache/cassandra/service/QueryPagerTest.java   |  35 +--
 .../cassandra/service/pager/PagingStateTest.java   |  42 ++-
 6 files changed, 295 insertions(+), 89 deletions(-)

diff --cc CHANGES.txt
index 6435d03,e2aa652..de8d648
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -366,8 -3,13 +366,11 @@@
   * Fixed nodetool cfstats printing index name twice (CASSANDRA-14903)
   * Add flag to disable SASI indexes, and warnings on creation (CASSANDRA-14866)
  Merged from 3.0:
+  * Handle paging states serialized with a different version than the session's (CASSANDRA-15176)
+  * Throw IOE instead of asserting on unsupported peer versions (CASSANDRA-15066)
+  * Update token metadata when handling MOVING/REMOVING_TOKEN events (CASSANDRA-15120)
   * Add ability to customize cassandra log directory using $CASSANDRA_LOG_DIR (CASSANDRA-15090)
 - * Skip cells with illegal column names when reading legacy sstables (CASSANDRA-15086)
   * Fix assorted gossip races and add related runtime checks (CASSANDRA-15059)
 - * Fix mixed mode partition range scans with limit (CASSANDRA-15072)
   * cassandra-stress works with frozen collections: list and set (CASSANDRA-14907)
   * Fix handling FS errors on writing and reading flat files - LogTransaction and hints (CASSANDRA-15053)
   * Avoid double closing the iterator to avoid overcounting the number of requests (CASSANDRA-15058)
diff --cc src/java/org/apache/cassandra/service/pager/PagingState.java
index f036f96,9b7eccf..8df2366
--- a/src/java/org/apache/cassandra/service/pager/PagingState.java
+++ b/src/java/org/apache/cassandra/service/pager/PagingState.java
@@@ -21,23 -21,30 +21,31 @@@ import java.io.IOException
  import java.nio.ByteBuffer;
  import java.util.*;
  
- import org.apache.cassandra.schema.TableMetadata;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.primitives.Ints;
+ 
 -import org.apache.cassandra.config.CFMetaData;
  import org.apache.cassandra.db.Clustering;
 -import org.apache.cassandra.db.LegacyLayout;
 +import org.apache.cassandra.db.CompactTables;
- import org.apache.cassandra.db.TypeSizes;
  import org.apache.cassandra.db.marshal.AbstractType;
  import org.apache.cassandra.db.marshal.BytesType;
 +import org.apache.cassandra.db.marshal.CompositeType;
  import org.apache.cassandra.db.rows.Cell;
  import org.apache.cassandra.db.rows.Row;
  import org.apache.cassandra.io.util.DataInputBuffer;
  import org.apache.cassandra.io.util.DataOutputBuffer;
  import org.apache.cassandra.io.util.DataOutputBufferFixed;
  import org.apache.cassandra.net.MessagingService;
- import org.apache.cassandra.transport.ProtocolVersion;
++import org.apache.cassandra.schema.TableMetadata;
  import org.apache.cassandra.transport.ProtocolException;
- import org.apache.cassandra.utils.ByteBufferUtil;
+ import org.apache.cassandra.transport.ProtocolVersion;
+ 
+ import static org.apache.cassandra.db.TypeSizes.sizeof;
+ import static org.apache.cassandra.db.TypeSizes.sizeofUnsignedVInt;
+ import static org.apache.cassandra.utils.ByteBufferUtil.*;
+ import static org.apache.cassandra.utils.vint.VIntCoding.computeUnsignedVIntSize;
+ import static org.apache.cassandra.utils.vint.VIntCoding.getUnsignedVInt;
  
+ @SuppressWarnings("WeakerAccess")
  public class PagingState
  {
      public final ByteBuffer partitionKey;  // Can be null for single partition queries.
@@@ -217,7 -360,7 +361,7 @@@
                      // If the last returned row has no cell, this means in 2.1/2.2 terms that we stopped on the row
                      // marker. Note that this shouldn't happen if the table is COMPACT.
                      assert !metadata.isCompactTable();
-                     mark = encodeCellName(metadata, row.clustering(), ByteBufferUtil.EMPTY_BYTE_BUFFER, null);
 -                    mark = LegacyLayout.encodeCellName(metadata, row.clustering(), EMPTY_BYTE_BUFFER, null);
++                    mark = encodeCellName(metadata, row.clustering(), EMPTY_BYTE_BUFFER, null);
                  }
                  else
                  {
@@@ -244,80 -387,6 +388,80 @@@
                   : Clustering.serializer.deserialize(mark, MessagingService.VERSION_30, makeClusteringTypes(metadata));
          }
  
 +        // Old (pre-3.0) encoding of cells. We need this for protocol v3, as that is how things were encoded
 +        private static ByteBuffer encodeCellName(TableMetadata metadata, Clustering clustering, ByteBuffer columnName, ByteBuffer collectionElement)
 +        {
 +            boolean isStatic = clustering == Clustering.STATIC_CLUSTERING;
 +
 +            if (!metadata.isCompound())
 +            {
 +                if (isStatic)
 +                    return columnName;
 +
 +                assert clustering.size() == 1 : "Expected clustering size to be 1, but was " + clustering.size();
 +                return clustering.get(0);
 +            }
 +
 +            // We use comparator.size() rather than clustering.size() because of static clusterings
 +            int clusteringSize = metadata.comparator.size();
 +            int size = clusteringSize + (metadata.isDense() ? 0 : 1) + (collectionElement == null ? 0 : 1);
 +            if (metadata.isSuper())
 +                size = clusteringSize + 1;
 +            ByteBuffer[] values = new ByteBuffer[size];
 +            for (int i = 0; i < clusteringSize; i++)
 +            {
 +                if (isStatic)
 +                {
-                     values[i] = ByteBufferUtil.EMPTY_BYTE_BUFFER;
++                    values[i] = EMPTY_BYTE_BUFFER;
 +                    continue;
 +                }
 +
 +                ByteBuffer v = clustering.get(i);
 +                // we can have null (only for dense compound tables for backward compatibility reasons) but that
 +                // means we're done and should stop there as far as building the composite is concerned.
 +                if (v == null)
 +                    return CompositeType.build(Arrays.copyOfRange(values, 0, i));
 +
 +                values[i] = v;
 +            }
 +
 +            if (metadata.isSuper())
 +            {
 +                // We need to set the "column" (in thrift terms) name, i.e. the value corresponding to the subcomparator.
 +                // What it is depends on whether this is a cell for a declared "static" column or a "dynamic" column
 +                // that is part of the super-column internal map.
 +                assert columnName != null; // This should never be null for supercolumns, see decodeForSuperColumn() above
 +                values[clusteringSize] = columnName.equals(CompactTables.SUPER_COLUMN_MAP_COLUMN)
 +                                         ? collectionElement
 +                                         : columnName;
 +            }
 +            else
 +            {
 +                if (!metadata.isDense())
 +                    values[clusteringSize] = columnName;
 +                if (collectionElement != null)
 +                    values[clusteringSize + 1] = collectionElement;
 +            }
 +
 +            return CompositeType.build(isStatic, values);
 +        }
 +
 +        private static Clustering decodeClustering(TableMetadata metadata, ByteBuffer value)
 +        {
 +            int csize = metadata.comparator.size();
 +            if (csize == 0)
 +                return Clustering.EMPTY;
 +
 +            if (metadata.isCompound() && CompositeType.isStaticName(value))
 +                return Clustering.STATIC_CLUSTERING;
 +
 +            List<ByteBuffer> components = metadata.isCompound()
 +                                          ? CompositeType.splitName(value)
 +                                          : Collections.singletonList(value);
 +
 +            return Clustering.make(components.subList(0, Math.min(csize, components.size())).toArray(new ByteBuffer[csize]));
 +        }
 +
          @Override
          public final int hashCode()
          {
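
The backported encodeCellName()/decodeClustering() above rebuild the pre-3.0
(thrift-era) cell-name encoding on top of CompositeType. For readers unfamiliar
with that layout, here is a small standalone sketch of the composite convention
those helpers rely on: each component is written as a 2-byte length, the component
bytes, and a one-byte end-of-component marker. This is an illustration only -- it
is not CompositeType itself, and it omits details such as the static-clustering
marker that the real class handles.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/** Standalone sketch of a composite name layout; not Cassandra's CompositeType. */
public final class CompositeSketch
{
    public static ByteBuffer build(ByteBuffer... components)
    {
        int size = 0;
        for (ByteBuffer c : components)
            size += 2 + c.remaining() + 1;

        ByteBuffer out = ByteBuffer.allocate(size);
        for (ByteBuffer c : components)
        {
            out.putShort((short) c.remaining()); // 2-byte length (unsigned short in the real format)
            out.put(c.duplicate());              // component bytes
            out.put((byte) 0);                   // end-of-component marker
        }
        out.flip();
        return out;
    }

    public static List<ByteBuffer> split(ByteBuffer composite)
    {
        ByteBuffer buf = composite.duplicate();
        List<ByteBuffer> components = new ArrayList<>();
        while (buf.hasRemaining())
        {
            int length = buf.getShort() & 0xFFFF;
            ByteBuffer component = buf.slice();
            component.limit(length);
            components.add(component);
            buf.position(buf.position() + length);
            buf.get(); // skip the end-of-component byte
        }
        return components;
    }

    public static void main(String[] args)
    {
        ByteBuffer composite = build(ByteBuffer.wrap("c1".getBytes(StandardCharsets.US_ASCII)),
                                     ByteBuffer.wrap(new byte[]{ 0, 0, 0, 42 }));
        System.out.println(split(composite).size()); // prints 2
    }
}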
diff --cc test/unit/org/apache/cassandra/Util.java
index ba5d4d3,006cd76..df45f3c
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@@ -708,13 -692,17 +708,18 @@@ public class Uti
  
      public static PagingState makeSomePagingState(ProtocolVersion protocolVersion)
      {
+         return makeSomePagingState(protocolVersion, Integer.MAX_VALUE);
+     }
+ 
+     public static PagingState makeSomePagingState(ProtocolVersion protocolVersion, int remainingInPartition)
+     {
 -        CFMetaData metadata = CFMetaData.Builder.create("ks", "tbl")
 -                                                .addPartitionKey("k", AsciiType.instance)
 -                                                .addClusteringColumn("c1", AsciiType.instance)
 -                                                .addClusteringColumn("c1", Int32Type.instance)
 -                                                .addRegularColumn("myCol", AsciiType.instance)
 -                                                .build();
 +        TableMetadata metadata =
 +            TableMetadata.builder("ks", "tbl")
 +                         .addPartitionKeyColumn("k", AsciiType.instance)
 +                         .addClusteringColumn("c1", AsciiType.instance)
 +                         .addClusteringColumn("c2", Int32Type.instance)
 +                         .addRegularColumn("myCol", AsciiType.instance)
 +                         .build();
  
          ByteBuffer pk = ByteBufferUtil.bytes("someKey");
  
@@@ -722,40 -710,6 +727,40 @@@
          Clustering c = Clustering.make(ByteBufferUtil.bytes("c1"), ByteBufferUtil.bytes(42));
          Row row = BTreeRow.singleCellRow(c, BufferCell.live(def, 0, ByteBufferUtil.EMPTY_BYTE_BUFFER));
          PagingState.RowMark mark = PagingState.RowMark.create(metadata, row, protocolVersion);
-         return new PagingState(pk, mark, 10, 0);
+         return new PagingState(pk, mark, 10, remainingInPartition);
      }
 +
 +    public static void assertRCEquals(ReplicaCollection<?> a, ReplicaCollection<?> b)
 +    {
 +        assertTrue(a + " not equal to " + b, Iterables.elementsEqual(a, b));
 +    }
 +
 +    public static void assertNotRCEquals(ReplicaCollection<?> a, ReplicaCollection<?> b)
 +    {
 +        assertFalse(a + " equal to " + b, Iterables.elementsEqual(a, b));
 +    }
 +
 +    /**
 +     * Makes sure that the sstables on disk are the same ones as the cfs live sstables (that they have the same generation)
 +     */
 +    public static void assertOnDiskState(ColumnFamilyStore cfs, int expectedSSTableCount)
 +    {
 +        LifecycleTransaction.waitForDeletions();
 +        assertEquals(expectedSSTableCount, cfs.getLiveSSTables().size());
 +        Set<Integer> liveGenerations = cfs.getLiveSSTables().stream().map(sstable -> sstable.descriptor.generation).collect(Collectors.toSet());
 +        int fileCount = 0;
 +        for (File f : cfs.getDirectories().getCFDirectories())
 +        {
 +            for (File sst : f.listFiles())
 +            {
 +                if (sst.getName().contains("Data"))
 +                {
 +                    Descriptor d = Descriptor.fromFilename(sst.getAbsolutePath());
 +                    assertTrue(liveGenerations.contains(d.generation));
 +                    fileCount++;
 +                }
 +            }
 +        }
 +        assertEquals(expectedSSTableCount, fileCount);
 +    }
  }
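
The new makeSomePagingState(ProtocolVersion, int) overload exists so tests can build
states with a specific remainingInPartition value and round-trip them across versions.
A hypothetical test along these lines is sketched below; the PagingState
serialize/deserialize signatures and public fields are assumed from the surrounding
test code in this commit rather than verified against trunk.

import java.nio.ByteBuffer;

import org.junit.Test;

import org.apache.cassandra.Util;
import org.apache.cassandra.service.pager.PagingState;
import org.apache.cassandra.transport.ProtocolVersion;

import static org.junit.Assert.assertEquals;

/** Sketch of a cross-version round trip; API details are assumed, not verified. */
public class PagingStateCrossVersionSketchTest
{
    @Test
    public void stateWrittenUnderOneVersionIsReadableUnderAnother()
    {
        // Build a state with a non-default remainingInPartition via the new overload.
        PagingState original = Util.makeSomePagingState(ProtocolVersion.V3, 17);

        // Assumed signature: PagingState.serialize(ProtocolVersion)
        ByteBuffer bytes = original.serialize(ProtocolVersion.V3);

        // The session may now speak a newer protocol version than the one the
        // state was serialized with; per CASSANDRA-15176 deserialization should cope.
        // Assumed signature: PagingState.deserialize(ByteBuffer, ProtocolVersion)
        PagingState reread = PagingState.deserialize(bytes, ProtocolVersion.V4);
        assertEquals(original.remainingInPartition, reread.remainingInPartition);
    }
}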

