You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by mc...@apache.org on 2021/07/12 20:57:00 UTC

[cassandra] branch cassandra-3.11 updated (f49b86d -> 0c1e1cc)

This is an automated email from the ASF dual-hosted git repository.

mck pushed a change to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git.


    from f49b86d  Optimize bytes skipping when reading SSTables
     new b3f9921  Introduce SemVer4j for version representation, parsing and handling. And correct supported upgrade paths. Add v4X to Java DTests (after cassandra-4.0 branch was created)
     new 0a84dda  Merge branch 'cassandra-2.2' into cassandra-3.0
     new 0c1e1cc  Merge branch 'cassandra-3.0' into cassandra-3.11

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 build.xml                                          |  2 +-
 .../cassandra/distributed/impl/InstanceConfig.java | 14 ++--
 .../upgrade/CompactStorage2to3UpgradeTest.java     |  8 +--
 .../upgrade/DropCompactStorageTest.java            |  7 +-
 .../distributed/upgrade/MigrateDropColumns.java    | 20 +++---
 .../upgrade/MigrateDropColumns22To30To311Test.java |  2 +-
 .../upgrade/MigrateDropColumns22To311Test.java     |  2 +-
 .../upgrade/MigrateDropColumns30To311Test.java     |  2 +-
 .../upgrade/MixedModeRangeTombstoneTest.java       |  2 +-
 .../upgrade/MixedModeReadRepairTest.java           |  4 +-
 .../distributed/upgrade/MixedModeReadTest.java     |  2 +-
 .../cassandra/distributed/upgrade/PagingTest.java  |  2 +-
 .../cassandra/distributed/upgrade/UpgradeTest.java | 26 +------
 .../distributed/upgrade/UpgradeTestBase.java       | 80 +++++++++++++++-------
 14 files changed, 92 insertions(+), 81 deletions(-)

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 01/01: Merge branch 'cassandra-3.0' into cassandra-3.11

Posted by mc...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 0c1e1ccfadacf59e6a45e013e598830c8b637f4d
Merge: f49b86d 0a84dda
Author: Mick Semb Wever <mc...@apache.org>
AuthorDate: Mon Jul 12 22:20:10 2021 +0200

    Merge branch 'cassandra-3.0' into cassandra-3.11

 build.xml                                          |  2 +-
 .../cassandra/distributed/impl/InstanceConfig.java | 14 ++--
 .../upgrade/CompactStorage2to3UpgradeTest.java     |  8 +--
 .../upgrade/DropCompactStorageTest.java            |  7 +-
 .../distributed/upgrade/MigrateDropColumns.java    | 20 +++---
 .../upgrade/MigrateDropColumns22To30To311Test.java |  2 +-
 .../upgrade/MigrateDropColumns22To311Test.java     |  2 +-
 .../upgrade/MigrateDropColumns30To311Test.java     |  2 +-
 .../upgrade/MixedModeRangeTombstoneTest.java       |  2 +-
 .../upgrade/MixedModeReadRepairTest.java           |  4 +-
 .../distributed/upgrade/MixedModeReadTest.java     |  2 +-
 .../cassandra/distributed/upgrade/PagingTest.java  |  2 +-
 .../cassandra/distributed/upgrade/UpgradeTest.java | 26 +------
 .../distributed/upgrade/UpgradeTestBase.java       | 80 +++++++++++++++-------
 14 files changed, 92 insertions(+), 81 deletions(-)

diff --cc build.xml
index e40d62b,87851b8..a2a59d8
--- a/build.xml
+++ b/build.xml
@@@ -416,9 -390,9 +416,9 @@@
            <dependency groupId="org.apache.thrift" artifactId="libthrift" version="0.9.2">
  	         <exclusion groupId="commons-logging" artifactId="commons-logging"/>
            </dependency>
 -          <dependency groupId="junit" artifactId="junit" version="4.6" />
 +          <dependency groupId="junit" artifactId="junit" version="4.12" />
            <dependency groupId="org.mockito" artifactId="mockito-core" version="3.2.4" />
-           <dependency groupId="org.apache.cassandra" artifactId="dtest-api" version="0.0.7" />
+           <dependency groupId="org.apache.cassandra" artifactId="dtest-api" version="0.0.8" />
            <dependency groupId="org.reflections" artifactId="reflections" version="0.9.12" />
            <dependency groupId="org.quicktheories" artifactId="quicktheories" version="0.25" />
            <dependency groupId="org.apache.rat" artifactId="apache-rat" version="0.10">
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorage2to3UpgradeTest.java
index b0bbe64,9ea54c3..7235c72
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorage2to3UpgradeTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorage2to3UpgradeTest.java
@@@ -118,8 -118,9 +118,8 @@@ public class CompactStorage2to3UpgradeT
  
          final ResultsRecorder recorder = new ResultsRecorder();
          new TestCase()
 -
          .nodes(2)
-         .upgrade(Versions.Major.v22, Versions.Major.v3X)
+         .upgradesFrom(v22)
          .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
          .setup(cluster -> {
              cluster.schemaChange(String.format(
@@@ -208,121 -210,119 +208,121 @@@
          final int additionalParititons = 5;
  
          new TestCase()
 -                .nodes(2)
 -                .upgradesFrom(v22)
 -                .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL).set("enable_drop_compact_storage", true))
 -                .setup(cluster -> {
 -                    cluster.schemaChange(String.format(
 -                            "CREATE TABLE %s.%s (key int, c1 int, c2 int, c3 int, PRIMARY KEY (key, c1, c2)) WITH COMPACT STORAGE",
 -                            KEYSPACE, table));
 -                    ICoordinator coordinator = cluster.coordinator(1);
 -
 -                    for (int i = 1; i <= partitions; i++)
 -                    {
 -                        for (int j = 1; j <= rowsPerPartition; j++)
 -                        {
 -                            coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 1, 1)",
 -                                    KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 -                            coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 2, 2)",
 -                                                              KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 -                            coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 3, 3)",
 -                                                              KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 -                        }
 -                    }
 -                })
 -                .runAfterClusterUpgrade(cluster -> {
 -                    cluster.forEach(n -> n.nodetoolResult("upgradesstables", KEYSPACE).asserts().success());
 -                    Thread.sleep(1000);
 -                    // drop compact storage on only one node before performing writes
 -                    IMessageFilters.Filter filter = cluster.verbs().allVerbs().to(2).drop();
 -                    cluster.schemaChange(String.format("ALTER TABLE %s.%s DROP COMPACT STORAGE", KEYSPACE, table), 1);
 -                    filter.off();
 -
 -                    // add new partitions and delete some of the old ones
 -                    ICoordinator coordinator = cluster.coordinator(1);
 -                    for (int i = 0; i < additionalParititons; i++)
 -                    {
 -                        for (int j = 1; j <= rowsPerPartition; j++)
 -                        {
 -                            coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 1, 1)",
 -                                    KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 -                        }
 -                    }
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d",
 -                            KEYSPACE, table, 0, 3), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d",
 -                            KEYSPACE, table, 1), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 -                            KEYSPACE, table, 7, 2, 2), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 -                            KEYSPACE, table, 7, 6, 1), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 -                                                      KEYSPACE, table, 4, 1, 1), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE c3 FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 -                            KEYSPACE, table, 8, 1, 3), ConsistencyLevel.ALL);
 -
 -                    coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 > 1",
 -                                                      KEYSPACE, table, 6, 2), ConsistencyLevel.ALL);
 -
 -                    ResultsRecorder recorder = new ResultsRecorder();
 -                    runQueries(coordinator, recorder, new String[] {
 -                            String.format("SELECT * FROM %s.%s", KEYSPACE, table),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 -                                    KEYSPACE, table, partitions - 3, rowsPerPartition - 2),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 -                                    KEYSPACE, table, partitions - 1, rowsPerPartition - 5),
 -
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 -                                    KEYSPACE, table, partitions - 8, rowsPerPartition - 3),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d",
 -                                    KEYSPACE, table, 7),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 -                                    KEYSPACE, table, 7, 2),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 -                                    KEYSPACE, table, 8, 1),
 -
 -                            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 -                                    KEYSPACE, table, 8, 1),
 -
 -                            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 -                                          KEYSPACE, table, 8, 1),
 -
 -                            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 -                                          KEYSPACE, table, 4, 1),
 -
 -                            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d",
 -                                          KEYSPACE, table, 6),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 -                                    KEYSPACE, table, 0, 1),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d",
 -                                    KEYSPACE, table, partitions - (additionalParititons - 2)),
 -
 -                            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 -                                    KEYSPACE, table, partitions - (additionalParititons - 3), 4)
 -
 -                    });
 +        .nodes(2)
-         .upgrade(Versions.Major.v22, Versions.Major.v3X)
++        .upgradesFrom(v22)
 +        .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL).set("enable_drop_compact_storage", true))
 +        .setup(cluster -> {
 +            cluster.schemaChange(String.format(
 +            "CREATE TABLE %s.%s (key int, c1 int, c2 int, c3 int, PRIMARY KEY (key, c1, c2)) WITH COMPACT STORAGE",
 +            KEYSPACE, table));
 +            ICoordinator coordinator = cluster.coordinator(1);
 +
 +            for (int i = 1; i <= partitions; i++)
 +            {
 +                for (int j = 1; j <= rowsPerPartition; j++)
 +                {
 +                    coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 1, 1)",
 +                                                      KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 +                    coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 2, 2)",
 +                                                      KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 +                    coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 3, 3)",
 +                                                      KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 +                }
 +            }
  
 -                    // drop compact storage on remaining node and check result
 -                    cluster.schemaChange(String.format("ALTER TABLE %s.%s DROP COMPACT STORAGE", KEYSPACE, table), 2);
 -                    recorder.validateResults(cluster, 1);
 -                    recorder.validateResults(cluster, 2);
 -                }).run();
 +        })
 +        .runAfterClusterUpgrade(cluster -> {
 +            cluster.forEach(n -> n.nodetoolResult("upgradesstables", KEYSPACE).asserts().success());
 +            Thread.sleep(1000);
 +
 +            // drop compact storage on only one node before performing writes
 +            IMessageFilters.Filter filter = cluster.verbs().allVerbs().to(2).drop();
 +            cluster.schemaChange(String.format("ALTER TABLE %s.%s DROP COMPACT STORAGE", KEYSPACE, table), 1);
 +            filter.off();
 +
 +            // add new partitions and delete some of the old ones
 +            ICoordinator coordinator = cluster.coordinator(1);
 +            for (int i = 0; i < additionalParititons; i++)
 +            {
 +                for (int j = 1; j <= rowsPerPartition; j++)
 +                {
 +                    coordinator.execute(String.format("INSERT INTO %s.%s (key, c1, c2, c3) VALUES (%d, %d, 1, 1)",
 +                                                      KEYSPACE, table, i, j), ConsistencyLevel.ALL);
 +                }
 +            }
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d",
 +                                              KEYSPACE, table, 0, 3), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d",
 +                                              KEYSPACE, table, 1), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 +                                              KEYSPACE, table, 7, 2, 2), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 +                                              KEYSPACE, table, 7, 6, 1), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 +                                              KEYSPACE, table, 4, 1, 1), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE c3 FROM %s.%s WHERE key = %d and c1 = %d and c2 = %d",
 +                                              KEYSPACE, table, 8, 1, 3), ConsistencyLevel.ALL);
 +
 +            coordinator.execute(String.format("DELETE FROM %s.%s WHERE key = %d and c1 = %d and c2 > 1",
 +                                              KEYSPACE, table, 6, 2), ConsistencyLevel.ALL);
 +
 +            ResultsRecorder recorder = new ResultsRecorder();
 +            runQueries(coordinator, recorder, new String[] {
 +            String.format("SELECT * FROM %s.%s", KEYSPACE, table),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, partitions - 3, rowsPerPartition - 2),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, partitions - 1, rowsPerPartition - 5),
 +
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 +                          KEYSPACE, table, partitions - 8, rowsPerPartition - 3),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d",
 +                          KEYSPACE, table, 7),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, 7, 2),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, 8, 1),
 +
 +            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, 8, 1),
 +
 +            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, 8, 1),
 +
 +            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d and c1 = %d",
 +                          KEYSPACE, table, 4, 1),
 +
 +            String.format("SELECT c1, c2 FROM %s.%s WHERE key = %d",
 +                          KEYSPACE, table, 6),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 +                          KEYSPACE, table, 0, 1),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d",
 +                          KEYSPACE, table, partitions - (additionalParititons - 2)),
 +
 +            String.format("SELECT * FROM %s.%s WHERE key = %d and c1 > %d",
 +                          KEYSPACE, table, partitions - (additionalParititons - 3), 4)
 +
 +            });
 +
 +            // drop compact storage on remaining node and check result
 +            cluster.schemaChange(String.format("ALTER TABLE %s.%s DROP COMPACT STORAGE", KEYSPACE, table), 2);
 +            recorder.validateResults(cluster, 1);
 +            recorder.validateResults(cluster, 2);
 +        }).run();
      }
  
      private void runQueries(ICoordinator coordinator, ResultsRecorder helper, String[] queries)
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
index ed763cc,920458a..80ce02a
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
@@@ -32,9 -33,9 +33,9 @@@ import static org.assertj.core.api.Asse
  public class DropCompactStorageTest extends UpgradeTestBase
  {
      @Test
 -    public void dropCompactStorageBeforeUpgradesstablesTo30() throws Throwable
 +    public void dropCompactStorageBeforeUpgradesstablesTo3X() throws Throwable
      {
-         dropCompactStorageBeforeUpgradeSstables(Versions.Major.v3X);
 -        dropCompactStorageBeforeUpgradeSstables(v30);
++        dropCompactStorageBeforeUpgradeSstables(v3X);
      }
  
      /**
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns.java
index dfa1884,0000000..4c94433
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns.java
@@@ -1,131 -1,0 +1,135 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.distributed.upgrade;
 +
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.Objects;
 +
 +import com.google.common.collect.ImmutableMap;
 +import com.google.common.collect.ImmutableSet;
 +import com.google.common.collect.Sets;
++import com.vdurmont.semver4j.Semver;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +import org.apache.cassandra.db.marshal.CompositeType;
 +import org.apache.cassandra.db.marshal.Int32Type;
 +import org.apache.cassandra.db.marshal.MapType;
 +import org.apache.cassandra.distributed.api.ConsistencyLevel;
 +import org.apache.cassandra.distributed.api.Feature;
 +import org.apache.cassandra.distributed.api.ICoordinator;
 +import org.apache.cassandra.distributed.api.QueryResults;
 +import org.apache.cassandra.distributed.api.SimpleQueryResult;
 +import org.apache.cassandra.distributed.shared.AssertUtils;
 +import org.apache.cassandra.distributed.shared.Versions;
 +import org.apache.cassandra.distributed.test.ThriftClientUtils;
 +import org.apache.cassandra.thrift.Deletion;
 +import org.apache.cassandra.thrift.Mutation;
 +import org.apache.cassandra.thrift.SlicePredicate;
 +import org.apache.cassandra.thrift.SliceRange;
 +import org.apache.cassandra.utils.ByteBufferUtil;
 +
 +public abstract class MigrateDropColumns extends UpgradeTestBase
 +{
 +    private static final MapType MAP_TYPE = MapType.getInstance(Int32Type.instance, Int32Type.instance, true);
 +
-     private final Versions.Major initial;
-     private final Versions.Major[] upgrade;
++    private final Semver initial;
++    private final Semver[] upgrades;
 +
-     protected MigrateDropColumns(Versions.Major initial, Versions.Major... upgrade)
++    protected MigrateDropColumns(Semver initial, Semver... upgrade)
 +    {
 +        this.initial = Objects.requireNonNull(initial, "initial");
-         this.upgrade = Objects.requireNonNull(upgrade, "upgrade");
++        this.upgrades = Objects.requireNonNull(upgrade, "upgrade");
 +    }
 +
 +    @Test
 +    public void dropColumns() throws Throwable
 +    {
-         new TestCase()
-         .upgrade(initial, upgrade)
-         .withConfig(c -> c.with(Feature.NATIVE_PROTOCOL))
-         .setup(cluster -> {
++        TestCase testcase = new TestCase();
++				for (Semver upgrade : upgrades)
++            testcase = testcase.singleUpgrade(initial, upgrade);
++        
++				testcase
++			    .withConfig(c -> c.with(Feature.NATIVE_PROTOCOL))
++          .setup(cluster -> {
 +            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl(pk int, tables map<int, int>, PRIMARY KEY (pk))"));
 +
 +            ICoordinator coordinator = cluster.coordinator(1);
 +
 +            // write a RT to pk=0
 +            ThriftClientUtils.thriftClient(cluster.get(1), thrift -> {
 +                thrift.set_keyspace(KEYSPACE);
 +
 +                Mutation mutation = new Mutation();
 +                Deletion deletion = new Deletion();
 +                SlicePredicate slice = new SlicePredicate();
 +                SliceRange range = new SliceRange();
 +                range.setStart(CompositeType.build(ByteBufferUtil.bytes("tables")));
 +                range.setFinish(CompositeType.build(ByteBufferUtil.bytes("tables")));
 +                slice.setSlice_range(range);
 +                deletion.setPredicate(slice);
 +                deletion.setTimestamp(System.currentTimeMillis());
 +                mutation.setDeletion(deletion);
 +
 +                thrift.batch_mutate(Collections.singletonMap(ByteBufferUtil.bytes(0),
 +                                                             Collections.singletonMap("tbl", Arrays.asList(mutation))),
 +                                    org.apache.cassandra.thrift.ConsistencyLevel.ALL);
 +            });
 +
 +            // write table to pk=1
 +            // NOTE: because jvm-dtest doesn't support collections in the execute interface (see CASSANDRA-15969)
 +            // need to encode to a ByteBuffer first
 +            coordinator.execute(withKeyspace("INSERT INTO %s.tbl (pk, tables) VALUES (?, ?)"), ConsistencyLevel.ONE, 1, MAP_TYPE.decompose(ImmutableMap.of(1, 1)));
 +
 +            cluster.forEach(inst -> inst.flush(KEYSPACE));
 +
 +            cluster.schemaChange(withKeyspace("ALTER TABLE %s.tbl DROP tables"));
 +        })
 +        .runAfterClusterUpgrade(cluster -> {
 +            ICoordinator coordinator = cluster.coordinator(1);
 +            SimpleQueryResult qr = coordinator.executeWithResult("SELECT column_name " +
 +                                                                 "FROM system_schema.dropped_columns " +
 +                                                                 "WHERE keyspace_name=?" +
 +                                                                 " AND table_name=?;",
 +                                                                 ConsistencyLevel.ALL, KEYSPACE, "tbl");
 +            Assert.assertEquals(ImmutableSet.of("tables"), Sets.newHashSet(qr.map(r -> r.getString("column_name"))));
 +
 +            assertRows(coordinator);
 +
 +            // upgradesstables, make sure everything is still working
 +            cluster.forEach(n -> n.nodetoolResult("upgradesstables", KEYSPACE).asserts().success());
 +
 +            assertRows(coordinator);
 +        })
 +        .run();
 +    }
 +
 +    private static void assertRows(ICoordinator coordinator)
 +    {
 +        // since only a RT was written to this row there is no liveness information, so the row will be skipped
 +        AssertUtils.assertRows(
 +        coordinator.executeWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE pk=?"), ConsistencyLevel.ALL, 0),
 +        QueryResults.empty());
 +
 +        AssertUtils.assertRows(
 +        coordinator.executeWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE pk=?"), ConsistencyLevel.ALL, 1),
 +        QueryResults.builder().row(1).build());
 +    }
 +}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To30To311Test.java
index e68f979,0000000..2407dc5
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To30To311Test.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To30To311Test.java
@@@ -1,29 -1,0 +1,29 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.distributed.upgrade;
 +
 +import org.apache.cassandra.distributed.shared.Versions;
 +
 +public class MigrateDropColumns22To30To311Test extends MigrateDropColumns
 +{
 +    public MigrateDropColumns22To30To311Test()
 +    {
-         super(Versions.Major.v22, Versions.Major.v30, Versions.Major.v3X);
++        super(v22, v30, v3X);
 +    }
 +}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To311Test.java
index 5de5458,0000000..1235907
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To311Test.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns22To311Test.java
@@@ -1,29 -1,0 +1,29 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.distributed.upgrade;
 +
 +import org.apache.cassandra.distributed.shared.Versions;
 +
 +public class MigrateDropColumns22To311Test extends MigrateDropColumns
 +{
 +    public MigrateDropColumns22To311Test()
 +    {
-         super(Versions.Major.v22, Versions.Major.v3X);
++        super(v22, v3X);
 +    }
 +}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns30To311Test.java
index 1ec460f,0000000..4a19698
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns30To311Test.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MigrateDropColumns30To311Test.java
@@@ -1,29 -1,0 +1,29 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.distributed.upgrade;
 +
 +import org.apache.cassandra.distributed.shared.Versions;
 +
 +public class MigrateDropColumns30To311Test extends MigrateDropColumns
 +{
 +    public MigrateDropColumns30To311Test()
 +    {
-         super(Versions.Major.v30, Versions.Major.v3X);
++        super(v30, v3X);
 +    }
 +}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
index d908cd5,0000000..756f894
mode 100644,000000..100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
@@@ -1,65 -1,0 +1,65 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.cassandra.distributed.upgrade;
 +
 +import org.junit.Test;
 +
 +import org.apache.cassandra.distributed.api.Feature;
 +import org.apache.cassandra.distributed.api.IInvokableInstance;
 +import org.apache.cassandra.distributed.shared.Versions;
 +import org.apache.cassandra.gms.Gossiper;
 +
 +import static org.apache.cassandra.distributed.test.ReadDigestConsistencyTest.CREATE_TABLE;
 +import static org.apache.cassandra.distributed.test.ReadDigestConsistencyTest.insertData;
 +import static org.apache.cassandra.distributed.test.ReadDigestConsistencyTest.testDigestConsistency;
 +
 +public class MixedModeReadTest extends UpgradeTestBase
 +{
 +    @Test
 +    public void mixedModeReadColumnSubsetDigestCheck() throws Throwable
 +    {
 +        new TestCase()
 +        .nodes(2)
 +        .nodesToUpgrade(1)
-         .upgrade(Versions.Major.v30, Versions.Major.v3X)
++        .singleUpgrade(v30, v3X)
 +        .withConfig(config -> config.with(Feature.GOSSIP, Feature.NETWORK))
 +        .setup(cluster -> {
 +            cluster.schemaChange(CREATE_TABLE);
 +            insertData(cluster.coordinator(1));
 +            testDigestConsistency(cluster.coordinator(1));
 +            testDigestConsistency(cluster.coordinator(2));
 +        })
 +        .runAfterClusterUpgrade(cluster -> {
 +            // we need to let gossip settle or the test will fail
 +            int attempts = 1;
 +            //noinspection Convert2MethodRef
 +            while (!((IInvokableInstance) (cluster.get(1))).callOnInstance(() -> Gossiper.instance.isAnyNodeOn30()))
 +            {
 +                if (attempts++ > 30)
 +                    throw new RuntimeException("Gossiper.instance.isAnyNodeOn30() continually returns false despite expecting to be true");
 +                Thread.sleep(1000);
 +            }
 +
 +            // should not cause a digest mismatch in mixed mode
 +            testDigestConsistency(cluster.coordinator(1));
 +            testDigestConsistency(cluster.coordinator(2));
 +        })
 +        .run();
 +    }
 +}
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
index badfd3f,943e305..0932eb1
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
@@@ -87,27 -87,4 +87,5 @@@ public class UpgradeTest extends Upgrad
              }
          }).run();
      }
 +
-     @Test
-     public void simpleUpgradeWithNetworkAndGossipTest() throws Throwable
-     {
-         new TestCase()
-         .nodes(2)
-         .nodesToUpgrade(1)
-         .withConfig((cfg) -> cfg.with(Feature.NETWORK, Feature.GOSSIP))
-         .upgrade(Versions.Major.v3X, Versions.Major.v4)
-         .setup((cluster) -> {
-             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
-             cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)", ConsistencyLevel.ALL);
-         })
-         .runAfterNodeUpgrade((cluster, node) -> {
-             for (int i : new int[]{ 1, 2 })
-             {
-                 assertRows(cluster.coordinator(i).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = ?",
-                                                           ConsistencyLevel.ALL,
-                                                           1),
-                            row(1, 1, 1));
-             }
-         }).run();
-     }
  }
diff --cc test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
index 4c9dbab,db34c61..6aa6f61
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
@@@ -71,6 -76,15 +77,18 @@@ public class UpgradeTestBase extends Di
          public void run(UpgradeableCluster cluster, int node) throws Throwable;
      }
  
 -    public static final Semver v22 = new Semver("2.2", SemverType.LOOSE);
 -    public static final Semver v30 = new Semver("3.0", SemverType.LOOSE);
++    public static final Semver v22 = new Semver("2.2.0-beta1", SemverType.LOOSE);
++    public static final Semver v30 = new Semver("3.0.0-alpha1", SemverType.LOOSE);
++    public static final Semver v3X = new Semver("3.11.0", SemverType.LOOSE);
+ 
+     protected static final List<Pair<Semver,Semver>> SUPPORTED_UPGRADE_PATHS = ImmutableList.of(
 -        Pair.create(v22, v30));
++        Pair.create(v22, v30),
++        Pair.create(v22, v3X),
++        Pair.create(v30, v3X));
+ 
+     // the last is always the current
+     public static final Semver CURRENT = SUPPORTED_UPGRADE_PATHS.get(SUPPORTED_UPGRADE_PATHS.size() - 1).right;
+ 
      public static class TestVersions
      {
          final Version initial;

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org